1 // SPDX-License-Identifier: GPL-2.0
5 // Copyright 2023 Arm Ltd
7 #include <kunit/test.h>
10 #define BLOCK_TEST_SIZE 12
/*
 * Shared regmap_config template: each test copies this and then
 * specialises cache_type etc. for its case.  Values are full-width
 * unsigned ints over registers 0..BLOCK_TEST_SIZE.
 * NOTE(review): some initialiser lines (e.g. reg_bits, closing brace)
 * appear elided in this extract - confirm against the full file.
 */
12 static const struct regmap_config test_regmap_config = {
13 .max_register = BLOCK_TEST_SIZE,
15 .val_bits = sizeof(unsigned int) * 8,
/*
 * KUnit test parameter: a regcache implementation to exercise plus a
 * human-readable name used to build the test description.
 */
18 struct regcache_types {
19 enum regcache_type type;
23 static void case_to_desc(const struct regcache_types *t, char *desc)
25 strcpy(desc, t->name);
/*
 * All cache types, including REGCACHE_NONE; parameterises the generic
 * read/write tests so each runs once per cache implementation.
 */
28 static const struct regcache_types regcache_types_list[] = {
29 { REGCACHE_NONE, "none" },
30 { REGCACHE_FLAT, "flat" },
31 { REGCACHE_RBTREE, "rbtree" },
32 { REGCACHE_MAPLE, "maple" },
35 KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, case_to_desc);
/*
 * Only the real cache implementations (no REGCACHE_NONE); used by tests
 * that need a cache to exist, e.g. bypass and sync tests.
 */
37 static const struct regcache_types real_cache_types_list[] = {
38 { REGCACHE_FLAT, "flat" },
39 { REGCACHE_RBTREE, "rbtree" },
40 { REGCACHE_MAPLE, "maple" },
43 KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, case_to_desc);
/*
 * Cache types that support sparse contents (flat cannot); used by the
 * drop-region and cache-present tests.
 */
45 static const struct regcache_types sparse_cache_types_list[] = {
46 { REGCACHE_RBTREE, "rbtree" },
47 { REGCACHE_MAPLE, "maple" },
50 KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, case_to_desc);
/*
 * Create a RAM-backed regmap for @config, returning the backing store
 * in @data so tests can inspect/poke the "hardware" directly.
 *
 * The device contents are seeded with random bytes; when the config
 * requests register defaults, a reg_defaults table is built that
 * matches those random device values, so cache defaults and hardware
 * agree at start of test.
 *
 * NOTE(review): the ERR_PTR(-ENOMEM) returns do not appear to free buf
 * or *data allocated earlier - possible leak on error paths; several
 * lines are elided in this extract, confirm against the full file.
 */
52 static struct regmap *gen_regmap(struct regmap_config *config,
53 struct regmap_ram_data **data)
57 size_t size = (config->max_register + 1) * sizeof(unsigned int);
59 struct reg_default *defaults;
/* Locking disabled for rbtree/maple - presumably because those caches
 * allocate during access; TODO confirm rationale upstream. */
61 config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
62 config->cache_type == REGCACHE_MAPLE;
64 buf = kmalloc(size, GFP_KERNEL);
66 return ERR_PTR(-ENOMEM);
68 get_random_bytes(buf, size);
70 *data = kzalloc(sizeof(**data), GFP_KERNEL);
72 return ERR_PTR(-ENOMEM);
75 if (config->num_reg_defaults) {
76 defaults = kcalloc(config->num_reg_defaults,
77 sizeof(struct reg_default),
80 return ERR_PTR(-ENOMEM);
81 config->reg_defaults = defaults;
/* Defaults mirror the random device contents, stride-aware. */
83 for (i = 0; i < config->num_reg_defaults; i++) {
84 defaults[i].reg = i * config->reg_stride;
85 defaults[i].def = buf[i * config->reg_stride];
89 ret = regmap_init_ram(config, *data);
/*
 * Register access predicate: false for register 5, true otherwise
 * (body elided in this extract).  Used to make register 5 read-only
 * or write-only in the tests below.
 */
98 static bool reg_5_false(struct device *context, unsigned int reg)
/*
 * Single-register round trip: a written value reads back, and when a
 * cache is configured the read must be satisfied from the cache (no
 * physical read of register 0).
 */
103 static void basic_read_write(struct kunit *test)
105 struct regcache_types *t = (struct regcache_types *)test->param_value;
107 struct regmap_config config;
108 struct regmap_ram_data *data;
109 unsigned int val, rval;
111 config = test_regmap_config;
112 config.cache_type = t->type;
114 map = gen_regmap(&config, &data);
115 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
119 get_random_bytes(&val, sizeof(val));
121 /* If we write a value to a register we can read it back */
122 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
123 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
124 KUNIT_EXPECT_EQ(test, val, rval);
126 /* If using a cache the cache satisfied the read */
127 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[0]);
/*
 * Bulk write / single read round trip: data written via
 * regmap_bulk_write() must read back register-by-register, and with a
 * cache configured no physical reads should occur.
 */
132 static void bulk_write(struct kunit *test)
134 struct regcache_types *t = (struct regcache_types *)test->param_value;
136 struct regmap_config config;
137 struct regmap_ram_data *data;
138 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
141 config = test_regmap_config;
142 config.cache_type = t->type;
144 map = gen_regmap(&config, &data);
145 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
149 get_random_bytes(&val, sizeof(val));
152 * Data written via the bulk API can be read back with single
155 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
157 for (i = 0; i < BLOCK_TEST_SIZE; i++)
158 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));
160 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
162 /* If using a cache the cache satisfied the read */
163 for (i = 0; i < BLOCK_TEST_SIZE; i++)
164 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
/*
 * Single write / bulk read round trip: the mirror of bulk_write() -
 * values written one at a time must be returned by regmap_bulk_read(),
 * from the cache when one is configured.
 */
169 static void bulk_read(struct kunit *test)
171 struct regcache_types *t = (struct regcache_types *)test->param_value;
173 struct regmap_config config;
174 struct regmap_ram_data *data;
175 unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
178 config = test_regmap_config;
179 config.cache_type = t->type;
181 map = gen_regmap(&config, &data);
182 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
186 get_random_bytes(&val, sizeof(val));
188 /* Data written as single writes can be read via the bulk API */
189 for (i = 0; i < BLOCK_TEST_SIZE; i++)
190 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
191 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
193 KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));
195 /* If using a cache the cache satisfied the read */
196 for (i = 0; i < BLOCK_TEST_SIZE; i++)
197 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
/*
 * Writes to a read-only register must fail and must not reach the
 * device: reg_5_false() marks register 5 unwriteable, all others
 * should accept and record the write.
 */
202 static void write_readonly(struct kunit *test)
204 struct regcache_types *t = (struct regcache_types *)test->param_value;
206 struct regmap_config config;
207 struct regmap_ram_data *data;
211 config = test_regmap_config;
212 config.cache_type = t->type;
213 config.num_reg_defaults = BLOCK_TEST_SIZE;
214 config.writeable_reg = reg_5_false;
216 map = gen_regmap(&config, &data);
217 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
221 get_random_bytes(&val, sizeof(val));
223 for (i = 0; i < BLOCK_TEST_SIZE; i++)
224 data->written[i] = false;
226 /* Change the value of all registers, readonly should fail */
227 for (i = 0; i < BLOCK_TEST_SIZE; i++)
228 KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
230 /* Did that match what we see on the device? */
231 for (i = 0; i < BLOCK_TEST_SIZE; i++)
232 KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
/*
 * Reads of a write-only register must fail and never hit the hardware.
 * The flat cache is exempt from the failure expectation: per the
 * comment below it still satisfies the read (register 5 stays
 * untouched on the device either way).
 */
237 static void read_writeonly(struct kunit *test)
239 struct regcache_types *t = (struct regcache_types *)test->param_value;
241 struct regmap_config config;
242 struct regmap_ram_data *data;
246 config = test_regmap_config;
247 config.cache_type = t->type;
248 config.readable_reg = reg_5_false;
250 map = gen_regmap(&config, &data);
251 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
255 for (i = 0; i < BLOCK_TEST_SIZE; i++)
256 data->read[i] = false;
259 * Try to read all the registers, the writeonly one should
260 * fail if we aren't using the flat cache.
262 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
263 if (t->type != REGCACHE_FLAT) {
264 KUNIT_EXPECT_EQ(test, i != 5,
265 regmap_read(map, i, &val) == 0);
267 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
271 /* Did we trigger a hardware access? */
272 KUNIT_EXPECT_FALSE(test, data->read[5]);
/*
 * With num_reg_defaults set, a bulk read must return the device's
 * seeded values and, when a cache exists, do so without any physical
 * reads (the defaults pre-populate the cache).
 */
277 static void reg_defaults(struct kunit *test)
279 struct regcache_types *t = (struct regcache_types *)test->param_value;
281 struct regmap_config config;
282 struct regmap_ram_data *data;
283 unsigned int rval[BLOCK_TEST_SIZE];
286 config = test_regmap_config;
287 config.cache_type = t->type;
288 config.num_reg_defaults = BLOCK_TEST_SIZE;
290 map = gen_regmap(&config, &data);
291 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
295 /* Read back the expected default data */
296 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
298 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
300 /* The data should have been read from cache if there was one */
301 for (i = 0; i < BLOCK_TEST_SIZE; i++)
302 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
/*
 * With num_reg_defaults_raw the core reads the defaults from the
 * device at init time: verify the init-time reads happened (for cached
 * maps), then that subsequent bulk reads come from the cache.
 */
305 static void reg_defaults_read_dev(struct kunit *test)
307 struct regcache_types *t = (struct regcache_types *)test->param_value;
309 struct regmap_config config;
310 struct regmap_ram_data *data;
311 unsigned int rval[BLOCK_TEST_SIZE];
314 config = test_regmap_config;
315 config.cache_type = t->type;
316 config.num_reg_defaults_raw = BLOCK_TEST_SIZE;
318 map = gen_regmap(&config, &data);
319 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
323 /* We should have read the cache defaults back from the map */
324 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
325 KUNIT_EXPECT_EQ(test, t->type != REGCACHE_NONE, data->read[i]);
326 data->read[i] = false;
329 /* Read back the expected default data */
330 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
332 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
334 /* The data should have been read from cache if there was one */
335 for (i = 0; i < BLOCK_TEST_SIZE; i++)
336 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
/*
 * regmap_register_patch() must write exactly the patched registers
 * (values bumped by one from the stashed originals) and leave every
 * other register untouched on the device.
 * NOTE(review): patch[].reg assignments appear elided in this extract;
 * judging by the rval indices they target registers 2 and 5 - confirm.
 */
339 static void register_patch(struct kunit *test)
341 struct regcache_types *t = (struct regcache_types *)test->param_value;
343 struct regmap_config config;
344 struct regmap_ram_data *data;
345 struct reg_sequence patch[2];
346 unsigned int rval[BLOCK_TEST_SIZE];
349 /* We need defaults so readback works */
350 config = test_regmap_config;
351 config.cache_type = t->type;
352 config.num_reg_defaults = BLOCK_TEST_SIZE;
354 map = gen_regmap(&config, &data);
355 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
359 /* Stash the original values */
360 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
363 /* Patch a couple of values */
365 patch[0].def = rval[2] + 1;
366 patch[0].delay_us = 0;
368 patch[1].def = rval[5] + 1;
369 patch[1].delay_us = 0;
370 KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
373 /* Only the patched registers are written */
374 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
378 KUNIT_EXPECT_TRUE(test, data->written[i]);
379 KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
382 KUNIT_EXPECT_FALSE(test, data->written[i]);
383 KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
/*
 * With reg_stride = 2 only even registers are addressable: odd
 * registers must reject both read and write without touching the
 * device, even ones must work and (with a cache) read from cache.
 * NOTE(review): the even/odd branch structure is partially elided in
 * this extract - the odd-register expectations are at 416-419, the
 * even-register ones at 421-427.
 */
391 static void stride(struct kunit *test)
393 struct regcache_types *t = (struct regcache_types *)test->param_value;
395 struct regmap_config config;
396 struct regmap_ram_data *data;
400 config = test_regmap_config;
401 config.cache_type = t->type;
402 config.reg_stride = 2;
403 config.num_reg_defaults = BLOCK_TEST_SIZE / 2;
405 map = gen_regmap(&config, &data);
406 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
410 /* Only even registers can be accessed, try both read and write */
411 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
412 data->read[i] = false;
413 data->written[i] = false;
416 KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
417 KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
418 KUNIT_EXPECT_FALSE(test, data->read[i]);
419 KUNIT_EXPECT_FALSE(test, data->written[i]);
421 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
422 KUNIT_EXPECT_EQ(test, data->vals[i], rval);
423 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE,
426 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
427 KUNIT_EXPECT_TRUE(test, data->written[i]);
/*
 * Paged/indirect access range used by the range tests: a selector
 * register pages a data window onto a larger virtual register range.
 * NOTE(review): most initialiser fields (selector_reg, window_start,
 * window_len, range_min/max) are elided in this extract.
 */
434 static struct regmap_range_cfg test_range = {
436 .selector_mask = 0xff,
445 static bool test_range_window_volatile(struct device *dev, unsigned int reg)
447 if (reg >= test_range.window_start &&
448 reg <= test_range.window_start + test_range.window_len)
/*
 * Volatile predicate covering both the physical window and the whole
 * virtual range, so nothing in the paged region is ever cached.
 */
454 static bool test_range_all_volatile(struct device *dev, unsigned int reg)
456 if (test_range_window_volatile(dev, reg))
459 if (reg >= test_range.range_min && reg <= test_range.range_max)
/*
 * Exercise paged (indirect) register access: accesses to the virtual
 * range must update the selector register and go through the physical
 * window, and no physical access may land inside the virtual range
 * itself.  Both the first and the "one page up" virtual register are
 * tried for read and write.
 */
465 static void basic_ranges(struct kunit *test)
467 struct regcache_types *t = (struct regcache_types *)test->param_value;
469 struct regmap_config config;
470 struct regmap_ram_data *data;
474 config = test_regmap_config;
475 config.cache_type = t->type;
476 config.volatile_reg = test_range_all_volatile;
477 config.ranges = &test_range;
478 config.num_ranges = 1;
479 config.max_register = test_range.range_max;
481 map = gen_regmap(&config, &data);
482 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
486 for (i = test_range.range_min; i < test_range.range_max; i++) {
487 data->read[i] = false;
488 data->written[i] = false;
491 /* Reset the page to a non-zero value to trigger a change */
492 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
493 test_range.range_max));
495 /* Check we set the page and use the window for writes */
496 data->written[test_range.selector_reg] = false;
497 data->written[test_range.window_start] = false;
498 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
499 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
500 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
502 data->written[test_range.selector_reg] = false;
503 data->written[test_range.window_start] = false;
504 KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
505 test_range.range_min +
506 test_range.window_len,
508 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
509 KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);
/* Same checks for reads: selector written, window read. */
512 data->written[test_range.selector_reg] = false;
513 data->read[test_range.window_start] = false;
514 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
515 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
516 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
518 data->written[test_range.selector_reg] = false;
519 data->read[test_range.window_start] = false;
520 KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
521 test_range.range_min +
522 test_range.window_len,
524 KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
525 KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);
527 /* No physical access triggered in the virtual range */
528 for (i = test_range.range_min; i < test_range.range_max; i++) {
529 KUNIT_EXPECT_FALSE(test, data->read[i]);
530 KUNIT_EXPECT_FALSE(test, data->written[i]);
536 /* Try to stress dynamic creation of cache data structures */
537 static void stress_insert(struct kunit *test)
539 struct regcache_types *t = (struct regcache_types *)test->param_value;
541 struct regmap_config config;
542 struct regmap_ram_data *data;
543 unsigned int rval, *vals;
547 config = test_regmap_config;
548 config.cache_type = t->type;
549 config.max_register = 300;
551 map = gen_regmap(&config, &data);
552 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
556 vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
558 KUNIT_ASSERT_FALSE(test, vals == NULL);
559 buf_sz = sizeof(unsigned long) * config.max_register;
561 get_random_bytes(vals, buf_sz);
563 /* Write data into the map/cache in ever decreasing strides */
564 for (i = 0; i < config.max_register; i += 100)
565 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
566 for (i = 0; i < config.max_register; i += 50)
567 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
568 for (i = 0; i < config.max_register; i += 25)
569 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
570 for (i = 0; i < config.max_register; i += 10)
571 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
572 for (i = 0; i < config.max_register; i += 5)
573 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
574 for (i = 0; i < config.max_register; i += 3)
575 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
576 for (i = 0; i < config.max_register; i += 2)
577 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
578 for (i = 0; i < config.max_register; i++)
579 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
581 /* Do reads from the cache (if there is one) match? */
582 for (i = 0; i < config.max_register; i ++) {
583 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
584 KUNIT_EXPECT_EQ(test, rval, vals[i]);
585 KUNIT_EXPECT_EQ(test, t->type == REGCACHE_NONE, data->read[i]);
/*
 * regcache_cache_bypass(): a bypassed write must land on the device
 * and be readable, but the cache must retain the pre-bypass value and
 * return it once bypass is disabled again.
 */
591 static void cache_bypass(struct kunit *test)
593 struct regcache_types *t = (struct regcache_types *)test->param_value;
595 struct regmap_config config;
596 struct regmap_ram_data *data;
597 unsigned int val, rval;
599 config = test_regmap_config;
600 config.cache_type = t->type;
602 map = gen_regmap(&config, &data);
603 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
607 get_random_bytes(&val, sizeof(val));
609 /* Ensure the cache has a value in it */
610 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
612 /* Bypass then write a different value */
613 regcache_cache_bypass(map, true);
614 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val + 1));
616 /* Read the bypassed value */
617 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
618 KUNIT_EXPECT_EQ(test, val + 1, rval);
619 KUNIT_EXPECT_EQ(test, data->vals[0], rval);
621 /* Disable bypass, the cache should still return the original value */
622 regcache_cache_bypass(map, false);
623 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
624 KUNIT_EXPECT_EQ(test, val, rval);
/*
 * regcache_sync() must rewrite every dirty register: populate the
 * cache, zero the "hardware", mark dirty and sync, then confirm the
 * device matches the cache and every register was written.
 */
629 static void cache_sync(struct kunit *test)
631 struct regcache_types *t = (struct regcache_types *)test->param_value;
633 struct regmap_config config;
634 struct regmap_ram_data *data;
635 unsigned int val[BLOCK_TEST_SIZE];
638 config = test_regmap_config;
639 config.cache_type = t->type;
641 map = gen_regmap(&config, &data);
642 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
646 get_random_bytes(&val, sizeof(val));
648 /* Put some data into the cache */
649 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
651 for (i = 0; i < BLOCK_TEST_SIZE; i++)
652 data->written[i] = false;
654 /* Trash the data on the device itself then resync */
655 regcache_mark_dirty(map);
656 memset(data->vals, 0, sizeof(val));
657 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
659 /* Did we just write the correct data out? */
660 KUNIT_EXPECT_MEMEQ(test, data->vals, val, sizeof(val));
661 for (i = 0; i < BLOCK_TEST_SIZE; i++)
662 KUNIT_EXPECT_EQ(test, true, data->written[i]);
/*
 * With register defaults in place a sync must only write back
 * registers that differ from their default: only the one touched
 * register (2) should be written out.
 */
667 static void cache_sync_defaults(struct kunit *test)
669 struct regcache_types *t = (struct regcache_types *)test->param_value;
671 struct regmap_config config;
672 struct regmap_ram_data *data;
676 config = test_regmap_config;
677 config.cache_type = t->type;
678 config.num_reg_defaults = BLOCK_TEST_SIZE;
680 map = gen_regmap(&config, &data);
681 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
685 get_random_bytes(&val, sizeof(val));
687 /* Change the value of one register */
688 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 2, val));
691 regcache_mark_dirty(map);
692 for (i = 0; i < BLOCK_TEST_SIZE; i++)
693 data->written[i] = false;
694 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
696 /* Did we just sync the one register we touched? */
697 for (i = 0; i < BLOCK_TEST_SIZE; i++)
698 KUNIT_EXPECT_EQ(test, i == 2, data->written[i]);
/*
 * A sync must respect writeable_reg: after filling the cache in
 * cache-only mode (where writes to read-only register 5 fail), the
 * sync may write every register except 5.
 */
703 static void cache_sync_readonly(struct kunit *test)
705 struct regcache_types *t = (struct regcache_types *)test->param_value;
707 struct regmap_config config;
708 struct regmap_ram_data *data;
712 config = test_regmap_config;
713 config.cache_type = t->type;
714 config.writeable_reg = reg_5_false;
716 map = gen_regmap(&config, &data);
717 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
721 /* Read all registers to fill the cache */
722 for (i = 0; i < BLOCK_TEST_SIZE; i++)
723 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
725 /* Change the value of all registers, readonly should fail */
726 get_random_bytes(&val, sizeof(val));
727 regcache_cache_only(map, true);
728 for (i = 0; i < BLOCK_TEST_SIZE; i++)
729 KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);
730 regcache_cache_only(map, false);
733 for (i = 0; i < BLOCK_TEST_SIZE; i++)
734 data->written[i] = false;
735 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
737 /* Did that match what we see on the device? */
738 for (i = 0; i < BLOCK_TEST_SIZE; i++)
739 KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
/*
 * A registered patch must be applied to the device on sync but must
 * not pollute the cache: after sync, API reads return the original
 * (default) values while the device holds the patched values for the
 * patched registers only.
 * NOTE(review): patch[].reg assignments appear elided in this extract;
 * the rval indices suggest registers 2 and 5 - confirm.
 */
744 static void cache_sync_patch(struct kunit *test)
746 struct regcache_types *t = (struct regcache_types *)test->param_value;
748 struct regmap_config config;
749 struct regmap_ram_data *data;
750 struct reg_sequence patch[2];
751 unsigned int rval[BLOCK_TEST_SIZE], val;
754 /* We need defaults so readback works */
755 config = test_regmap_config;
756 config.cache_type = t->type;
757 config.num_reg_defaults = BLOCK_TEST_SIZE;
759 map = gen_regmap(&config, &data);
760 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
764 /* Stash the original values */
765 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
768 /* Patch a couple of values */
770 patch[0].def = rval[2] + 1;
771 patch[0].delay_us = 0;
773 patch[1].def = rval[5] + 1;
774 patch[1].delay_us = 0;
775 KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
779 regcache_mark_dirty(map);
780 for (i = 0; i < BLOCK_TEST_SIZE; i++)
781 data->written[i] = false;
782 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
784 /* The patch should be on the device but not in the cache */
785 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
786 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
787 KUNIT_EXPECT_EQ(test, val, rval[i]);
792 KUNIT_EXPECT_EQ(test, true, data->written[i]);
793 KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
796 KUNIT_EXPECT_EQ(test, false, data->written[i]);
797 KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
/*
 * regcache_drop_region() must evict exactly registers 3..5: after the
 * drop, a bulk read hits the device only for the dropped registers and
 * still returns the correct values everywhere.
 */
805 static void cache_drop(struct kunit *test)
807 struct regcache_types *t = (struct regcache_types *)test->param_value;
809 struct regmap_config config;
810 struct regmap_ram_data *data;
811 unsigned int rval[BLOCK_TEST_SIZE];
814 config = test_regmap_config;
815 config.cache_type = t->type;
816 config.num_reg_defaults = BLOCK_TEST_SIZE;
818 map = gen_regmap(&config, &data);
819 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
823 /* Ensure the data is read from the cache */
824 for (i = 0; i < BLOCK_TEST_SIZE; i++)
825 data->read[i] = false;
826 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
828 for (i = 0; i < BLOCK_TEST_SIZE; i++) {
829 KUNIT_EXPECT_FALSE(test, data->read[i]);
830 data->read[i] = false;
832 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
834 /* Drop some registers */
835 KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 3, 5));
837 /* Reread and check only the dropped registers hit the device. */
838 KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
840 for (i = 0; i < BLOCK_TEST_SIZE; i++)
841 KUNIT_EXPECT_EQ(test, data->read[i], i >= 3 && i <= 5);
842 KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));
/*
 * regcache_reg_cached() must report presence accurately: with no
 * defaults nothing is cached (and querying must not trigger reads);
 * after reading every register, everything is cached.
 */
847 static void cache_present(struct kunit *test)
849 struct regcache_types *t = (struct regcache_types *)test->param_value;
851 struct regmap_config config;
852 struct regmap_ram_data *data;
856 config = test_regmap_config;
857 config.cache_type = t->type;
859 map = gen_regmap(&config, &data);
860 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
864 for (i = 0; i < BLOCK_TEST_SIZE; i++)
865 data->read[i] = false;
867 /* No defaults so no registers cached. */
868 for (i = 0; i < BLOCK_TEST_SIZE; i++)
869 KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, i));
871 /* We didn't trigger any reads */
872 for (i = 0; i < BLOCK_TEST_SIZE; i++)
873 KUNIT_ASSERT_FALSE(test, data->read[i]);
876 for (i = 0; i < BLOCK_TEST_SIZE; i++)
877 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
879 /* Now everything should be cached */
880 for (i = 0; i < BLOCK_TEST_SIZE; i++)
881 KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, i));
886 /* Check that caching the window register works with sync */
/*
 * Writes across the paged range must update the selector, and after a
 * mark-dirty + sync the selector state must still be tracked correctly
 * so the next paged write reselects the right page (selector value 0
 * for the first page, 2 for the last - derived from range/window
 * geometry elided in this extract).
 */
887 static void cache_range_window_reg(struct kunit *test)
889 struct regcache_types *t = (struct regcache_types *)test->param_value;
891 struct regmap_config config;
892 struct regmap_ram_data *data;
896 config = test_regmap_config;
897 config.cache_type = t->type;
898 config.volatile_reg = test_range_window_volatile;
899 config.ranges = &test_range;
900 config.num_ranges = 1;
901 config.max_register = test_range.range_max;
903 map = gen_regmap(&config, &data);
904 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
908 /* Write new values to the entire range */
909 for (i = test_range.range_min; i <= test_range.range_max; i++)
910 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));
912 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
913 KUNIT_ASSERT_EQ(test, val, 2);
915 /* Write to the first register in the range to reset the page */
916 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
917 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
918 KUNIT_ASSERT_EQ(test, val, 0);
920 /* Trigger a cache sync */
921 regcache_mark_dirty(map);
922 KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
924 /* Write to the first register again, the page should be reset */
925 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
926 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
927 KUNIT_ASSERT_EQ(test, val, 0);
929 /* Trigger another cache sync */
930 regcache_mark_dirty(map);
931 KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));
933 /* Write to the last register again, the page should be reset */
934 KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
935 val = data->vals[test_range.selector_reg] & test_range.selector_mask;
936 KUNIT_ASSERT_EQ(test, val, 2);
/*
 * Test parameter for the raw (byte-oriented) regmap tests: a name for
 * the description plus the cache type and value endianness to use.
 */
939 struct raw_test_types {
942 enum regcache_type cache_type;
943 enum regmap_endian val_endian;
946 static void raw_to_desc(const struct raw_test_types *t, char *desc)
948 strcpy(desc, t->name);
/*
 * Full matrix of cache type x value endianness for the raw tests.
 */
951 static const struct raw_test_types raw_types_list[] = {
952 { "none-little", REGCACHE_NONE, REGMAP_ENDIAN_LITTLE },
953 { "none-big", REGCACHE_NONE, REGMAP_ENDIAN_BIG },
954 { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
955 { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
956 { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
957 { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
958 { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
959 { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
962 KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, raw_to_desc);
/*
 * As raw_types_list but without REGCACHE_NONE - for raw tests that
 * require a cache (e.g. raw_sync).
 */
964 static const struct raw_test_types raw_cache_types_list[] = {
965 { "flat-little", REGCACHE_FLAT, REGMAP_ENDIAN_LITTLE },
966 { "flat-big", REGCACHE_FLAT, REGMAP_ENDIAN_BIG },
967 { "rbtree-little", REGCACHE_RBTREE, REGMAP_ENDIAN_LITTLE },
968 { "rbtree-big", REGCACHE_RBTREE, REGMAP_ENDIAN_BIG },
969 { "maple-little", REGCACHE_MAPLE, REGMAP_ENDIAN_LITTLE },
970 { "maple-big", REGCACHE_MAPLE, REGMAP_ENDIAN_BIG },
973 KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, raw_to_desc);
/*
 * Template config for the raw tests; the byte-level handling below
 * (le16/be16 conversions) indicates 16-bit values.
 * NOTE(review): reg_bits/val_bits initialisers appear elided in this
 * extract - confirm against the full file.
 */
975 static const struct regmap_config raw_regmap_config = {
976 .max_register = BLOCK_TEST_SIZE,
978 .reg_format_endian = REGMAP_ENDIAN_LITTLE,
/*
 * Create a raw RAM-backed regmap: seeds the device with random bytes,
 * applies the parameterised cache type and value endianness, and
 * builds a reg_defaults table by decoding the random bytes with the
 * matching endian conversion.  Defaults are then discarded for
 * REGCACHE_NONE since the core rejects defaults without a cache.
 *
 * NOTE(review): as with gen_regmap(), the ERR_PTR(-ENOMEM)/-EINVAL
 * returns do not appear to free earlier allocations - possible leak on
 * error paths; lines are elided here, confirm against the full file.
 */
983 static struct regmap *gen_raw_regmap(struct regmap_config *config,
984 struct raw_test_types *test_type,
985 struct regmap_ram_data **data)
989 size_t size = (config->max_register + 1) * config->reg_bits / 8;
991 struct reg_default *defaults;
993 config->cache_type = test_type->cache_type;
994 config->val_format_endian = test_type->val_endian;
995 config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
996 config->cache_type == REGCACHE_MAPLE;
998 buf = kmalloc(size, GFP_KERNEL);
1000 return ERR_PTR(-ENOMEM);
1002 get_random_bytes(buf, size);
1004 *data = kzalloc(sizeof(**data), GFP_KERNEL);
1006 return ERR_PTR(-ENOMEM);
1007 (*data)->vals = (void *)buf;
1009 config->num_reg_defaults = config->max_register + 1;
1010 defaults = kcalloc(config->num_reg_defaults,
1011 sizeof(struct reg_default),
1014 return ERR_PTR(-ENOMEM);
1015 config->reg_defaults = defaults;
1017 for (i = 0; i < config->num_reg_defaults; i++) {
1018 defaults[i].reg = i;
1019 switch (test_type->val_endian) {
1020 case REGMAP_ENDIAN_LITTLE:
1021 defaults[i].def = le16_to_cpu(buf[i]);
1023 case REGMAP_ENDIAN_BIG:
1024 defaults[i].def = be16_to_cpu(buf[i]);
1027 return ERR_PTR(-EINVAL);
1032 * We use the defaults in the tests but they don't make sense
1033 * to the core if there's no cache.
1035 if (config->cache_type == REGCACHE_NONE)
1036 config->num_reg_defaults = 0;
1038 ret = regmap_init_raw_ram(config, *data);
/*
 * Each register read singly through the API must return the decoded
 * default generated by gen_raw_regmap().
 */
1047 static void raw_read_defaults_single(struct kunit *test)
1049 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1051 struct regmap_config config;
1052 struct regmap_ram_data *data;
1056 config = raw_regmap_config;
1058 map = gen_raw_regmap(&config, t, &data);
1059 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1063 /* Check that we can read the defaults via the API */
1064 for (i = 0; i < config.max_register + 1; i++) {
1065 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1066 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
/*
 * A raw bulk read must return the defaults as raw bytes: each element
 * must equal the default after the endian conversion matching the
 * map's value format.
 */
1072 static void raw_read_defaults(struct kunit *test)
1074 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1076 struct regmap_config config;
1077 struct regmap_ram_data *data;
1083 config = raw_regmap_config;
1085 map = gen_raw_regmap(&config, t, &data);
1086 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1090 val_len = sizeof(*rval) * (config.max_register + 1);
1091 rval = kmalloc(val_len, GFP_KERNEL);
1092 KUNIT_ASSERT_TRUE(test, rval != NULL);
1096 /* Check that we can read the defaults via the API */
1097 KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
1098 for (i = 0; i < config.max_register + 1; i++) {
1099 def = config.reg_defaults[i].def;
1100 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1101 KUNIT_EXPECT_EQ(test, def, be16_to_cpu(rval[i]));
1103 KUNIT_EXPECT_EQ(test, def, le16_to_cpu(rval[i]));
/*
 * Basic round trip on a raw map: a single regmap_write() must be
 * readable back via regmap_read().
 */
1111 static void raw_write_read_single(struct kunit *test)
1113 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1115 struct regmap_config config;
1116 struct regmap_ram_data *data;
1120 config = raw_regmap_config;
1122 map = gen_raw_regmap(&config, t, &data);
1123 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1127 get_random_bytes(&val, sizeof(val));
1129 /* If we write a value to a register we can read it back */
1130 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
1131 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
1132 KUNIT_EXPECT_EQ(test, val, rval);
/*
 * regmap_raw_write() of two raw values at register 2: reads must
 * return the new (endian-converted) values for the written registers
 * and the defaults elsewhere, and the raw bytes must appear verbatim
 * in the backing store.
 */
1137 static void raw_write(struct kunit *test)
1139 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1141 struct regmap_config config;
1142 struct regmap_ram_data *data;
1148 config = raw_regmap_config;
1150 map = gen_raw_regmap(&config, t, &data);
1151 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1155 hw_buf = (u16 *)data->vals;
1157 get_random_bytes(&val, sizeof(val));
1159 /* Do a raw write */
1160 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1162 /* We should read back the new values, and defaults for the rest */
1163 for (i = 0; i < config.max_register + 1; i++) {
1164 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1169 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1170 KUNIT_EXPECT_EQ(test, rval,
1171 be16_to_cpu(val[i % 2]));
1173 KUNIT_EXPECT_EQ(test, rval,
1174 le16_to_cpu(val[i % 2]));
1178 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1183 /* The values should appear in the "hardware" */
1184 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
/*
 * Cache-only writes (one raw, one regular) must be visible through the
 * API but absent from the hardware until regcache_sync() flushes them;
 * after the sync the raw bytes must appear in the backing store.
 */
1189 static void raw_sync(struct kunit *test)
1191 struct raw_test_types *t = (struct raw_test_types *)test->param_value;
1193 struct regmap_config config;
1194 struct regmap_ram_data *data;
1200 config = raw_regmap_config;
1202 map = gen_raw_regmap(&config, t, &data);
1203 KUNIT_ASSERT_FALSE(test, IS_ERR(map));
1207 hw_buf = (u16 *)data->vals;
1209 get_random_bytes(&val, sizeof(val));
1211 /* Do a regular write and a raw write in cache only mode */
1212 regcache_cache_only(map, true);
1213 KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));
1214 if (config.val_format_endian == REGMAP_ENDIAN_BIG)
1215 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
1216 be16_to_cpu(val[0])));
1218 KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 6,
1219 le16_to_cpu(val[0])));
1221 /* We should read back the new values, and defaults for the rest */
1222 for (i = 0; i < config.max_register + 1; i++) {
1223 KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
1229 if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
1230 KUNIT_EXPECT_EQ(test, rval,
1231 be16_to_cpu(val[i % 2]));
1233 KUNIT_EXPECT_EQ(test, rval,
1234 le16_to_cpu(val[i % 2]));
1238 KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
1243 /* The values should not appear in the "hardware" */
1244 KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], val, sizeof(val));
1245 KUNIT_EXPECT_MEMNEQ(test, &hw_buf[6], val, sizeof(u16));
1247 for (i = 0; i < config.max_register + 1; i++)
1248 data->written[i] = false;
1251 regcache_cache_only(map, false);
1252 regcache_mark_dirty(map);
1253 KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
1255 /* The values should now appear in the "hardware" */
1256 KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
1257 KUNIT_EXPECT_MEMEQ(test, &hw_buf[6], val, sizeof(u16));
/*
 * Test case table: generic tests run over all cache types, bypass/sync
 * tests over real caches only, drop/present over sparse caches, and
 * the raw tests over the cache x endian matrix.
 */
1262 static struct kunit_case regmap_test_cases[] = {
1263 KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
1264 KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
1265 KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
1266 KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
1267 KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
1268 KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
1269 KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
1270 KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
1271 KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
1272 KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
1273 KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
1274 KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
1275 KUNIT_CASE_PARAM(cache_sync, real_cache_types_gen_params),
1276 KUNIT_CASE_PARAM(cache_sync_defaults, real_cache_types_gen_params),
1277 KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
1278 KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
1279 KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
1280 KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
1281 KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_gen_params),
1283 KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
1284 KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
1285 KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
1286 KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
1287 KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
/* Suite registration and module boilerplate. */
1291 static struct kunit_suite regmap_test_suite = {
1293 .test_cases = regmap_test_cases,
1295 kunit_test_suite(regmap_test_suite);
1297 MODULE_LICENSE("GPL v2");