// SPDX-License-Identifier: GPL-2.0
//
// regmap KUnit tests
//
// Copyright 2023 Arm Ltd

#include <kunit/device.h>
#include <kunit/resource.h>
#include <kunit/test.h>

#include "internal.h"

#define BLOCK_TEST_SIZE 12

KUNIT_DEFINE_ACTION_WRAPPER(regmap_exit_action, regmap_exit, struct regmap *);
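
/*
 * regmap_exit_action lets the gen_regmap() helpers hand ownership of a
 * map to KUnit: kunit_add_action() queues regmap_exit() as deferred
 * cleanup, so each test's map is torn down automatically when the test
 * finishes.
 */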

struct regmap_test_priv {
	struct device *dev;
};

struct regmap_test_param {
	enum regcache_type cache;
	enum regmap_endian val_endian;

	unsigned int from_reg;
};

static void get_changed_bytes(void *orig, void *new, size_t size)
{
	u8 *o = orig;
	u8 *n = new;
	int i;

	get_random_bytes(new, size);

	/*
	 * This could be nicer and more efficient but we shouldn't
	 * care too much about efficiency here.
	 */
	for (i = 0; i < size; i++)
		while (n[i] == o[i])
			get_random_bytes(&n[i], 1);
}
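
/*
 * Note that get_changed_bytes() guarantees that *every* byte of the
 * output differs from the corresponding input byte, not merely that
 * the two buffers differ somewhere; raw_sync() relies on this when it
 * generates values known to differ from the "hardware" contents.
 */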

static const struct regmap_config test_regmap_config = {
	.reg_stride = 1,
	.val_bits = sizeof(unsigned int) * 8,
};

static const char *regcache_type_name(enum regcache_type type)
{
	switch (type) {
	case REGCACHE_NONE:
		return "none";
	case REGCACHE_FLAT:
		return "flat";
	case REGCACHE_RBTREE:
		return "rbtree";
	case REGCACHE_MAPLE:
		return "maple";
	default:
		return NULL;
	}
}

static const char *regmap_endian_name(enum regmap_endian endian)
{
	switch (endian) {
	case REGMAP_ENDIAN_BIG:
		return "big";
	case REGMAP_ENDIAN_LITTLE:
		return "little";
	case REGMAP_ENDIAN_DEFAULT:
		return "default";
	case REGMAP_ENDIAN_NATIVE:
		return "native";
	default:
		return NULL;
	}
}

static void param_to_desc(const struct regmap_test_param *param, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s-%s @%#x",
		 regcache_type_name(param->cache),
		 regmap_endian_name(param->val_endian),
		 param->from_reg);
}
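
/*
 * As an illustration, a maple cache case starting at register 0x2001
 * with the default value endianness is reported by KUnit as
 * "maple-default @0x2001".
 */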

static const struct regmap_test_param regcache_types_list[] = {
	{ .cache = REGCACHE_NONE },
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);

static const struct regmap_test_param real_cache_types_only_list[] = {
	{ .cache = REGCACHE_FLAT },
	{ .cache = REGCACHE_RBTREE },
	{ .cache = REGCACHE_MAPLE },
};

KUNIT_ARRAY_PARAM(real_cache_types_only, real_cache_types_only_list, param_to_desc);

static const struct regmap_test_param real_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .from_reg = 0 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2001 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
	{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);

static const struct regmap_test_param sparse_cache_types_list[] = {
	{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_RBTREE, .from_reg = 0x2004 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2001 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2002 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2003 },
	{ .cache = REGCACHE_MAPLE, .from_reg = 0x2004 },
};

KUNIT_ARRAY_PARAM(sparse_cache_types, sparse_cache_types_list, param_to_desc);
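
/*
 * gen_regmap() builds a RAM-backed regmap for the current parameter set:
 * regmap_init_ram() provides a "device" that is simply an array of
 * unsigned ints, and the read/written flags in struct regmap_ram_data
 * let tests verify which registers were touched by real I/O rather
 * than being satisfied from the cache.
 */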
static struct regmap *gen_regmap(struct kunit *test,
				 struct regmap_config *config,
				 struct regmap_ram_data **data)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap_test_priv *priv = test->priv;
	unsigned int *buf;
	struct regmap *ret;
	size_t size;
	int i;
	struct reg_default *defaults;

	config->cache_type = param->cache;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	if (config->max_register == 0) {
		config->max_register = param->from_reg;
		if (config->num_reg_defaults)
			config->max_register += (config->num_reg_defaults - 1) *
						config->reg_stride;
		else
			config->max_register += (BLOCK_TEST_SIZE * config->reg_stride);
	}

	size = (config->max_register + 1) * sizeof(unsigned int);
	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		return ERR_PTR(-ENOMEM);
	(*data)->vals = buf;

	if (config->num_reg_defaults) {
		defaults = kcalloc(config->num_reg_defaults,
				   sizeof(struct reg_default),
				   GFP_KERNEL);
		if (!defaults)
			return ERR_PTR(-ENOMEM);
		config->reg_defaults = defaults;

		for (i = 0; i < config->num_reg_defaults; i++) {
			defaults[i].reg = param->from_reg + (i * config->reg_stride);
			defaults[i].def = buf[param->from_reg + (i * config->reg_stride)];
		}
	}

	ret = regmap_init_ram(priv->dev, config, *data);
	if (IS_ERR(ret)) {
		kfree(buf);
		kfree(*data);
	} else {
		kunit_add_action(test, regmap_exit_action, ret);
	}

	return ret;
}
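
/*
 * Helper for the writeable_reg/readable_reg/volatile_reg callbacks:
 * reports true for every register except from_reg + 5, making register
 * 5 of the test block the odd one out in the permission tests below.
 */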
static bool reg_5_false(struct device *dev, unsigned int reg)
{
	struct kunit *test = dev_get_drvdata(dev);
	const struct regmap_test_param *param = test->param_value;

	return reg != (param->from_reg + 5);
}

static void basic_read_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);

	/* If using a cache the cache should have satisfied the read */
	KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[0]);
}

static void bulk_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/*
	 * Data written via the bulk API can be read back with single
	 * reads.
	 */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, 0, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval[i]));

	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache should have satisfied the reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void bulk_read(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Data written as single writes can be read via the bulk API */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, val[i]));
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, val, rval, sizeof(val));

	/* If using a cache the cache should have satisfied the reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void read_bypassed(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only regmap_read_bypassed() should return the register
	 * value and leave the map in cache-only.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_NE(test, val[i], rval);

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}

static void read_bypassed_volatile(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE], rval;
	int i;

	config = test_regmap_config;
	/* All registers except #5 volatile */
	config.volatile_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_FALSE(test, map->cache_bypass);

	get_random_bytes(&val, sizeof(val));

	/* Write some test values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val, ARRAY_SIZE(val)));

	regcache_cache_only(map, true);

	/*
	 * While in cache-only regmap_read_bypassed() should return the register
	 * value and leave the map in cache-only.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		/* Register #5 is non-volatile so should read from cache */
		KUNIT_EXPECT_EQ(test, (i == 5) ? 0 : -EBUSY,
				regmap_read(map, param->from_reg + i, &rval));

		/* Put inverted bits in rval to prove we really read the value */
		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}

	/*
	 * Change the underlying register values to prove it is returning
	 * real values not cached values.
	 */
	for (i = 0; i < ARRAY_SIZE(val); i++) {
		val[i] = ~val[i];
		data->vals[param->from_reg + i] = val[i];
	}

	for (i = 0; i < ARRAY_SIZE(val); i++) {
		if (i == 5)
			continue;

		rval = ~val[i];
		KUNIT_EXPECT_EQ(test, 0, regmap_read_bypassed(map, param->from_reg + i, &rval));
		KUNIT_EXPECT_EQ(test, val[i], rval);
		KUNIT_EXPECT_TRUE(test, map->cache_only);
		KUNIT_EXPECT_FALSE(test, map->cache_bypass);
	}
}

static void write_readonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[i] = false;

	/* Change the value of all registers, readonly should fail */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, i, val) == 0);

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[i]);
}

static void read_writeonly(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.readable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[i] = false;

	/*
	 * Try to read all the registers, the writeonly one should
	 * fail if we aren't using the flat cache.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		if (config.cache_type != REGCACHE_FLAT) {
			KUNIT_EXPECT_EQ(test, i != 5,
					regmap_read(map, i, &val) == 0);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &val));
		}
	}

	/* Did we trigger a hardware access? */
	KUNIT_EXPECT_FALSE(test, data->read[5]);
}

static void reg_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void reg_defaults_read_dev(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults_raw = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* We should have read the cache defaults back from the map */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, config.cache_type != REGCACHE_NONE, data->read[i]);
		data->read[i] = false;
	}

	/* Read back the expected default data */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, data->vals, rval, sizeof(rval));

	/* The data should have been read from cache if there was one */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
}

static void register_patch(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, 0, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Only the patched registers are written */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_TRUE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_FALSE(test, data->written[i]);
			KUNIT_EXPECT_EQ(test, data->vals[i], rval[i]);
			break;
		}
	}
}

static void stride(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = test_regmap_config;
	config.reg_stride = 2;
	config.num_reg_defaults = BLOCK_TEST_SIZE / 2;

	/*
	 * Allow one extra register so that the read/written arrays
	 * are sized big enough to include an entry for the odd
	 * address past the final reg_default register.
	 */
	config.max_register = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Only even registers can be accessed, try both read and write */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		data->read[i] = false;
		data->written[i] = false;

		if (i % 2) {
			KUNIT_EXPECT_NE(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_NE(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_FALSE(test, data->read[i]);
			KUNIT_EXPECT_FALSE(test, data->written[i]);
		} else {
			KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
			KUNIT_EXPECT_EQ(test, data->vals[i], rval);
			KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE,
					data->read[i]);

			KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, rval));
			KUNIT_EXPECT_TRUE(test, data->written[i]);
		}
	}
}

static struct regmap_range_cfg test_range = {
	.selector_reg = 1,
	.selector_mask = 0xff,

	.window_start = 4,
	.window_len = 10,

	.range_min = 20,
	.range_max = 40,
};

static bool test_range_window_volatile(struct device *dev, unsigned int reg)
{
	if (reg >= test_range.window_start &&
	    reg <= test_range.window_start + test_range.window_len)
		return true;

	return false;
}

static bool test_range_all_volatile(struct device *dev, unsigned int reg)
{
	if (test_range_window_volatile(dev, reg))
		return true;

	if (reg >= test_range.range_min && reg <= test_range.range_max)
		return true;

	return false;
}
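
/*
 * test_range describes an indirectly accessed range: the virtual
 * registers range_min..range_max are reached through a window of
 * window_len registers at window_start, with the page selected by
 * writing selector_reg. The two helpers above mark the window (and,
 * for the helper used by basic_ranges and raw_ranges, the whole
 * virtual range) volatile so the cache never short-circuits the paged
 * accesses being tested.
 */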

static void basic_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = test_range.range_min; i < test_range.range_max; i++) {
		data->read[i] = false;
		data->written[i] = false;
	}

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}

/* Try to stress dynamic creation of cache data structures */
static void stress_insert(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval, *vals;
	size_t buf_sz;
	int i;

	config = test_regmap_config;
	config.max_register = 300;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	vals = kunit_kcalloc(test, sizeof(unsigned long), config.max_register,
			     GFP_KERNEL);
	KUNIT_ASSERT_FALSE(test, vals == NULL);
	buf_sz = sizeof(unsigned long) * config.max_register;

	get_random_bytes(vals, buf_sz);

	/* Write data into the map/cache in ever decreasing strides */
	for (i = 0; i < config.max_register; i += 100)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 50)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 25)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 10)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 5)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 3)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i += 2)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));
	for (i = 0; i < config.max_register; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, i, vals[i]));

	/* Do reads from the cache (if there is one) match? */
	for (i = 0; i < config.max_register; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, rval, vals[i]);
		KUNIT_EXPECT_EQ(test, config.cache_type == REGCACHE_NONE, data->read[i]);
	}
}

static void cache_bypass(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val, rval;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Ensure the cache has a value in it */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val));

	/* Bypass then write a different value */
	regcache_cache_bypass(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg, val + 1));

	/* Read the bypassed value */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val + 1, rval);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg], rval);

	/* Disable bypass, the cache should still return the original value */
	regcache_cache_bypass(map, false);
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

static void cache_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Trash the data on the device itself then resync */
	regcache_mark_dirty(map);
	memset(data->vals, 0, sizeof(val));
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
}

static void cache_sync_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[BLOCK_TEST_SIZE];
	unsigned int val_mask;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_mask = GENMASK(config.val_bits - 1, 0);
	get_random_bytes(&val, sizeof(val));

	/* Put some data into the cache */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	/* Set cache-only and change the values */
	regcache_cache_only(map, true);
	for (i = 0; i < ARRAY_SIZE(val); ++i)
		val[i] = ~val[i] & val_mask;

	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, param->from_reg, val,
						   BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);

	KUNIT_EXPECT_MEMNEQ(test, &data->vals[param->from_reg], val, sizeof(val));

	/* Exit cache-only and sync the cache without marking hardware registers dirty */
	regcache_cache_only(map, false);

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just write the correct data out? */
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], val, sizeof(val));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + i]);
}

static void cache_sync_defaults_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* Change the value of one register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, val));

	/* Resync */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did we just sync the one register we touched? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i == 2, data->written[param->from_reg + i]);

	/* Rewrite registers back to their defaults */
	for (i = 0; i < config.num_reg_defaults; ++i)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, config.reg_defaults[i].reg,
						      config.reg_defaults[i].def));

	/*
	 * Resync after regcache_mark_dirty() should not write out registers
	 * that are at default value
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[param->from_reg + i]);
}

static void cache_sync_default_after_cache_only(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int orig_val;
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + 2, &orig_val));

	/* Enter cache-only and change the value of one register */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val + 1));

	/* Exit cache-only and resync, should write out the changed register */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Was the register written out? */
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val + 1);

	/* Enter cache-only and write register back to its default value */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + 2, orig_val));

	/* Resync should write out the new value */
	regcache_cache_only(map, false);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	KUNIT_EXPECT_TRUE(test, data->written[param->from_reg + 2]);
	KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + 2], orig_val);
}

static void cache_sync_readonly(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.writeable_reg = reg_5_false;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Read all registers to fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Change the value of all registers, readonly should fail */
	get_random_bytes(&val, sizeof(val));
	regcache_cache_only(map, true);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, regmap_write(map, param->from_reg + i, val) == 0);
	regcache_cache_only(map, false);

	/* Resync the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Did that match what we see on the device? */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, i != 5, data->written[param->from_reg + i]);
}

static void cache_sync_patch(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	struct reg_sequence patch[2];
	unsigned int rval[BLOCK_TEST_SIZE], val;
	int i;

	/* We need defaults so readback works */
	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Stash the original values */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));

	/* Patch a couple of values */
	patch[0].reg = param->from_reg + 2;
	patch[0].def = rval[2] + 1;
	patch[0].delay_us = 0;
	patch[1].reg = param->from_reg + 5;
	patch[1].def = rval[5] + 1;
	patch[1].delay_us = 0;
	KUNIT_EXPECT_EQ(test, 0, regmap_register_patch(map, patch,
						       ARRAY_SIZE(patch)));

	/* Sync the cache */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The patch should be on the device but not in the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));
		KUNIT_EXPECT_EQ(test, val, rval[i]);

		switch (i) {
		case 2:
		case 5:
			KUNIT_EXPECT_EQ(test, true, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i] + 1);
			break;
		default:
			KUNIT_EXPECT_EQ(test, false, data->written[param->from_reg + i]);
			KUNIT_EXPECT_EQ(test, data->vals[param->from_reg + i], rval[i]);
			break;
		}
	}
}

static void cache_drop(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[param->from_reg + i]);
		data->read[param->from_reg + i] = false;
	}
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Drop some registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, param->from_reg + 3,
						      param->from_reg + 5));

	/* Reread and check only the dropped registers hit the device. */
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->read[param->from_reg + i], i >= 3 && i <= 5);
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));
}

static void cache_drop_with_non_contiguous_ranges(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val[4][BLOCK_TEST_SIZE];
	unsigned int reg;
	const int num_ranges = ARRAY_SIZE(val) * 2;
	int rangeidx, i;

	static_assert(ARRAY_SIZE(val) == 4);

	config = test_regmap_config;
	config.max_register = param->from_reg + (num_ranges * BLOCK_TEST_SIZE);

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Create non-contiguous cache blocks by writing every other range */
	get_random_bytes(&val, sizeof(val));
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		KUNIT_EXPECT_EQ(test, 0, regmap_bulk_write(map, reg,
							   &val[rangeidx / 2],
							   BLOCK_TEST_SIZE));
		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Drop range 2 */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg, reg + BLOCK_TEST_SIZE - 1));

	/* Drop part of range 4 */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, reg + 3, reg + 5));

	/* Mark dirty and reset mock registers to 0 */
	regcache_mark_dirty(map);
	for (i = 0; i < config.max_register + 1; i++) {
		data->vals[i] = 0;
		data->written[i] = false;
	}

	/* The registers that were dropped from range 4 should now remain at 0 */
	val[4 / 2][3] = 0;
	val[4 / 2][4] = 0;
	val[4 / 2][5] = 0;

	/* Sync and check that the expected register ranges were written */
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* Check that odd ranges weren't written */
	for (rangeidx = 1; rangeidx < num_ranges; rangeidx += 2) {
		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_FALSE(test, data->written[reg + i]);
	}

	/* Check that even ranges (except 2 and 4) were written */
	for (rangeidx = 0; rangeidx < num_ranges; rangeidx += 2) {
		if ((rangeidx == 2) || (rangeidx == 4))
			continue;

		reg = param->from_reg + (rangeidx * BLOCK_TEST_SIZE);
		for (i = 0; i < BLOCK_TEST_SIZE; i++)
			KUNIT_EXPECT_TRUE(test, data->written[reg + i]);

		KUNIT_EXPECT_MEMEQ(test, &data->vals[reg],
				   &val[rangeidx / 2], sizeof(val[rangeidx / 2]));
	}

	/* Check that range 2 wasn't written */
	reg = param->from_reg + (2 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_FALSE(test, data->written[reg + i]);

	/* Check that range 4 was partially written */
	reg = param->from_reg + (4 * BLOCK_TEST_SIZE);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, data->written[reg + i], i < 3 || i > 5);

	KUNIT_EXPECT_MEMEQ(test, &data->vals[reg], &val[4 / 2], sizeof(val[4 / 2]));

	/* Nothing before param->from_reg should have been written */
	for (i = 0; i < param->from_reg; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_marked_dirty(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/* Mark dirty and cache sync should not write anything. */
	regcache_mark_dirty(map);
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_no_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_drop_all_and_sync_has_defaults(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval[BLOCK_TEST_SIZE];
	int i;

	config = test_regmap_config;
	config.num_reg_defaults = BLOCK_TEST_SIZE;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Ensure the data is read from the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_bulk_read(map, param->from_reg, rval,
						  BLOCK_TEST_SIZE));
	KUNIT_EXPECT_MEMEQ(test, &data->vals[param->from_reg], rval, sizeof(rval));

	/* Change all values in cache from defaults */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_write(map, param->from_reg + i, rval[i] + 1));

	/* Drop all registers */
	KUNIT_EXPECT_EQ(test, 0, regcache_drop_region(map, 0, config.max_register));

	/*
	 * Sync cache without marking it dirty. All registers were dropped
	 * so the cache should not have any entries to write out.
	 */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->written[param->from_reg + i] = false;

	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));
	for (i = 0; i <= config.max_register; i++)
		KUNIT_EXPECT_FALSE(test, data->written[i]);
}

static void cache_present(struct kunit *test)
{
	const struct regmap_test_param *param = test->param_value;
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		data->read[param->from_reg + i] = false;

	/* No defaults so no registers cached. */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, regcache_reg_cached(map, param->from_reg + i));

	/* We didn't trigger any reads */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_FALSE(test, data->read[param->from_reg + i]);

	/* Fill the cache */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, param->from_reg + i, &val));

	/* Now everything should be cached */
	for (i = 0; i < BLOCK_TEST_SIZE; i++)
		KUNIT_ASSERT_TRUE(test, regcache_reg_cached(map, param->from_reg + i));
}

/* Check that caching the window register works with sync */
static void cache_range_window_reg(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = test_regmap_config;
	config.volatile_reg = test_range_window_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Write new values to the entire range */
	for (i = test_range.range_min; i <= test_range.range_max; i++)
		KUNIT_ASSERT_EQ(test, 0, regmap_write(map, i, 0));

	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);

	/* Write to the first register in the range to reset the page */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger a cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the first register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 0);

	/* Trigger another cache sync */
	regcache_mark_dirty(map);
	KUNIT_ASSERT_EQ(test, 0, regcache_sync(map));

	/* Write to the last register again, the page should be reset */
	KUNIT_ASSERT_EQ(test, 0, regmap_write(map, test_range.range_max, 0));
	val = data->vals[test_range.selector_reg] & test_range.selector_mask;
	KUNIT_ASSERT_EQ(test, val, 2);
}

static const struct regmap_test_param raw_types_list[] = {
	{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);

static const struct regmap_test_param raw_cache_types_list[] = {
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
	{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_BIG },
};

KUNIT_ARRAY_PARAM(raw_test_cache_types, raw_cache_types_list, param_to_desc);
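
/*
 * The raw tests below use 16-bit registers with an explicitly
 * configured value endianness so that the byte-level behaviour of
 * regmap_raw_read()/regmap_raw_write() can be checked against both
 * the cache and the RAM-backed "hardware" buffer.
 */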

static const struct regmap_config raw_regmap_config = {
	.max_register = BLOCK_TEST_SIZE,

	.reg_format_endian = REGMAP_ENDIAN_LITTLE,

	.reg_bits = 16,
	.val_bits = 16,
};

static struct regmap *gen_raw_regmap(struct kunit *test,
				     struct regmap_config *config,
				     struct regmap_ram_data **data)
{
	struct regmap_test_priv *priv = test->priv;
	const struct regmap_test_param *param = test->param_value;
	u16 *buf;
	struct regmap *ret;
	size_t size = (config->max_register + 1) * config->reg_bits / 8;
	int i;
	struct reg_default *defaults;

	config->cache_type = param->cache;
	config->val_format_endian = param->val_endian;
	config->disable_locking = config->cache_type == REGCACHE_RBTREE ||
					config->cache_type == REGCACHE_MAPLE;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	get_random_bytes(buf, size);

	*data = kzalloc(sizeof(**data), GFP_KERNEL);
	if (!(*data))
		return ERR_PTR(-ENOMEM);
	(*data)->vals = (void *)buf;

	config->num_reg_defaults = config->max_register + 1;
	defaults = kcalloc(config->num_reg_defaults,
			   sizeof(struct reg_default),
			   GFP_KERNEL);
	if (!defaults)
		return ERR_PTR(-ENOMEM);
	config->reg_defaults = defaults;

	for (i = 0; i < config->num_reg_defaults; i++) {
		defaults[i].reg = i;
		switch (param->val_endian) {
		case REGMAP_ENDIAN_LITTLE:
			defaults[i].def = le16_to_cpu(buf[i]);
			break;
		case REGMAP_ENDIAN_BIG:
			defaults[i].def = be16_to_cpu(buf[i]);
			break;
		default:
			return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * We use the defaults in the tests but they don't make sense
	 * to the core if there's no cache.
	 */
	if (config->cache_type == REGCACHE_NONE)
		config->num_reg_defaults = 0;

	ret = regmap_init_raw_ram(priv->dev, config, *data);
	if (IS_ERR(ret)) {
		kfree(buf);
		kfree(*data);
	} else {
		kunit_add_action(test, regmap_exit_action, ret);
	}

	return ret;
}
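
/*
 * Unlike gen_regmap(), every register is given a default generated
 * from the random initial buffer, translated from the configured
 * value endianness, so tests can compare reads against
 * config.reg_defaults. The defaults are then discarded for
 * REGCACHE_NONE since they don't make sense to the core without a
 * cache to hold them.
 */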

static void raw_read_defaults_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Check that we can read the defaults via the API */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));
		KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
	}
}

static void raw_read_defaults(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *rval;
	u16 def;
	size_t val_len;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	val_len = sizeof(*rval) * (config.max_register + 1);
	rval = kunit_kmalloc(test, val_len, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, rval != NULL);
	if (!rval)
		return;

	/* Check that we can read the defaults via the API */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_read(map, 0, rval, val_len));
	for (i = 0; i < config.max_register + 1; i++) {
		def = config.reg_defaults[i].def;
		if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
			KUNIT_EXPECT_EQ(test, def, be16_to_cpu((__force __be16)rval[i]));
		} else {
			KUNIT_EXPECT_EQ(test, def, le16_to_cpu((__force __le16)rval[i]));
		}
	}
}

static void raw_write_read_single(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val;
	unsigned int rval;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	get_random_bytes(&val, sizeof(val));

	/* If we write a value to a register we can read it back */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 0, val));
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &rval));
	KUNIT_EXPECT_EQ(test, val, rval);
}

static void raw_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 *hw_buf;
	u16 val[2];
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_random_bytes(&val, sizeof(val));

	/* Do a raw write */
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val, sizeof(val)));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i % 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i % 2]));
			}
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/* The values should appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], val, sizeof(val));
}
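
/*
 * reg_zero()/ram_reg_zero() make register 0 the only noinc register
 * for raw_noinc_write(): the map must treat it as a FIFO-style
 * register that does not auto-increment, and the RAM backend mirrors
 * that so successive words land in the same register.
 */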
static bool reg_zero(struct device *dev, unsigned int reg)
{
	return reg == 0;
}

static bool ram_reg_zero(struct regmap_ram_data *data, unsigned int reg)
{
	return reg == 0;
}

static void raw_noinc_write(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	u16 val_test, val_last;
	u16 val_array[BLOCK_TEST_SIZE];

	config = raw_regmap_config;
	config.volatile_reg = reg_zero;
	config.writeable_noinc_reg = reg_zero;
	config.readable_noinc_reg = reg_zero;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	data->noinc_reg = ram_reg_zero;

	get_random_bytes(&val_array, sizeof(val_array));

	if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
		val_test = be16_to_cpu(val_array[1]) + 100;
		val_last = be16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	} else {
		val_test = le16_to_cpu(val_array[1]) + 100;
		val_last = le16_to_cpu(val_array[BLOCK_TEST_SIZE - 1]);
	}

	/* Put some data into the register following the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 1, val_test));

	/* Write some data to the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_noinc_write(map, 0, val_array,
						    sizeof(val_array)));

	/* We should read back the last value written */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 0, &val));
	KUNIT_ASSERT_EQ(test, val_last, val);

	/* Make sure we didn't touch the register after the noinc register */
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, 1, &val));
	KUNIT_ASSERT_EQ(test, val_test, val);
}

static void raw_sync(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	u16 val[3];
	u16 *hw_buf;
	unsigned int rval;
	int i;

	config = raw_regmap_config;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	hw_buf = (u16 *)data->vals;

	get_changed_bytes(&hw_buf[2], &val[0], sizeof(val));

	/* Do a regular write and a raw write in cache only mode */
	regcache_cache_only(map, true);
	KUNIT_EXPECT_EQ(test, 0, regmap_raw_write(map, 2, val,
						  sizeof(u16) * 2));
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, 4, val[2]));

	/* We should read back the new values, and defaults for the rest */
	for (i = 0; i < config.max_register + 1; i++) {
		KUNIT_EXPECT_EQ(test, 0, regmap_read(map, i, &rval));

		switch (i) {
		case 2:
		case 3:
			if (config.val_format_endian == REGMAP_ENDIAN_BIG) {
				KUNIT_EXPECT_EQ(test, rval,
						be16_to_cpu((__force __be16)val[i - 2]));
			} else {
				KUNIT_EXPECT_EQ(test, rval,
						le16_to_cpu((__force __le16)val[i - 2]));
			}
			break;
		case 4:
			KUNIT_EXPECT_EQ(test, rval, val[i - 2]);
			break;
		default:
			KUNIT_EXPECT_EQ(test, config.reg_defaults[i].def, rval);
			break;
		}
	}

	/*
	 * The value written via _write() was translated by the core,
	 * translate the original copy for comparison purposes.
	 */
	if (config.val_format_endian == REGMAP_ENDIAN_BIG)
		val[2] = cpu_to_be16(val[2]);
	else
		val[2] = cpu_to_le16(val[2]);

	/* The values should not appear in the "hardware" */
	KUNIT_EXPECT_MEMNEQ(test, &hw_buf[2], &val[0], sizeof(val));

	for (i = 0; i < config.max_register + 1; i++)
		data->written[i] = false;

	/* Do the sync */
	regcache_cache_only(map, false);
	regcache_mark_dirty(map);
	KUNIT_EXPECT_EQ(test, 0, regcache_sync(map));

	/* The values should now appear in the "hardware" */
	KUNIT_EXPECT_MEMEQ(test, &hw_buf[2], &val[0], sizeof(val));
}

static void raw_ranges(struct kunit *test)
{
	struct regmap *map;
	struct regmap_config config;
	struct regmap_ram_data *data;
	unsigned int val;
	int i;

	config = raw_regmap_config;
	config.volatile_reg = test_range_all_volatile;
	config.ranges = &test_range;
	config.num_ranges = 1;
	config.max_register = test_range.range_max;

	map = gen_raw_regmap(test, &config, &data);
	KUNIT_ASSERT_FALSE(test, IS_ERR(map));
	if (IS_ERR(map))
		return;

	/* Reset the page to a non-zero value to trigger a change */
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.selector_reg,
					      test_range.range_max));

	/* Check we set the page and use the window for writes */
	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map, test_range.range_min, 0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->written[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_write(map,
					      test_range.range_min +
					      test_range.window_len,
					      0));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->written[test_range.window_start]);

	/* Same for reads */
	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map, test_range.range_min, &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	data->written[test_range.selector_reg] = false;
	data->read[test_range.window_start] = false;
	KUNIT_EXPECT_EQ(test, 0, regmap_read(map,
					     test_range.range_min +
					     test_range.window_len,
					     &val));
	KUNIT_EXPECT_TRUE(test, data->written[test_range.selector_reg]);
	KUNIT_EXPECT_TRUE(test, data->read[test_range.window_start]);

	/* No physical access triggered in the virtual range */
	for (i = test_range.range_min; i < test_range.range_max; i++) {
		KUNIT_EXPECT_FALSE(test, data->read[i]);
		KUNIT_EXPECT_FALSE(test, data->written[i]);
	}
}

static struct kunit_case regmap_test_cases[] = {
	KUNIT_CASE_PARAM(basic_read_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(read_bypassed_volatile, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_write, regcache_types_gen_params),
	KUNIT_CASE_PARAM(bulk_read, regcache_types_gen_params),
	KUNIT_CASE_PARAM(write_readonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(read_writeonly, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults, regcache_types_gen_params),
	KUNIT_CASE_PARAM(reg_defaults_read_dev, regcache_types_gen_params),
	KUNIT_CASE_PARAM(register_patch, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stride, regcache_types_gen_params),
	KUNIT_CASE_PARAM(basic_ranges, regcache_types_gen_params),
	KUNIT_CASE_PARAM(stress_insert, regcache_types_gen_params),
	KUNIT_CASE_PARAM(cache_bypass, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_defaults_marked_dirty, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_default_after_cache_only, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_readonly, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_sync_patch, real_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_with_non_contiguous_ranges, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_marked_dirty, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_no_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_drop_all_and_sync_has_defaults, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_present, sparse_cache_types_gen_params),
	KUNIT_CASE_PARAM(cache_range_window_reg, real_cache_types_only_gen_params),

	KUNIT_CASE_PARAM(raw_read_defaults_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_read_defaults, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write_read_single, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_noinc_write, raw_test_types_gen_params),
	KUNIT_CASE_PARAM(raw_sync, raw_test_cache_types_gen_params),
	KUNIT_CASE_PARAM(raw_ranges, raw_test_cache_types_gen_params),
	{}
};
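
/*
 * The *_gen_params generators are created by the KUNIT_ARRAY_PARAM()
 * invocations above: each walks its parameter table and labels the
 * cases via param_to_desc(), so a single test function runs once per
 * cache type (and endianness/start offset where applicable).
 */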

static int regmap_test_init(struct kunit *test)
{
	struct regmap_test_priv *priv;
	struct device *dev;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	test->priv = priv;

	dev = kunit_device_register(test, "regmap_test");
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	priv->dev = get_device(dev);
	dev_set_drvdata(dev, test);

	return 0;
}

static void regmap_test_exit(struct kunit *test)
{
	struct regmap_test_priv *priv = test->priv;

	/* Destroy the dummy struct device */
	if (priv && priv->dev)
		put_device(priv->dev);
}

static struct kunit_suite regmap_test_suite = {
	.name = "regmap",
	.init = regmap_test_init,
	.exit = regmap_test_exit,
	.test_cases = regmap_test_cases,
};
kunit_test_suite(regmap_test_suite);

MODULE_LICENSE("GPL v2");