// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * livepatch-shadow-mod.c - Shadow variables, buggy module demo
 *
 * As a demonstration of the livepatch shadow variable API, this module
 * introduces memory leak behavior that the livepatch modules
 * livepatch-shadow-fix1.ko and livepatch-shadow-fix2.ko correct and
 * enhance.
 *
 * WARNING - even though the livepatch-shadow-fix modules patch the
 * memory leak, please load these modules at your own risk -- some
 * amount of memory may be leaked before the bug is patched.
 *
 * Step 1 - Load the buggy demonstration module:
 *
 *   insmod samples/livepatch/livepatch-shadow-mod.ko
 *
 * Watch dmesg output for a few moments to see new dummies being
 * allocated and a periodic cleanup check.  (Note: a small amount of
 * memory is being leaked.)
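 *
 * The messages follow the pr_info() formats used later in this file;
 * with placeholder values they look roughly like:
 *
 *   dummy_alloc: dummy @ <hashed ptr>, expires @ <jiffies>
 *   cleanup_work_func: jiffies = <jiffies>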
 *
 * Step 2 - Load livepatch fix1:
 *
 *   insmod samples/livepatch/livepatch-shadow-fix1.ko
 *
 * Continue watching dmesg and note that livepatch_fix1_dummy_free()
 * and livepatch_fix1_dummy_alloc() now log messages about leaked
 * memory and, eventually, about leaks prevented.
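 *
 * Under the hood, fix1 uses the livepatch shadow variable API
 * (klp_shadow_alloc(), klp_shadow_get() and klp_shadow_free()) to
 * attach the otherwise-lost "leak" pointer to each dummy.  The sketch
 * below only illustrates the idea; it is not the exact fix1 code, and
 * the SV_LEAK id and callback names are placeholders:
 *
 *   #define SV_LEAK	1
 *
 *   // constructor copies the leak pointer into the shadow data area
 *   static int shadow_leak_ctor(void *obj, void *shadow_data, void *ctor_data)
 *   {
 *           int **shadow_leak = shadow_data, **leak = ctor_data;
 *
 *           *shadow_leak = *leak;
 *           return 0;
 *   }
 *
 *   // in the patched dummy_alloc(): remember "leak" as d's shadow data
 *   klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
 *                    shadow_leak_ctor, &leak);
 *
 *   // in the patched dummy_free(): a destructor (not shown) would
 *   // kfree(*shadow_leak) before the shadow variable itself is removed
 *   if (klp_shadow_get(d, SV_LEAK))
 *           klp_shadow_free(d, SV_LEAK, shadow_leak_dtor);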
 *
 * Step 3 - Load livepatch fix2 (on top of fix1):
 *
 *   insmod samples/livepatch/livepatch-shadow-fix2.ko
 *
 * This module extends functionality through shadow variables, as a new
 * "check" counter is added to the dummy structure.  Periodic dmesg
 * messages will log these counts as dummies are cleaned up.
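 *
 * The "check" counter is not a new field compiled into struct dummy;
 * fix2 attaches it at run time with the same shadow variable API.
 * Again only a sketch, with the SV_COUNTER id chosen for illustration,
 * a patched check routine could do:
 *
 *   #define SV_COUNTER	2
 *
 *   int *shadow_count;
 *
 *   // attach a counter to this dummy on first use, then bump it
 *   shadow_count = klp_shadow_get_or_alloc(d, SV_COUNTER,
 *                                          sizeof(*shadow_count),
 *                                          GFP_NOWAIT, NULL, NULL);
 *   if (shadow_count)
 *           *shadow_count += 1;
 *
 * and a single klp_shadow_free_all(SV_COUNTER, NULL) call can release
 * every counter once the shadow variables are no longer needed.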
 *
 * Step 4 - Cleanup
 *
 * Unwind the demonstration by disabling the livepatch fix modules, then
 * removing them and the demo module:
 *
 *   echo 0 > /sys/kernel/livepatch/livepatch_shadow_fix2/enabled
 *   echo 0 > /sys/kernel/livepatch/livepatch_shadow_fix1/enabled
 *   rmmod livepatch-shadow-fix2
 *   rmmod livepatch-shadow-fix1
 *   rmmod livepatch-shadow-mod
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/workqueue.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Buggy module for shadow variable demo");
/* Allocate new dummies every second */
#define ALLOC_PERIOD	1
/* Check for expired dummies after a few new ones have been allocated */
#define CLEANUP_PERIOD	(3 * ALLOC_PERIOD)
/* Dummies expire after a few cleanup instances */
#define EXPIRE_PERIOD	(4 * CLEANUP_PERIOD)
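/*
 * With the values above, a new dummy is allocated every second, the
 * cleanup check runs every 3 seconds, and each dummy expires 4 cleanup
 * periods (12 seconds) after it was allocated.
 */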
/*
 * Keep a list of all the dummies so we can clean up any residual ones
 * on module exit
 */
static LIST_HEAD(dummy_list);
static DEFINE_MUTEX(dummy_list_mutex);
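/*
 * The dummy is the object that the livepatch fix modules later attach
 * shadow variables to; the buggy module itself only tracks list
 * membership and an expiration time.
 */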
struct dummy {
	struct list_head list;
	unsigned long jiffies_expire;
};
static __used noinline struct dummy *dummy_alloc(void)
{
	struct dummy *d;
	int *leak;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return NULL;

	d->jiffies_expire = jiffies + secs_to_jiffies(EXPIRE_PERIOD);

	/* Oops, forgot to save leak! */
	leak = kzalloc(sizeof(*leak), GFP_KERNEL);
	if (!leak) {
		kfree(d);
		return NULL;
	}

	pr_info("%s: dummy @ %p, expires @ %lx\n",
		__func__, d, d->jiffies_expire);

	return d;
}
static __used noinline void dummy_free(struct dummy *d)
{
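	/*
	 * Only the dummy itself is freed here.  The livepatch fix modules
	 * patch this function so that the extra "leak" allocation can be
	 * looked up through its shadow variable and freed as well.
	 */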
	pr_info("%s: dummy @ %p, expired = %lx\n",
		__func__, d, d->jiffies_expire);

	kfree(d);
}
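/*
 * The fix2 livepatch is expected to wrap the check below so it can also
 * bump the per-dummy "check" counter described in the header comment.
 */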
static __used noinline bool dummy_check(struct dummy *d,
					unsigned long jiffies)
{
	return time_after(jiffies, d->jiffies_expire);
}
/*
 * alloc_work_func: allocates new dummy structures, allocates additional
 *                  memory, aptly named "leak", but doesn't keep a
 *                  permanent record of it.
 */
static void alloc_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(alloc_dwork, alloc_work_func);
static void alloc_work_func(struct work_struct *work)
{
	struct dummy *d;

	d = dummy_alloc();
	if (!d)
		return;

	mutex_lock(&dummy_list_mutex);
	list_add(&d->list, &dummy_list);
	mutex_unlock(&dummy_list_mutex);

	schedule_delayed_work(&alloc_dwork, secs_to_jiffies(ALLOC_PERIOD));
}
/*
 * cleanup_work_func: frees dummy structures.  Without knowledge of
 *                    "leak", it leaks the additional memory that
 *                    alloc_work_func created.
 */
static void cleanup_work_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(cleanup_dwork, cleanup_work_func);
static void cleanup_work_func(struct work_struct *work)
{
	struct dummy *d, *tmp;
	unsigned long j;

	j = jiffies;
	pr_info("%s: jiffies = %lx\n", __func__, j);

	mutex_lock(&dummy_list_mutex);
	list_for_each_entry_safe(d, tmp, &dummy_list, list) {

		/* Kick out and free any expired dummies */
		if (dummy_check(d, j)) {
			list_del(&d->list);
			dummy_free(d);
		}
	}
	mutex_unlock(&dummy_list_mutex);

	schedule_delayed_work(&cleanup_dwork, secs_to_jiffies(CLEANUP_PERIOD));
}
static int livepatch_shadow_mod_init(void)
{
	schedule_delayed_work(&alloc_dwork, secs_to_jiffies(ALLOC_PERIOD));
	schedule_delayed_work(&cleanup_dwork, secs_to_jiffies(CLEANUP_PERIOD));

	return 0;
}
static void livepatch_shadow_mod_exit(void)
{
	struct dummy *d, *tmp;

	/* Wait for any dummies at work */
	cancel_delayed_work_sync(&alloc_dwork);
	cancel_delayed_work_sync(&cleanup_dwork);

	/* Cleanup residual dummies */
	list_for_each_entry_safe(d, tmp, &dummy_list, list) {
		list_del(&d->list);
		dummy_free(d);
	}
}
module_init(livepatch_shadow_mod_init);
module_exit(livepatch_shadow_mod_exit);