4 * usage: rcuq_test <duration> <readers>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 * Copyright (c) 2013 Mike D. Day, IBM Corporation.
27 #include "qemu/atomic.h"
29 #include "qemu/compiler.h"
30 #include "qemu/osdep.h"
31 #include "qemu/thread.h"
32 #include "qemu/rcu_queue.h"
/* Global test state.  counts_mutex serializes updates to the aggregate
 * statistics counters below. */
38 static QemuMutex counts_mutex;
39 static long long n_reads = 0LL;
40 static long long n_updates = 0LL;
41 static long long n_reclaims = 0LL;
42 static long long n_nodes_removed = 0LL;
43 static long long n_nodes = 0LL;
/* Nonzero when running under the glib test harness; selects assertion
 * vs. printf reporting in rcu_qtest(). */
44 static int g_test_in_charge = 0;
/* Bumped with atomic_inc() by each worker once started; rcu_qtest_run()
 * spins on it before releasing the threads. */
46 static int nthreadsrunning;
/* Start/stop flag polled by the worker loops.
 * NOTE(review): volatile is used for cross-thread signalling here;
 * an atomic access would be the stricter idiom -- confirm this matches
 * the rest of the file's conventions. */
52 static volatile int goflag = GOFLAG_INIT;
54 #define RCU_READ_RUN 1000
55 #define RCU_UPDATE_RUN 10
56 #define NR_THREADS 100
/* One slot per created thread; data[] hands each thread the address of
 * its rcu_reader registration (see rcu_q_reader/rcu_q_updater). */
59 static QemuThread threads[NR_THREADS];
60 static struct rcu_reader_data *data[NR_THREADS];
/* Return a pseudo-random index in [0, max).  The slight modulo bias of
 * rand() is irrelevant for a stress test. */
63 static int select_random_el(int max)
65 return (rand() % max);
/* Spawn one joinable worker thread running @func, passing it the address
 * of its per-thread data[] slot; refuses to exceed NR_THREADS.
 * NOTE(review): the elided lines presumably bail out after the fprintf
 * and increment n_threads after the create -- confirm against the full
 * source. */
69 static void create_thread(void *(*func)(void *))
71 if (n_threads >= NR_THREADS) {
72 fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
75 qemu_thread_create(&threads[n_threads], "test", func, &data[n_threads],
76 QEMU_THREAD_JOINABLE);
/* Join every thread previously started via create_thread(). */
80 static void wait_all_threads(void)
84 for (i = 0; i < n_threads; i++) {
85 qemu_thread_join(&threads[i]);
/* Linkage for membership in Q_list_head (manipulated only through the
 * QLIST_*_RCU macros). */
92 QLIST_ENTRY(list_element) entry;
/* call_rcu callback: recover the enclosing list_element from its embedded
 * rcu_head via container_of() and reclaim it after the grace period.  Runs
 * on the call_rcu thread, so (per the comment below) its counter update
 * needs no locking. */
96 static void reclaim_list_el(struct rcu_head *prcu)
98 struct list_element *el = container_of(prcu, struct list_element, rcu);
100 /* Accessed only from call_rcu thread. */
/* The shared RCU-protected list that readers traverse and the updater
 * mutates concurrently. */
104 static QLIST_HEAD(q_list_head, list_element) Q_list_head;
/* Reader thread body: register with the RCU subsystem, publish this
 * thread's rcu_reader back to the spawner through @arg, announce itself
 * via nthreadsrunning, wait for the start signal, then repeatedly walk
 * Q_list_head until goflag flips to GOFLAG_STOP.  The local read count is
 * folded into the global n_reads under counts_mutex before exit.
 * NOTE(review): the elided lines presumably wrap the traversal in
 * rcu_read_lock()/rcu_read_unlock() and accumulate n_reads_local --
 * confirm against the full source. */
106 static void *rcu_q_reader(void *arg)
108 long long n_reads_local = 0;
109 struct list_element *el;
111 rcu_register_thread();
113 *(struct rcu_reader_data **)arg = &rcu_reader;
114 atomic_inc(&nthreadsrunning);
/* Park until the driver flips goflag to GOFLAG_RUN. */
115 while (goflag == GOFLAG_INIT) {
119 while (goflag == GOFLAG_RUN) {
121 QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
123 if (goflag == GOFLAG_STOP) {
/* Publish this thread's statistics. */
131 qemu_mutex_lock(&counts_mutex);
132 n_reads += n_reads_local;
133 qemu_mutex_unlock(&counts_mutex);
135 rcu_unregister_thread();
/* Updater thread body: once started, each iteration removes one randomly
 * chosen element (deferring its reclamation with call_rcu1), then inserts
 * a fresh element before another randomly chosen position, counting each
 * remove+insert pair as two updates.  Local counters are merged into the
 * globals under counts_mutex on exit. */
140 static void *rcu_q_updater(void *arg)
143 long long n_nodes_local = 0;
144 long long n_updates_local = 0;
145 long long n_removed_local = 0;
146 struct list_element *el, *prev_el;
148 *(struct rcu_reader_data **)arg = &rcu_reader;
149 atomic_inc(&nthreadsrunning);
/* Park until the driver flips goflag to GOFLAG_RUN. */
150 while (goflag == GOFLAG_INIT) {
154 while (goflag == GOFLAG_RUN) {
/* Phase 1: unlink the target_el-th element; the SAFE variant is
 * needed because we remove while iterating. */
155 target_el = select_random_el(RCU_Q_LEN);
157 /* FOREACH_RCU could work here but let's use both macros */
158 QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) {
160 if (target_el == j) {
161 QLIST_REMOVE_RCU(prev_el, entry);
162 /* may be more than one updater in the future */
163 call_rcu1(&prev_el->rcu, reclaim_list_el);
168 if (goflag == GOFLAG_STOP) {
/* Phase 2: insert a new element before another random position. */
171 target_el = select_random_el(RCU_Q_LEN);
173 QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
175 if (target_el == j) {
176 prev_el = g_new(struct list_element, 1);
/* NOTE(review): this touches the global n_nodes without
 * counts_mutex, and the same delta is added again under the
 * mutex below -- looks like a pre-existing bug (race and/or
 * double count); confirm against upstream history. */
177 n_nodes += n_nodes_local;
178 QLIST_INSERT_BEFORE_RCU(el, prev_el, entry);
/* One removal + one insertion per loop pass. */
183 n_updates_local += 2;
/* Publish this thread's statistics. */
187 qemu_mutex_lock(&counts_mutex);
188 n_nodes += n_nodes_local;
189 n_updates += n_updates_local;
190 n_nodes_removed += n_removed_local;
191 qemu_mutex_unlock(&counts_mutex);
/* Populate Q_list_head with RCU_Q_LEN freshly allocated elements and
 * account for them in n_nodes.  Runs before any worker threads exist, but
 * takes counts_mutex anyway for consistency. */
195 static void rcu_qtest_init(void)
197 struct list_element *new_el;
201 for (i = 0; i < RCU_Q_LEN; i++) {
202 new_el = g_new(struct list_element, 1);
203 QLIST_INSERT_HEAD_RCU(&Q_list_head, new_el, entry);
205 qemu_mutex_lock(&counts_mutex);
206 n_nodes += RCU_Q_LEN;
207 qemu_mutex_unlock(&counts_mutex);
/* Drive one test run: wait until all nreaders readers plus the single
 * updater have announced themselves via nthreadsrunning, then (in the
 * elided lines, presumably) flip goflag to GOFLAG_RUN for @duration
 * seconds before signalling GOFLAG_STOP -- confirm against full source. */
210 static void rcu_qtest_run(int duration, int nreaders)
212 int nthreads = nreaders + 1;
213 while (atomic_read(&nthreadsrunning) < nthreads) {
219 goflag = GOFLAG_STOP;
/* Full test cycle: spawn @nreaders reader threads and one updater, run
 * them for @duration seconds, then drain the remaining list elements
 * through call_rcu1, wait until every removed node has been reclaimed,
 * and finally either assert the removed==reclaimed invariant (under the
 * gtest harness) or print the statistics (standalone mode). */
224 static void rcu_qtest(const char *test, int duration, int nreaders)
227 long long n_removed_local = 0;
229 struct list_element *el, *prev_el;
232 for (i = 0; i < nreaders; i++) {
233 create_thread(rcu_q_reader);
235 create_thread(rcu_q_updater);
236 rcu_qtest_run(duration, nreaders);
/* Workers have stopped; empty whatever is left on the list. */
238 QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) {
239 QLIST_REMOVE_RCU(prev_el, entry);
240 call_rcu1(&prev_el->rcu, reclaim_list_el);
243 qemu_mutex_lock(&counts_mutex);
244 n_nodes_removed += n_removed_local;
245 qemu_mutex_unlock(&counts_mutex);
/* Spin (elided body presumably sleeps/synchronizes) until the call_rcu
 * thread has caught up with all deferred reclamations. */
247 while (n_nodes_removed > n_reclaims) {
251 if (g_test_in_charge) {
252 g_assert_cmpint(n_nodes_removed, ==, n_reclaims);
254 printf("%s: %d readers; 1 updater; nodes read: " \
255 "%lld, nodes removed: %lld; nodes reclaimed: %lld\n",
256 test, nthreadsrunning - 1, n_reads, n_nodes_removed, n_reclaims);
/* Print command-line usage (argv[1] = duration, argv[2] = nreaders,
 * matching the parsing in main()) and, per convention, exit. */
261 static void usage(int argc, char *argv[])
263 fprintf(stderr, "Usage: %s duration nreaders\n", argv[0]);
/* Total time budget (seconds) divided among the gtest cases below. */
267 static int gtest_seconds;
/* gtest case: single reader, a quarter of the time budget. */
269 static void gtest_rcuq_one(void)
271 rcu_qtest("rcuqtest", gtest_seconds / 4, 1);
/* gtest case: five readers, a quarter of the time budget. */
274 static void gtest_rcuq_few(void)
276 rcu_qtest("rcuqtest", gtest_seconds / 4, 5);
/* gtest case: twenty readers, half of the time budget. */
279 static void gtest_rcuq_many(void)
281 rcu_qtest("rcuqtest", gtest_seconds / 2, 20);
/* Entry point.  Two modes:
 *  - gtest mode: any leading '-' option hands control to the glib test
 *    harness; the three list-test cases are registered and
 *    g_test_in_charge makes rcu_qtest() assert instead of print.
 *  - standalone mode: argv[1] is the duration in seconds and argv[2] the
 *    reader count, parsed with strtoul (base auto-detected).
 * NOTE(review): argv[1][0] is dereferenced here with no visible argc
 * check; presumably an elided guard (or usage() call) protects the
 * no-argument case -- confirm against the full source. */
285 int main(int argc, char *argv[])
287 int duration = 0, readers = 0;
289 qemu_mutex_init(&counts_mutex);
291 if (argv[1][0] == '-') {
292 g_test_init(&argc, &argv, NULL);
/* Quick vs. thorough runs presumably set gtest_seconds differently
 * in the elided branch bodies. */
293 if (g_test_quick()) {
298 g_test_add_func("/rcu/qlist/single-threaded", gtest_rcuq_one);
299 g_test_add_func("/rcu/qlist/short-few", gtest_rcuq_few);
300 g_test_add_func("/rcu/qlist/long-many", gtest_rcuq_many);
301 g_test_in_charge = 1;
304 duration = strtoul(argv[1], NULL, 0);
307 readers = strtoul(argv[2], NULL, 0);
309 if (duration && readers) {
310 rcu_qtest(argv[0], duration, readers);