/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "qman_test.h"

#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/kthread.h>
/*
 * Algorithm:
 *
 * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
 * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
 * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
 * shuttle a "hot potato" frame around them such that every forwarding action
 * moves it from one cpu to another. (The use of more than one handler per cpu
 * is to allow enough handlers/FQs to truly test the significance of caching -
 * i.e. when cache-expiries are occurring.)
 *
 * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and
 * every word of the frame data will undergo a transformation step on each
 * forwarding action. To achieve this, each handler will be assigned a
 * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
 * received by a handler, the mixer of the expected sender is XOR'd into all
 * words of the entire frame, which is then validated against the original
 * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
 * the current handler. Apart from validating that the frame is taking the
 * expected path, this also provides some quasi-realistic overheads to each
 * forwarding action - dereferencing *all* the frame data, computation, and
 * conditional branching. There is a "special" handler designated to act as the
 * instigator of the test by creating and enqueuing the "hot potato" frame, and
 * to determine when the test has completed by counting HP_LOOPS iterations.
 *
 * Init phases:
 *
 * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
 *    into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
 *    handlers and link-list them (but do no other handler setup).
 *
 * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
 *    hp_cpu's 'iterator' to point to its first handler. With each loop,
 *    allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
 *    and advance the iterator for the next loop. This includes a final fixup,
 *    which connects the last handler to the first (and which is why phases 2
 *    and 3 are separate).
 *
 * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
 *    hp_cpu's 'iterator' to point to its first handler. With each loop,
 *    initialise FQ objects and advance the iterator for the next loop.
 *    Moreover, do this initialisation on the cpu it applies to so that Rx FQ
 *    initialisation targets the correct cpu.
 */
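
/*
 * Aside (illustration only, not part of the test): the mixer scheme works
 * because XOR is self-inverse. A minimal sketch of one forwarding hop,
 * assuming a single-word frame and the do_lfsr() helper defined below - the
 * sender XORs its mixer in, and the receiver XORs the same value back out
 * and compares against the LFSR sequence it regenerates locally:
 *
 *	u32 expected = HP_FIRST_WORD;	(both sides know the sequence)
 *	u32 wire = expected ^ tx_mixer;	(sender mixes, then enqueues)
 *	wire ^= rx_mixer;		(receiver's rx_mixer equals the
 *					 sender's tx_mixer, so this unmixes)
 *	if (wire != expected)
 *		...frame took the wrong path, or was corrupted...
 */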
/*
 * helper to run something on all cpus (can't use on_each_cpu(), as that invokes
 * the fn from irq context, which is too restrictive).
 */
struct bstrap {
	int (*fn)(void);
	atomic_t started;
};

static int bstrap_fn(void *bs)
{
	struct bstrap *bstrap = bs;
	int err;

	atomic_inc(&bstrap->started);
	err = bstrap->fn();
	if (err)
		return err;
	while (!kthread_should_stop())
		msleep(20);
	return 0;
}

static int on_all_cpus(int (*fn)(void))
{
	int cpu;

	for_each_cpu(cpu, cpu_online_mask) {
		struct bstrap bstrap = {
			.fn = fn,
			.started = ATOMIC_INIT(0)
		};
		struct task_struct *k = kthread_run_on_cpu(bstrap_fn, &bstrap,
							   cpu, "hotpotato/%u");
		int ret;

		if (IS_ERR(k))
			return -ENOMEM;
		/*
		 * If we call kthread_stop() before the "wake up" has had an
		 * effect, then the thread may exit with -EINTR without ever
		 * running the function. So poll until it's started before
		 * requesting it to stop.
		 */
		while (!atomic_read(&bstrap.started))
			msleep(20);
		ret = kthread_stop(k);
		if (ret)
			return ret;
	}
	return 0;
}
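
/*
 * Usage note (an aside, mirroring how the test phases below invoke this
 * helper): each phase that must execute with per-cpu context just passes its
 * function pointer, e.g.:
 *
 *	if (on_all_cpus(create_per_cpu_handlers))
 *		...bail out...
 *
 * The helper returns the first non-zero value any per-cpu invocation returns,
 * zero otherwise.
 */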
struct hp_handler {

	/* The following data is stashed when 'rx' is dequeued; */
	/* -------------- */
	/* The Rx FQ, dequeues of which will stash the entire hp_handler */
	struct qman_fq rx;
	/* The Tx FQ we should forward to */
	struct qman_fq tx;
	/* The value we XOR post-dequeue, prior to validating */
	u32 rx_mixer;
	/* The value we XOR pre-enqueue, after validating */
	u32 tx_mixer;
	/* what the hotpotato address should be on dequeue */
	dma_addr_t addr;
	u32 *frame_ptr;

	/* The following data isn't (necessarily) stashed on dequeue; */
	/* -------------- */
	u32 fqid_rx, fqid_tx;
	/* list node for linking us into 'hp_cpu' */
	struct list_head node;
	/* Just to check ... */
	unsigned int processor_id;
} ____cacheline_aligned;
struct hp_cpu {
	/* identify the cpu we run on; */
	unsigned int processor_id;
	/* root node for the per-cpu list of handlers */
	struct list_head handlers;
	/* list node for linking us into 'hp_cpu_list' */
	struct list_head node;
	/*
	 * when repeatedly scanning 'hp_cpu_list', each time linking the n'th
	 * handlers together, this is used as per-cpu iterator state
	 */
	struct hp_handler *iterator;
};
/* Each cpu has one of these */
static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);

/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
static DEFINE_SPINLOCK(hp_lock);

static unsigned int hp_cpu_list_length;

/* the "special" handler, that starts and terminates the test. */
static struct hp_handler *special_handler;
static int loop_counter;

/* handlers are allocated out of this, so they're properly aligned. */
static struct kmem_cache *hp_handler_slab;

/* this is the frame data */
static void *__frame_ptr;
static u32 *frame_ptr;
static dma_addr_t frame_dma;

/* needed for dma_map*() */
static const struct qm_portal_config *pcfg;

/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);
#define HP_PER_CPU	2
#define HP_LOOPS	8
/* 80 words = 320 bytes, like a small ethernet frame, and bleeds well past the
 * first cacheline */
#define HP_NUM_WORDS	80
/* First word of the LFSR-based frame data */
#define HP_FIRST_WORD	0xabbaf00d
static inline u32 do_lfsr(u32 prev)
{
	return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
}
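
/*
 * Aside: do_lfsr() is a right-shifting Galois LFSR - when the bit shifted
 * out is set, -(prev & 1u) is all-ones and the tap mask 0xd0000001 gets
 * XOR'd in. One step worked by hand (an illustration only, not used by the
 * test itself):
 *
 *	do_lfsr(0xabbaf00d) == (0xabbaf00d >> 1) ^ 0xd0000001
 *			    == 0x55dd7806 ^ 0xd0000001
 *			    == 0x85dd7807
 */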
static int allocate_frame_data(void)
{
	u32 lfsr = HP_FIRST_WORD;
	int loop;

	if (!qman_dma_portal) {
		pr_crit("portal not available\n");
		return -EIO;
	}

	pcfg = qman_get_qm_portal_config(qman_dma_portal);

	__frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
	if (!__frame_ptr)
		return -ENOMEM;

	frame_ptr = PTR_ALIGN(__frame_ptr, 64);
	for (loop = 0; loop < HP_NUM_WORDS; loop++) {
		frame_ptr[loop] = lfsr;
		lfsr = do_lfsr(lfsr);
	}

	frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
				   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pcfg->dev, frame_dma)) {
		pr_crit("dma mapping failure\n");
		kfree(__frame_ptr);
		return -EIO;
	}

	return 0;
}
static void deallocate_frame_data(void)
{
	dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
			 DMA_BIDIRECTIONAL);
	kfree(__frame_ptr);
}
static inline int process_frame_data(struct hp_handler *handler,
				     const struct qm_fd *fd)
{
	u32 *p = handler->frame_ptr;
	u32 lfsr = HP_FIRST_WORD;
	int loop;

	if (qm_fd_addr_get64(fd) != handler->addr) {
		pr_crit("bad frame address, [%llX != %llX]\n",
			qm_fd_addr_get64(fd), handler->addr);
		return -EIO;
	}
	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
		*p ^= handler->rx_mixer;
		if (*p != lfsr) {
			pr_crit("corrupt frame data");
			return -EIO;
		}
		*p ^= handler->tx_mixer;
		lfsr = do_lfsr(lfsr);
	}
	return 0;
}
static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
					    struct qman_fq *fq,
					    const struct qm_dqrr_entry *dqrr,
					    bool sched_napi)
{
	struct hp_handler *handler = (struct hp_handler *)fq;

	if (process_frame_data(handler, &dqrr->fd)) {
		WARN_ON(1);
		goto skip;
	}
	if (qman_enqueue(&handler->tx, &dqrr->fd)) {
		pr_crit("qman_enqueue() failed");
		WARN_ON(1);
	}
skip:
	return qman_cb_dqrr_consume;
}
static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
					     struct qman_fq *fq,
					     const struct qm_dqrr_entry *dqrr,
					     bool sched_napi)
{
	struct hp_handler *handler = (struct hp_handler *)fq;

	process_frame_data(handler, &dqrr->fd);
	if (++loop_counter < HP_LOOPS) {
		if (qman_enqueue(&handler->tx, &dqrr->fd)) {
			pr_crit("qman_enqueue() failed");
			WARN_ON(1);
			goto skip;
		}
	} else {
		pr_info("Received final (%dth) frame\n", loop_counter);
		wake_up(&queue);
	}
skip:
	return qman_cb_dqrr_consume;
}
static int create_per_cpu_handlers(void)
{
	struct hp_handler *handler;
	int loop;
	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);

	hp_cpu->processor_id = smp_processor_id();
	spin_lock(&hp_lock);
	list_add_tail(&hp_cpu->node, &hp_cpu_list);
	hp_cpu_list_length++;
	spin_unlock(&hp_lock);
	INIT_LIST_HEAD(&hp_cpu->handlers);
	for (loop = 0; loop < HP_PER_CPU; loop++) {
		handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
		if (!handler) {
			pr_crit("kmem_cache_alloc() failed");
			WARN_ON(1);
			return -EIO;
		}
		handler->processor_id = hp_cpu->processor_id;
		handler->addr = frame_dma;
		handler->frame_ptr = frame_ptr;
		list_add_tail(&handler->node, &hp_cpu->handlers);
	}
	return 0;
}
342 static int destroy_per_cpu_handlers(void)
344 struct list_head *loop, *tmp;
345 struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);
348 list_del(&hp_cpu->node);
349 spin_unlock(&hp_lock);
350 list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
352 struct hp_handler *handler = list_entry(loop, struct hp_handler,
354 if (qman_retire_fq(&handler->rx, &flags) ||
355 (flags & QMAN_FQ_STATE_BLOCKOOS)) {
356 pr_crit("qman_retire_fq(rx) failed, flags: %x", flags);
360 if (qman_oos_fq(&handler->rx)) {
361 pr_crit("qman_oos_fq(rx) failed");
365 qman_destroy_fq(&handler->rx);
366 qman_destroy_fq(&handler->tx);
367 qman_release_fqid(handler->fqid_rx);
368 list_del(&handler->node);
369 kmem_cache_free(hp_handler_slab, handler);
static inline u8 num_cachelines(u32 offset)
{
	u8 res = (offset + (L1_CACHE_BYTES - 1))
		 / (L1_CACHE_BYTES);
	/* QMan stashes at most 3 cachelines per stash type */
	if (res > 3)
		return 3;
	return res;
}
#define STASH_DATA_CL \
	num_cachelines(HP_NUM_WORDS * 4)
#define STASH_CTX_CL \
	num_cachelines(offsetof(struct hp_handler, fqid_rx))
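
/*
 * Aside (worked example, assuming 64-byte cachelines as on typical DPAA
 * targets): HP_NUM_WORDS * 4 = 320 bytes, so STASH_DATA_CL computes
 * ceil(320 / 64) = 5, which num_cachelines() clamps to the 3-cacheline
 * hardware limit. STASH_CTX_CL covers the hp_handler fields that precede
 * fqid_rx - i.e. exactly the members the dequeue callbacks touch on the
 * fast path - so dequeues prefetch those into the consuming cpu's cache.
 */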
387 static int init_handler(void *h)
389 struct qm_mcc_initfq opts;
390 struct hp_handler *handler = h;
393 if (handler->processor_id != smp_processor_id()) {
398 memset(&handler->rx, 0, sizeof(handler->rx));
399 if (handler == special_handler)
400 handler->rx.cb.dqrr = special_dqrr;
402 handler->rx.cb.dqrr = normal_dqrr;
403 err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
405 pr_crit("qman_create_fq(rx) failed");
408 memset(&opts, 0, sizeof(opts));
409 opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
410 QM_INITFQ_WE_CONTEXTA);
411 opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
412 qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
413 err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
414 QMAN_INITFQ_FLAG_LOCAL, &opts);
416 pr_crit("qman_init_fq(rx) failed");
420 memset(&handler->tx, 0, sizeof(handler->tx));
421 err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
424 pr_crit("qman_create_fq(tx) failed");
433 static void init_handler_cb(void *h)
static int init_phase2(void)
{
	int loop;
	u32 fqid = 0;
	u32 lfsr = 0xdeadbeef;
	struct hp_cpu *hp_cpu;
	struct hp_handler *handler;

	for (loop = 0; loop < HP_PER_CPU; loop++) {
		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
			int err;

			if (!loop)
				hp_cpu->iterator = list_first_entry(
						&hp_cpu->handlers,
						struct hp_handler, node);
			else
				hp_cpu->iterator = list_entry(
						hp_cpu->iterator->node.next,
						struct hp_handler, node);
			/* Rx FQID is the previous handler's Tx FQID */
			hp_cpu->iterator->fqid_rx = fqid;
			/* Allocate new FQID for Tx */
			err = qman_alloc_fqid(&fqid);
			if (err) {
				pr_crit("qman_alloc_fqid() failed");
				return err;
			}
			hp_cpu->iterator->fqid_tx = fqid;
			/* Rx mixer is the previous handler's Tx mixer */
			hp_cpu->iterator->rx_mixer = lfsr;
			/* Get new mixer for Tx */
			lfsr = do_lfsr(lfsr);
			hp_cpu->iterator->tx_mixer = lfsr;
		}
	}
	/* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
	hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
	handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
	if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
		return 1;
	handler->fqid_rx = fqid;
	handler->rx_mixer = lfsr;
	/* and tag it as our "special" handler */
	special_handler = handler;
	return 0;
}
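
/*
 * Aside (worked example): with 2 cpus (in the order they joined
 * 'hp_cpu_list') and HP_PER_CPU == 2, the scan above visits cpu0/handler0,
 * cpu1/handler0, cpu0/handler1, cpu1/handler1, and each handler's Tx FQID
 * becomes the next handler's Rx FQID, so every hop crosses cpus. The fixup
 * then feeds the last Tx FQID and mixer back into the first handler's Rx,
 * closing the ring around which the "hot potato" frame circulates.
 */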
static int init_phase3(void)
{
	int loop, err;
	struct hp_cpu *hp_cpu;

	for (loop = 0; loop < HP_PER_CPU; loop++) {
		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
			if (!loop)
				hp_cpu->iterator = list_first_entry(
						&hp_cpu->handlers,
						struct hp_handler, node);
			else
				hp_cpu->iterator = list_entry(
						hp_cpu->iterator->node.next,
						struct hp_handler, node);
			preempt_disable();
			if (hp_cpu->processor_id == smp_processor_id()) {
				err = init_handler(hp_cpu->iterator);
				if (err) {
					preempt_enable();
					return err;
				}
			} else {
				smp_call_function_single(hp_cpu->processor_id,
					init_handler_cb, hp_cpu->iterator, 1);
			}
			preempt_enable();
		}
	}
	return 0;
}
517 static int send_first_frame(void *ignore)
519 u32 *p = special_handler->frame_ptr;
520 u32 lfsr = HP_FIRST_WORD;
524 if (special_handler->processor_id != smp_processor_id()) {
528 memset(&fd, 0, sizeof(fd));
529 qm_fd_addr_set64(&fd, special_handler->addr);
530 qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
531 for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
534 pr_crit("corrupt frame data");
537 *p ^= special_handler->tx_mixer;
538 lfsr = do_lfsr(lfsr);
540 pr_info("Sending first frame\n");
541 err = qman_enqueue(&special_handler->tx, &fd);
543 pr_crit("qman_enqueue() failed");
static void send_first_frame_cb(void *ignore)
{
	if (send_first_frame(NULL))
		WARN_ON(1);
}
int qman_test_stash(void)
{
	int err;

	if (cpumask_weight(cpu_online_mask) < 2) {
		pr_info("%s(): skip - only 1 CPU\n", __func__);
		return 0;
	}

	pr_info("%s(): Starting\n", __func__);
	hp_cpu_list_length = 0;
	loop_counter = 0;
	hp_handler_slab = kmem_cache_create("hp_handler_slab",
			sizeof(struct hp_handler), L1_CACHE_BYTES,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!hp_handler_slab) {
		err = -EIO;
		pr_crit("kmem_cache_create() failed");
		goto failed;
	}

	err = allocate_frame_data();
	if (err)
		goto failed;

	/* Init phase 1 */
	pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
	if (on_all_cpus(create_per_cpu_handlers)) {
		err = -EIO;
		pr_crit("on_all_cpus() failed");
		goto failed;
	}
	pr_info("Number of cpus: %d, total of %d handlers\n",
		hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);

	/* Init phase 2 */
	err = init_phase2();
	if (err)
		goto failed;

	/* Init phase 3 */
	err = init_phase3();
	if (err)
		goto failed;
	preempt_disable();
	if (special_handler->processor_id == smp_processor_id()) {
		err = send_first_frame(NULL);
		if (err) {
			preempt_enable();
			goto failed;
		}
	} else {
		smp_call_function_single(special_handler->processor_id,
					 send_first_frame_cb, NULL, 1);
	}
	preempt_enable();

	wait_event(queue, loop_counter == HP_LOOPS);
	deallocate_frame_data();
	if (on_all_cpus(destroy_per_cpu_handlers)) {
		err = -EIO;
		pr_crit("on_all_cpus() failed");
		goto failed;
	}
	kmem_cache_destroy(hp_handler_slab);
	pr_info("%s(): Finished\n", __func__);

	return 0;
failed:
	WARN_ON(1);
	return err;
}