// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright 2007-2008 Pierre Ossman
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>		/* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#define RESULT_OK		0
#define RESULT_FAIL		1
#define RESULT_UNSUP_HOST	2
#define RESULT_UNSUP_CARD	3

#define BUFFER_ORDER		2
#define BUFFER_SIZE		(PAGE_SIZE << BUFFER_ORDER)

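/* With the common 4 KiB PAGE_SIZE, BUFFER_ORDER 2 yields a 16 KiB buffer
 * (32 x 512-byte sectors).
 */
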
#define TEST_ALIGN_END		8

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 * @sg_areq: scatterlist for non-blocking request
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
	struct scatterlist *sg_areq;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: amount of group of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec64 ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card	*card;

	u8		scratch[BUFFER_SIZE];
	u8		*buffer;
#ifdef CONFIG_HIGHMEM
	struct page	*highmem;
#endif
	struct mmc_test_area		area;
	struct mmc_test_general_result	*gr;
};

enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
	unsigned int *sg_len;
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
	return mmc_set_blocklen(test->card, size);
}

static bool mmc_test_card_cmd23(struct mmc_card *card)
{
	return mmc_card_mmc(card) ||
	       (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}

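/*
 * CMD23 (SET_BLOCK_COUNT) predefines the number of blocks to transfer, so a
 * multi-block request ends on its own and needs no CMD12 (STOP_TRANSMISSION).
 * It is only prepared below when the host, the card and the opcode all
 * support it.
 */
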
static void mmc_test_prepare_sbc(struct mmc_test_card *test,
				 struct mmc_request *mrq, unsigned int blocks)
{
	struct mmc_card *card = test->card;

	if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
	    !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
	    (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
		mrq->sbc = NULL;
		return;
	}

	mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
	mrq->sbc->arg = blocks;
	mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
		return;

	if (blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
	} else {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
	}

	mrq->cmd->arg = dev_addr;
	if (!mmc_card_blockaddr(test->card))
		mrq->cmd->arg <<= 9;

	mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	if (blocks == 1)
		mrq->stop = NULL;
	else {
		mrq->stop->opcode = MMC_STOP_TRANSMISSION;
		mrq->stop->arg = 0;
		mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
	}

	mrq->data->blksz = blksz;
	mrq->data->blocks = blocks;
	mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
	mrq->data->sg = sg;
	mrq->data->sg_len = sg_len;

	mmc_test_prepare_sbc(test, mrq, blocks);

	mmc_set_data_timeout(mrq->data, test->card);
}

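/*
 * A card is considered busy while CMD13 reports it not ready for data or
 * still in the programming state (i.e. an internal flash write has not yet
 * finished).
 */
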
static int mmc_test_busy(struct mmc_command *cmd)
{
	return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
	int ret, busy;
	struct mmc_command cmd = {};

	busy = 0;
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));

		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = test->card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

		ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
		if (ret)
			break;

		if (!busy && mmc_test_busy(&cmd)) {
			busy = 1;
			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
				pr_info("%s: Warning: Host did not wait for busy state to end.\n",
					mmc_hostname(test->card->host));
		}
	} while (mmc_test_busy(&cmd));

	return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
	u8 *buffer, unsigned addr, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, buffer, blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return mmc_test_wait_busy(test);
}

static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
	if (!mem)
		return;
	while (mem->cnt--)
		__free_pages(mem->arr[mem->cnt].page,
			     mem->arr[mem->cnt].order);
	kfree(mem->arr);
	kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
					       unsigned long max_sz,
					       unsigned int max_segs,
					       unsigned int max_seg_sz)
{
	unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
	unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
	unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
	unsigned long page_cnt = 0;
	unsigned long limit = nr_free_buffer_pages() >> 4;
	struct mmc_test_mem *mem;

	if (max_page_cnt > limit)
		max_page_cnt = limit;
	if (min_page_cnt > max_page_cnt)
		min_page_cnt = max_page_cnt;

	if (max_seg_page_cnt > max_page_cnt)
		max_seg_page_cnt = max_page_cnt;

	if (max_segs > max_page_cnt)
		max_segs = max_page_cnt;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return NULL;

	mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
	if (!mem->arr)
		goto out_free;

	while (max_page_cnt) {
		struct page *page;
		unsigned int order;
		gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
				__GFP_NORETRY;

		order = get_order(max_seg_page_cnt << PAGE_SHIFT);
		while (1) {
			page = alloc_pages(flags, order);
			if (page || !order)
				break;
			order -= 1;
		}
		if (!page) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
		mem->arr[mem->cnt].page = page;
		mem->arr[mem->cnt].order = order;
		mem->cnt += 1;
		if (max_page_cnt <= (1UL << order))
			break;
		max_page_cnt -= 1UL << order;
		page_cnt += 1UL << order;
		if (mem->cnt >= max_segs) {
			if (page_cnt < min_page_cnt)
				goto out_free;
			break;
		}
	}

	return mem;

out_free:
	mmc_test_free_mem(mem);
	return NULL;
}

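/*
 * For illustration (assuming 4 KiB pages): a max_seg_sz of 64 KiB maps to an
 * order-4 allocation; when no contiguous chunk of that order is available,
 * the order is lowered until alloc_pages() succeeds, falling back as far as
 * single pages.
 */
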
/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
			   struct scatterlist *sglist, int repeat,
			   unsigned int max_segs, unsigned int max_seg_sz,
			   unsigned int *sg_len, int min_sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;
	unsigned long sz = size;

	sg_init_table(sglist, max_segs);
	if (min_sg_len > max_segs)
		min_sg_len = max_segs;

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			if (min_sg_len && (size / min_sg_len < len))
				len = ALIGN(size / min_sg_len, 512);
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
				       unsigned long sz,
				       struct scatterlist *sglist,
				       unsigned int max_segs,
				       unsigned int max_seg_sz,
				       unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
	uint64_t ns;

	ns = timespec64_to_ns(ts);
	bytes *= 1000000000;

	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}

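/*
 * Worked example: transferring 1 MiB in 0.5 s gives bytes * 10^9 =
 * 2^20 * 10^9, while ns = 5 * 10^8 already fits in 32 bits, so no scaling
 * occurs and do_div() yields 2097152 bytes/s, i.e. 2 MiB/s.
 */
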
/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec64 ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	if (!test->gr)
		return;

	tr = kmalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		return;

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;

	list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
				struct timespec64 *ts1, struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
			 mmc_hostname(test->card->host), sectors, sectors >> 1,
			 (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
			 (u32)ts.tv_nsec, rate / 1000, rate / 1024,
			 iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
				    unsigned int count, struct timespec64 *ts1,
				    struct timespec64 *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec64 ts;

	ts = timespec64_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
			 "%llu.%09u seconds (%u kB/s, %u KiB/s, "
			 "%u.%02u IOPS, sg_len %d)\n",
			 mmc_hostname(test->card->host), count, sectors, count,
			 sectors >> 1, (sectors & 1 ? ".5" : ""),
			 (u64)ts.tv_sec, (u32)ts.tv_nsec,
			 rate / 1000, rate / 1024, iops / 100, iops % 100,
			 test->area.sg_len);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
	if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
		return card->ext_csd.sectors;
	else
		return card->csd.capacity << (card->csd.read_blkbits - 9);
}

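/*
 * For example, a hypothetical block-addressed eMMC reporting
 * ext_csd.sectors == 15269888 has 15269888 * 512 bytes, roughly 7.3 GiB.
 */
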
/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	if (write)
		memset(test->buffer, 0xDF, 512);
	else {
		for (i = 0; i < 512; i++)
			test->buffer[i] = i;
	}

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
	int ret, i;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	memset(test->buffer, 0, 512);

	for (i = 0; i < BUFFER_SIZE / 512; i++) {
		ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
	struct mmc_request *mrq, int write)
{
	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return;

	if (mrq->data->blocks > 1) {
		mrq->cmd->opcode = write ?
			MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
		mrq->stop = NULL;
	} else {
		mrq->cmd->opcode = MMC_SEND_STATUS;
		mrq->cmd->arg = test->card->rca << 16;
	}
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
				 struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (mrq->sbc && mrq->sbc->error)
		ret = mrq->sbc->error;
	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (!ret && mrq->data->bytes_xfered !=
		mrq->data->blocks * mrq->data->blksz)
		ret = RESULT_FAIL;

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
					struct mmc_request *mrq)
{
	int ret;

	if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
		return -EINVAL;

	ret = 0;

	if (!ret && mrq->cmd->error)
		ret = mrq->cmd->error;
	if (!ret && mrq->data->error == 0)
		ret = RESULT_FAIL;
	if (!ret && mrq->data->error != -ETIMEDOUT)
		ret = mrq->data->error;
	if (!ret && mrq->stop && mrq->stop->error)
		ret = mrq->stop->error;
	if (mrq->data->blocks > 1) {
		if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
			ret = RESULT_FAIL;
	} else {
		if (!ret && mrq->data->bytes_xfered > 0)
			ret = RESULT_FAIL;
	}

	if (ret == -EINVAL)
		ret = RESULT_UNSUP_HOST;

	return ret;
}

struct mmc_test_req {
	struct mmc_request mrq;
	struct mmc_command sbc;
	struct mmc_command cmd;
	struct mmc_command stop;
	struct mmc_command status;
	struct mmc_data data;
};

/*
 * Tests nonblock transfer with certain parameters
 */
static void mmc_test_req_reset(struct mmc_test_req *rq)
{
	memset(rq, 0, sizeof(struct mmc_test_req));

	rq->mrq.cmd = &rq->cmd;
	rq->mrq.data = &rq->data;
	rq->mrq.stop = &rq->stop;
}

static struct mmc_test_req *mmc_test_req_alloc(void)
{
	struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);

	if (rq)
		mmc_test_req_reset(rq);

	return rq;
}

static void mmc_test_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}

static int mmc_test_start_areq(struct mmc_test_card *test,
			       struct mmc_request *mrq,
			       struct mmc_request *prev_mrq)
{
	struct mmc_host *host = test->card->host;
	int err = 0;

	if (mrq) {
		init_completion(&mrq->completion);
		mrq->done = mmc_test_wait_done;
		mmc_pre_req(host, mrq);
	}

	if (prev_mrq) {
		wait_for_completion(&prev_mrq->completion);
		err = mmc_test_wait_busy(test);
		if (!err)
			err = mmc_test_check_result(test, prev_mrq);
	}

	if (!err && mrq) {
		err = mmc_start_request(host, mrq);
		if (err)
			mmc_retune_release(host);
	}

	if (prev_mrq)
		mmc_post_req(host, prev_mrq, 0);

	if (err && mrq)
		mmc_post_req(host, mrq, err);

	return err;
}

static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
				      unsigned int dev_addr, int write,
				      int count)
{
	struct mmc_test_req *rq1, *rq2;
	struct mmc_request *mrq, *prev_mrq;
	int i;
	int ret = RESULT_OK;
	struct mmc_test_area *t = &test->area;
	struct scatterlist *sg = t->sg;
	struct scatterlist *sg_areq = t->sg_areq;

	rq1 = mmc_test_req_alloc();
	rq2 = mmc_test_req_alloc();
	if (!rq1 || !rq2) {
		ret = RESULT_FAIL;
		goto err;
	}

	mrq = &rq1->mrq;
	prev_mrq = NULL;

	for (i = 0; i < count; i++) {
		mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
		mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
				     t->blocks, 512, write);
		ret = mmc_test_start_areq(test, mrq, prev_mrq);
		if (ret)
			goto err;

		if (!prev_mrq)
			prev_mrq = &rq2->mrq;

		swap(mrq, prev_mrq);
		swap(sg, sg_areq);
		dev_addr += t->blocks;
	}

	ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
	kfree(rq1);
	kfree(rq2);
	return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
			     blocks, blksz, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
	unsigned blocks, unsigned blksz, int write)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_command stop = {};
	struct mmc_data data = {};

	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	mrq.stop = &stop;

	sg_init_one(&sg, test->buffer, blocks * blksz);

	mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
	mmc_test_prepare_broken_mrq(test, &mrq, write);

	mmc_wait_for_req(test->card->host, &mrq);

	mmc_test_wait_busy(test);

	return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		for (i = 0; i < blocks * blksz; i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < TEST_ALIGN_END; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 1);
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 1, 512, 0);
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 1);
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	return mmc_test_broken_transfer(test, 2, 512, 0);
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
		mmc_hostname(test->card->host));

	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len, bool nonblock)
{
	struct mmc_test_area *t = &test->area;
	int err;
	unsigned int sg_len = 0;

	t->blocks = sz >> 9;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}

	if (err || !nonblock)
		goto err;

	if (max_scatter) {
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
						  t->max_segs, t->max_seg_sz,
						  &sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
				      t->max_seg_sz, &sg_len, min_sg_len);
	}
	if (!err && sg_len != t->sg_len)
		err = -EINVAL;

err:
	if (err)
		pr_info("%s: Failed to map sg list\n",
			mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
				unsigned int dev_addr, int write,
				int max_scatter, int timed, int count,
				bool nonblock, int min_sg_len)
{
	struct timespec64 ts1, ts2;
	int ret = 0;
	int i;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		struct mmc_test_area *t = &test->area;
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts1);
	if (nonblock)
		ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
	else
		for (i = 0; i < count && ret == 0; i++) {
			ret = mmc_test_area_transfer(test, dev_addr, write);
			dev_addr += sz >> 9;
		}

	if (ret)
		return ret;

	if (timed)
		ktime_get_ts64(&ts2);

	if (timed)
		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

	return 0;
}

static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
			    unsigned int dev_addr, int write, int max_scatter,
			    int timed)
{
	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
				    timed, 1, false, 0);
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	if (!mmc_can_erase(test->card))
		return 0;

	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
			 MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;

	kfree(t->sg);
	kfree(t->sg_areq);
	mmc_test_free_mem(t->mem);

	return 0;
}

/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
	struct mmc_test_area *t = &test->area;
	unsigned long min_sz = 64 * 1024, sz;
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	/* Make the test area size about 4MiB */
	sz = (unsigned long)test->card->pref_erase << 9;
	t->max_sz = sz;
	while (t->max_sz < 4 * 1024 * 1024)
		t->max_sz += sz;
	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
		t->max_sz -= sz;

	t->max_segs = test->card->host->max_segs;
	t->max_seg_sz = test->card->host->max_seg_size;
	t->max_seg_sz -= t->max_seg_sz % 512;

	t->max_tfr = t->max_sz;
	if (t->max_tfr >> 9 > test->card->host->max_blk_count)
		t->max_tfr = test->card->host->max_blk_count << 9;
	if (t->max_tfr > test->card->host->max_req_size)
		t->max_tfr = test->card->host->max_req_size;
	if (t->max_tfr / t->max_seg_sz > t->max_segs)
		t->max_tfr = t->max_segs * t->max_seg_sz;

	/*
	 * Try to allocate enough memory for a max. sized transfer.  Less is OK
	 * because the same memory can be mapped into the scatterlist more than
	 * once.  Also, take into account the limits imposed on scatterlist
	 * segments by the host driver.
	 */
	t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
				    t->max_seg_sz);
	if (!t->mem)
		return -ENOMEM;

	t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
	if (!t->sg) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
				   GFP_KERNEL);
	if (!t->sg_areq) {
		ret = -ENOMEM;
		goto out_free;
	}

	t->dev_addr = mmc_test_capacity(test->card) / 2;
	t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

	if (erase) {
		ret = mmc_test_area_erase(test);
		if (ret)
			goto out_free;
	}

	if (fill) {
		ret = mmc_test_area_fill(test);
		if (ret)
			goto out_free;
	}

	return 0;

out_free:
	mmc_test_area_cleanup(test);
	return ret;
}

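/*
 * For illustration: a card with pref_erase == 1024 sectors gives an initial
 * sz of 512 KiB, so the sizing loop above settles on exactly 4 MiB after
 * eight steps.
 */
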
/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
				     int max_scatter)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
				max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ktime_get_ts64(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		ktime_get_ts64(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	ktime_get_ts64(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	ktime_get_ts64(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec64 ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		ktime_get_ts64(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		ktime_get_ts64(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}

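/*
 * The multiplier 1103515245 and increment 12345 are the classic ANSI C
 * rand() LCG; bits 16..30 of the state give a 15-bit value that is then
 * scaled into the range [0, rnd_cnt).
 */
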
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec64 ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	ktime_get_ts64(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		ktime_get_ts64(&ts2);
		ts = timespec64_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}

static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec64 ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */

	ktime_get_ts64(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	ktime_get_ts64(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}

static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}

static int mmc_test_rw_multiple(struct mmc_test_card *test,
				struct mmc_test_multiple_rw *tdata,
				unsigned int reqsize, unsigned int size,
				int min_sg_len)
{
	unsigned int dev_addr;
	struct mmc_test_area *t = &test->area;
	int ret = 0;

	/* Set up test area */
	if (size > mmc_test_capacity(test->card) / 2 * 512)
		size = mmc_test_capacity(test->card) / 2 * 512;
	if (reqsize > t->max_tfr)
		reqsize = t->max_tfr;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if ((dev_addr & 0xffff0000))
		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
	else
		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
	if (!dev_addr)
		goto err;

	if (reqsize > size)
		return 0;

	/* prepare test area */
	if (mmc_can_erase(test->card) &&
	    tdata->prepare & MMC_TEST_PREP_ERASE) {
		ret = mmc_erase(test->card, dev_addr,
				size / 512, MMC_SECURE_ERASE_ARG);
		if (ret)
			ret = mmc_erase(test->card, dev_addr,
					size / 512, MMC_ERASE_ARG);
		if (ret)
			goto err;
	}

	/* Run test */
	ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
				   tdata->do_write, 0, 1, size / reqsize,
				   tdata->do_nonblock_req, min_sg_len);
	if (ret)
		goto err;

	return ret;
err:
	pr_info("[%s] error\n", __func__);
	return ret;
}

static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
				     struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;
	void *pre_req = test->card->host->ops->pre_req;
	void *post_req = test->card->host->ops->post_req;

	if (rw->do_nonblock_req &&
	    ((!pre_req && post_req) || (pre_req && !post_req))) {
		pr_info("error: only one of pre/post is defined\n");
		return -EINVAL;
	}

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
		if (ret)
			break;
	}
	return ret;
}

static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
				       struct mmc_test_multiple_rw *rw)
{
	int ret = 0;
	int i;

	for (i = 0 ; i < rw->len && ret == 0; i++) {
		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
					   rw->sg_len[i]);
		if (ret)
			break;
	}
	return ret;
}

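/*
 * The profiles below sweep request sizes from 4 KiB (1 << 12) up to 4 MiB
 * (1 << 22) in powers of two (2 MiB is skipped), or scatterlist lengths from
 * 1 to 512 elements, over a TEST_AREA_MAX_SIZE region.
 */
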
/*
 * Multiple blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking write 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple non-blocking read 4k to 4 MB chunks
 */
static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
	struct mmc_test_multiple_rw test_data = {
		.bs = bs,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(bs),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_size(test, &test_data);
}

/*
 * Multiple blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking write 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = true,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_ERASE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = false,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * Multiple non-blocking read 1 to 512 sg elements
 */
static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
{
	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
				 1 << 7, 1 << 8, 1 << 9};
	struct mmc_test_multiple_rw test_data = {
		.sg_len = sg_len,
		.size = TEST_AREA_MAX_SIZE,
		.len = ARRAY_SIZE(sg_len),
		.do_write = false,
		.do_nonblock_req = true,
		.prepare = MMC_TEST_PREP_NONE,
	};

	return mmc_test_rw_multiple_sg_len(test, &test_data);
}

/*
 * eMMC hardware reset.
 */
static int mmc_test_reset(struct mmc_test_card *test)
{
	struct mmc_card *card = test->card;
	struct mmc_host *host = card->host;
	int err;

	err = mmc_hw_reset(host);
	if (!err) {
		/*
		 * Reset will re-enable the card's command queue, but tests
		 * expect it to be disabled.
		 */
		if (card->ext_csd.cmdq_en)
			mmc_cmdq_disable(card);
		return RESULT_OK;
	} else if (err == -EOPNOTSUPP) {
		return RESULT_UNSUP_HOST;
	}

	return RESULT_FAIL;
}

static int mmc_test_send_status(struct mmc_test_card *test,
				struct mmc_command *cmd)
{
	memset(cmd, 0, sizeof(*cmd));

	cmd->opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(test->card->host))
		cmd->arg = test->card->rca << 16;
	cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(test->card->host, cmd, 0);
}

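/*
 * CMD13 (SEND_STATUS) carries no data payload, so hosts advertising
 * MMC_CAP_CMD_DURING_TFR can issue it while a data transfer is still in
 * flight; the "ongoing transfer" tests below depend on that.
 */
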
static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
				     unsigned int dev_addr, int use_sbc,
				     int repeat_cmd, int write, int use_areq)
{
	struct mmc_test_req *rq = mmc_test_req_alloc();
	struct mmc_host *host = test->card->host;
	struct mmc_test_area *t = &test->area;
	struct mmc_request *mrq;
	unsigned long timeout;
	bool expired = false;
	int ret = 0, cmd_ret;
	u32 status = 0;
	int count = 0;

	if (!rq)
		return -ENOMEM;

	mrq = &rq->mrq;
	if (use_sbc)
		mrq->sbc = &rq->sbc;
	mrq->cap_cmd_during_tfr = true;

	mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
			     512, write);

	if (use_sbc && t->blocks > 1 && !mrq->sbc) {
		ret = mmc_host_cmd23(host) ?
		      RESULT_UNSUP_CARD :
		      RESULT_UNSUP_HOST;
		goto out_free;
	}

	/* Start ongoing data request */
	if (use_areq) {
		ret = mmc_test_start_areq(test, mrq, NULL);
		if (ret)
			goto out_free;
	} else {
		mmc_wait_for_req(host, mrq);
	}

	timeout = jiffies + msecs_to_jiffies(3000);
	do {
		count += 1;

		/* Send status command while data transfer in progress */
		cmd_ret = mmc_test_send_status(test, &rq->status);
		if (cmd_ret)
			break;

		status = rq->status.resp[0];
		if (status & R1_ERROR) {
			cmd_ret = -EIO;
			break;
		}

		if (mmc_is_req_done(host, mrq))
			break;

		expired = time_after(jiffies, timeout);
		if (expired) {
			pr_info("%s: timeout waiting for Tran state status %#x\n",
				mmc_hostname(host), status);
			cmd_ret = -ETIMEDOUT;
			break;
		}
	} while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);

	/* Wait for data request to complete */
	if (use_areq) {
		ret = mmc_test_start_areq(test, NULL, mrq);
	} else {
		mmc_wait_for_req_done(test->card->host, mrq);
	}

	/*
	 * For cap_cmd_during_tfr request, upper layer must send stop if
	 * required.
	 */
	if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
		if (ret)
			mmc_wait_for_cmd(host, mrq->data->stop, 0);
		else
			ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
	}

	if (ret)
		goto out_free;

	if (cmd_ret) {
		pr_info("%s: Send Status failed: status %#x, error %d\n",
			mmc_hostname(test->card->host), status, cmd_ret);
	}

	ret = mmc_test_check_result(test, mrq);
	if (ret)
		goto out_free;

	ret = mmc_test_wait_busy(test);
	if (ret)
		goto out_free;

	if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
		pr_info("%s: %d commands completed during transfer of %u blocks\n",
			mmc_hostname(test->card->host), count, t->blocks);

	if (cmd_ret)
		ret = cmd_ret;
out_free:
	kfree(rq);

	return ret;
}

static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
				      unsigned long sz, int use_sbc, int write,
				      int use_areq)
{
	struct mmc_test_area *t = &test->area;
	int ret;

	if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
		return RESULT_UNSUP_HOST;

	ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
	if (ret)
		return ret;

	ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
					use_areq);
	if (ret)
		return ret;

	return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
					 use_areq);
}

static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
				    int write, int use_areq)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz <= t->max_tfr; sz += 512) {
		ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
						 use_areq);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Commands during read - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 0, 0, 0);
}

/*
 * Commands during write - no Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 0, 1, 0);
}

/*
 * Commands during read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 0, 0);
}

/*
 * Commands during write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 1, 0);
}

/*
 * Commands during non-blocking read - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 0, 1);
}

/*
 * Commands during non-blocking write - use Set Block Count (CMD23).
 */
static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
{
	return mmc_test_cmds_during_tfr(test, 1, 1, 1);
}

2576 static const struct mmc_test_case mmc_test_cases[] = {
2578 .name = "Basic write (no data verification)",
2579 .run = mmc_test_basic_write,
2583 .name = "Basic read (no data verification)",
2584 .run = mmc_test_basic_read,
2588 .name = "Basic write (with data verification)",
2589 .prepare = mmc_test_prepare_write,
2590 .run = mmc_test_verify_write,
2591 .cleanup = mmc_test_cleanup,
2595 .name = "Basic read (with data verification)",
2596 .prepare = mmc_test_prepare_read,
2597 .run = mmc_test_verify_read,
2598 .cleanup = mmc_test_cleanup,
2602 .name = "Multi-block write",
2603 .prepare = mmc_test_prepare_write,
2604 .run = mmc_test_multi_write,
2605 .cleanup = mmc_test_cleanup,
2609 .name = "Multi-block read",
2610 .prepare = mmc_test_prepare_read,
2611 .run = mmc_test_multi_read,
2612 .cleanup = mmc_test_cleanup,
2616 .name = "Power of two block writes",
2617 .prepare = mmc_test_prepare_write,
2618 .run = mmc_test_pow2_write,
2619 .cleanup = mmc_test_cleanup,
2623 .name = "Power of two block reads",
2624 .prepare = mmc_test_prepare_read,
2625 .run = mmc_test_pow2_read,
2626 .cleanup = mmc_test_cleanup,
2630 .name = "Weird sized block writes",
2631 .prepare = mmc_test_prepare_write,
2632 .run = mmc_test_weird_write,
2633 .cleanup = mmc_test_cleanup,
2637 .name = "Weird sized block reads",
2638 .prepare = mmc_test_prepare_read,
2639 .run = mmc_test_weird_read,
2640 .cleanup = mmc_test_cleanup,
2644 .name = "Badly aligned write",
2645 .prepare = mmc_test_prepare_write,
2646 .run = mmc_test_align_write,
2647 .cleanup = mmc_test_cleanup,
2651 .name = "Badly aligned read",
2652 .prepare = mmc_test_prepare_read,
2653 .run = mmc_test_align_read,
2654 .cleanup = mmc_test_cleanup,
2658 .name = "Badly aligned multi-block write",
2659 .prepare = mmc_test_prepare_write,
2660 .run = mmc_test_align_multi_write,
2661 .cleanup = mmc_test_cleanup,
2665 .name = "Badly aligned multi-block read",
2666 .prepare = mmc_test_prepare_read,
2667 .run = mmc_test_align_multi_read,
2668 .cleanup = mmc_test_cleanup,
2672 .name = "Correct xfer_size at write (start failure)",
2673 .run = mmc_test_xfersize_write,
2677 .name = "Correct xfer_size at read (start failure)",
2678 .run = mmc_test_xfersize_read,
2682 .name = "Correct xfer_size at write (midway failure)",
2683 .run = mmc_test_multi_xfersize_write,
2687 .name = "Correct xfer_size at read (midway failure)",
2688 .run = mmc_test_multi_xfersize_read,
2691 #ifdef CONFIG_HIGHMEM
2694 .name = "Highmem write",
2695 .prepare = mmc_test_prepare_write,
2696 .run = mmc_test_write_high,
2697 .cleanup = mmc_test_cleanup,
2701 .name = "Highmem read",
2702 .prepare = mmc_test_prepare_read,
2703 .run = mmc_test_read_high,
2704 .cleanup = mmc_test_cleanup,
2708 .name = "Multi-block highmem write",
2709 .prepare = mmc_test_prepare_write,
2710 .run = mmc_test_multi_write_high,
2711 .cleanup = mmc_test_cleanup,
2715 .name = "Multi-block highmem read",
2716 .prepare = mmc_test_prepare_read,
2717 .run = mmc_test_multi_read_high,
2718 .cleanup = mmc_test_cleanup,
2724 .name = "Highmem write",
2725 .run = mmc_test_no_highmem,
2729 .name = "Highmem read",
2730 .run = mmc_test_no_highmem,
2734 .name = "Multi-block highmem write",
2735 .run = mmc_test_no_highmem,
2739 .name = "Multi-block highmem read",
2740 .run = mmc_test_no_highmem,
2743 #endif /* CONFIG_HIGHMEM */
	{
		.name = "Best-case read performance",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_performance,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case read performance into scattered pages",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_best_read_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Best-case write performance from scattered pages",
		.prepare = mmc_test_area_prepare_erase,
		.run = mmc_test_best_write_perf_max_scatter,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Single trim performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive read performance by transfer size",
		.prepare = mmc_test_area_prepare_fill,
		.run = mmc_test_profile_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Consecutive trim performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_seq_trim_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random read performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Random write performance by transfer size",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_random_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential read into scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_read_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Large sequential write from scattered pages",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_large_seq_write_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_write_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance with non-blocking req 4k to 4MB",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_mult_read_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Write performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_wr_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_blocking_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Read performance non-blocking req 1 to 512 sg elems",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_profile_sglen_r_nonblock_perf,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Reset test",
		.run = mmc_test_reset,
	},

	{
		.name = "Commands during read - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - no Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking read - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_read_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},

	{
		.name = "Commands during non-blocking write - use Set Block Count (CMD23)",
		.prepare = mmc_test_area_prepare,
		.run = mmc_test_cmds_during_write_cmd23_nonblock,
		.cleanup = mmc_test_area_cleanup,
	},
};
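/*
 * Note that the test numbers exposed through debugfs are simply the
 * 1-based positions of the entries in mmc_test_cases[], so inserting a
 * new entry in the middle renumbers every test case after it.
 */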
static DEFINE_MUTEX(mmc_test_lock);

static LIST_HEAD(mmc_test_result);
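/*
 * Run the test case whose 1-based number is @testcase, or all test
 * cases when @testcase is 0.
 */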
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	pr_info("%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	mmc_claim_host(test->card->host);

	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
		struct mmc_test_general_result *gr;

		if (testcase && ((i + 1) != testcase))
			continue;

		pr_info("%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				pr_info("%s: Result: Prepare stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		gr = kzalloc(sizeof(*gr), GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign the data we already know */
			gr->card = test->card;
			gr->testcase = i;

			/* Append this container to the global result list */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to the created container in our
			 * private structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			pr_info("%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			pr_info("%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			pr_info("%s: Result: UNSUPPORTED (by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			pr_info("%s: Result: UNSUPPORTED (by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			pr_info("%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	pr_info("%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
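/* Free the results of all tests, or only those of @card when it is non-NULL */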
static void mmc_test_free_result(struct mmc_card *card)
{
	struct mmc_test_general_result *gr, *grs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr, *trs;

		if (card && gr->card != card)
			continue;

		list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
			list_del(&tr->link);
			kfree(tr);
		}

		list_del(&gr->link);
		kfree(gr);
	}

	mutex_unlock(&mmc_test_lock);
}

static LIST_HEAD(mmc_test_file_test);
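/*
 * Print the collected results. Each test case produces one
 * "Test <n>: <result>" line, followed by one line per recorded transfer
 * measurement: count, sectors, duration (seconds.nanoseconds), rate
 * (assumed to be bytes per second, as computed by the measurement
 * helpers) and IOPS with two decimal places.
 */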
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		if (gr->card != card)
			continue;

		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
				tr->count, tr->sectors,
				(u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
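/*
 * Writing a test number to the "test" debugfs file runs that test case
 * against the card; reading the file back returns the results. A sketch
 * of a typical session, assuming debugfs is mounted at /sys/kernel/debug
 * (the mmc0/mmc0:0001 component is only an example and depends on the
 * host and card instance):
 *
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/testlist    # list test cases
 *   echo 0 > /sys/kernel/debug/mmc0/mmc0:0001/test   # run all tests
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/test        # read the results
 */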
static ssize_t mtf_test_write(struct file *file, const char __user *buf,
	size_t count, loff_t *pos)
{
	struct seq_file *sf = (struct seq_file *)file->private_data;
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_card *test;
	long testcase;
	int ret;

	ret = kstrtol_from_user(buf, count, 10, &testcase);
	if (ret)
		return ret;

	test = kzalloc(sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	/*
	 * Remove all results associated with the given card so that only
	 * the data of the last run is kept.
	 */
	mmc_test_free_result(card);

	test->card = card;

	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
#ifdef CONFIG_HIGHMEM
	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
#endif

#ifdef CONFIG_HIGHMEM
	if (test->buffer && test->highmem) {
#else
	if (test->buffer) {
#endif
		mutex_lock(&mmc_test_lock);
		mmc_test_run(test, testcase);
		mutex_unlock(&mmc_test_lock);
	}

#ifdef CONFIG_HIGHMEM
	__free_pages(test->highmem, BUFFER_ORDER);
#endif
	kfree(test->buffer);
	kfree(test);

	return count;
}
static const struct file_operations mmc_test_fops_test = {
	.open = mtf_test_open,
	.read = seq_read,
	.write = mtf_test_write,
	.llseek = seq_lseek,
	.release = single_release,
};
static int mtf_testlist_show(struct seq_file *sf, void *data)
{
	int i;

	mutex_lock(&mmc_test_lock);

	seq_puts(sf, "0:\tRun all tests\n");
	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
		seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);

	mutex_unlock(&mmc_test_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(mtf_testlist);
static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}
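/*
 * Each debugfs file created below is also tracked on mmc_test_file_test
 * so that it can be removed again when its card goes away or the module
 * is unloaded.
 */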
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
	const char *name, umode_t mode, const struct file_operations *fops)
{
	struct dentry *file = NULL;
	struct mmc_test_dbgfs_file *df;

	if (card->debugfs_root)
		file = debugfs_create_file(name, mode, card->debugfs_root,
					   card, fops);

	df = kmalloc(sizeof(*df), GFP_KERNEL);
	if (!df) {
		debugfs_remove(file);
		return -ENOMEM;
	}

	df->card = card;
	df->file = file;

	list_add(&df->link, &mmc_test_file_test);
	return 0;
}
static int mmc_test_register_dbgfs_file(struct mmc_card *card)
{
	int ret;

	mutex_lock(&mmc_test_lock);

	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
		&mmc_test_fops_test);
	if (ret)
		goto err;

	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
		&mtf_testlist_fops);
	if (ret)
		goto err;

err:
	mutex_unlock(&mmc_test_lock);

	return ret;
}
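/*
 * The tests drive the card with raw requests, which does not mix with
 * command queueing, so CMDQ is switched off here for the duration of
 * testing and switched back on in mmc_test_remove() when the card had
 * left it enabled.
 */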
static int mmc_test_probe(struct mmc_card *card)
{
	int ret;

	if (!mmc_card_mmc(card) && !mmc_card_sd(card))
		return -ENODEV;

	ret = mmc_test_register_dbgfs_file(card);
	if (ret)
		return ret;

	if (card->ext_csd.cmdq_en) {
		mmc_claim_host(card->host);
		ret = mmc_cmdq_disable(card);
		mmc_release_host(card->host);
		if (ret)
			return ret;
	}

	dev_info(&card->dev, "Card claimed for testing.\n");

	return 0;
}
static void mmc_test_remove(struct mmc_card *card)
{
	if (card->reenable_cmdq) {
		mmc_claim_host(card->host);
		mmc_cmdq_enable(card);
		mmc_release_host(card->host);
	}
	mmc_test_free_result(card);
	mmc_test_free_dbgfs_file(card);
}

static void mmc_test_shutdown(struct mmc_card *card)
{
}
static struct mmc_driver mmc_driver = {
	.drv = {
		.name = "mmc_test",
	},
	.probe = mmc_test_probe,
	.remove = mmc_test_remove,
	.shutdown = mmc_test_shutdown,
};
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}

static void __exit mmc_test_exit(void)
{
	/* Clear stale data in case a card is still plugged in */
	mmc_test_free_result(NULL);
	mmc_test_free_dbgfs_file(NULL);

	mmc_unregister_driver(&mmc_driver);
}
module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");