1 /*
2 * linux/drivers/mmc/card/mmc_test.c
3 *
4 * Copyright 2007-2008 Pierre Ossman
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12 #include <linux/mmc/core.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
15 #include <linux/mmc/mmc.h>
16 #include <linux/slab.h>
17
18 #include <linux/scatterlist.h>
19 #include <linux/swap.h> /* For nr_free_buffer_pages() */
20 #include <linux/list.h>
21
22 #include <linux/debugfs.h>
23 #include <linux/uaccess.h>
24 #include <linux/seq_file.h>
25 #include <linux/module.h>
26
27 #define RESULT_OK 0
28 #define RESULT_FAIL 1
29 #define RESULT_UNSUP_HOST 2
30 #define RESULT_UNSUP_CARD 3
31
32 #define BUFFER_ORDER 2
33 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)
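/*
 * With 4 KiB pages (an assumption for illustration; PAGE_SIZE varies by
 * architecture), BUFFER_SIZE works out to 16 KiB, i.e. 32 sectors of
 * 512 bytes.
 */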
34
35 #define TEST_ALIGN_END 8
36
37 /*
38 * Limit the test area size to the maximum MMC HC erase group size. Note that
39 * the maximum SD allocation unit size is just 4MiB.
40 */
41 #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
42
43 /**
44 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
45 * @page: first page in the allocation
46 * @order: order of the number of pages allocated
47 */
48 struct mmc_test_pages {
49 struct page *page;
50 unsigned int order;
51 };
52
53 /**
54 * struct mmc_test_mem - allocated memory.
55 * @arr: array of allocations
56 * @cnt: number of allocations
57 */
58 struct mmc_test_mem {
59 struct mmc_test_pages *arr;
60 unsigned int cnt;
61 };
62
63 /**
64 * struct mmc_test_area - information for performance tests.
65 * @max_sz: test area size (in bytes)
66 * @dev_addr: address on card at which to do performance tests
67 * @max_tfr: maximum transfer size allowed by driver (in bytes)
68 * @max_segs: maximum segments allowed by driver in scatterlist @sg
69 * @max_seg_sz: maximum segment size allowed by driver
70 * @blocks: number of (512 byte) blocks currently mapped by @sg
71 * @sg_len: length of currently mapped scatterlist @sg
72 * @mem: allocated memory
73 * @sg: scatterlist
74 */
75 struct mmc_test_area {
76 unsigned long max_sz;
77 unsigned int dev_addr;
78 unsigned int max_tfr;
79 unsigned int max_segs;
80 unsigned int max_seg_sz;
81 unsigned int blocks;
82 unsigned int sg_len;
83 struct mmc_test_mem *mem;
84 struct scatterlist *sg;
85 };
86
87 /**
88 * struct mmc_test_transfer_result - transfer results for performance tests.
89 * @link: double-linked list
90 * @count: number of groups of sectors to check
91 * @sectors: number of sectors to check in one group
92 * @ts: time values of transfer
93 * @rate: calculated transfer rate
94 * @iops: I/O operations per second (times 100)
95 */
96 struct mmc_test_transfer_result {
97 struct list_head link;
98 unsigned int count;
99 unsigned int sectors;
100 struct timespec ts;
101 unsigned int rate;
102 unsigned int iops;
103 };
104
105 /**
106 * struct mmc_test_general_result - results for tests.
107 * @link: double-linked list
108 * @card: card under test
109 * @testcase: number of the test case
110 * @result: result of test run
111 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
112 */
113 struct mmc_test_general_result {
114 struct list_head link;
115 struct mmc_card *card;
116 int testcase;
117 int result;
118 struct list_head tr_lst;
119 };
120
121 /**
122 * struct mmc_test_dbgfs_file - debugfs related file.
123 * @link: double-linked list
124 * @card: card under test
125 * @file: file created under debugfs
126 */
127 struct mmc_test_dbgfs_file {
128 struct list_head link;
129 struct mmc_card *card;
130 struct dentry *file;
131 };
132
133 /**
134 * struct mmc_test_card - test information.
135 * @card: card under test
136 * @scratch: transfer buffer
137 * @buffer: transfer buffer
138 * @highmem: buffer for highmem tests
139 * @area: information for performance tests
140 * @gr: pointer to results of current testcase
141 */
142 struct mmc_test_card {
143 struct mmc_card *card;
144
145 u8 scratch[BUFFER_SIZE];
146 u8 *buffer;
147 #ifdef CONFIG_HIGHMEM
148 struct page *highmem;
149 #endif
150 struct mmc_test_area area;
151 struct mmc_test_general_result *gr;
152 };
153
154 enum mmc_test_prep_media {
155 MMC_TEST_PREP_NONE = 0,
156 MMC_TEST_PREP_WRITE_FULL = 1 << 0,
157 MMC_TEST_PREP_ERASE = 1 << 1,
158 };
159
160 struct mmc_test_multiple_rw {
161 unsigned int *sg_len;
162 unsigned int *bs;
163 unsigned int len;
164 unsigned int size;
165 bool do_write;
166 bool do_nonblock_req;
167 enum mmc_test_prep_media prepare;
168 };
169
170 struct mmc_test_async_req {
171 struct mmc_async_req areq;
172 struct mmc_test_card *test;
173 };
174
175 /*******************************************************************/
176 /* General helper functions */
177 /*******************************************************************/
178
179 /*
180 * Configure correct block size in card
181 */
182 static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
183 {
184 return mmc_set_blocklen(test->card, size);
185 }
186
187 /*
188 * Fill in the mmc_request structure given a set of transfer parameters.
189 */
190 static void mmc_test_prepare_mrq(struct mmc_test_card *test,
191 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
192 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
193 {
194 BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
195
196 if (blocks > 1) {
197 mrq->cmd->opcode = write ?
198 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
199 } else {
200 mrq->cmd->opcode = write ?
201 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
202 }
203
204 mrq->cmd->arg = dev_addr;
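	/* Standard-capacity cards use byte addressing: convert sectors to bytes */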
205 if (!mmc_card_blockaddr(test->card))
206 mrq->cmd->arg <<= 9;
207
208 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
209
210 if (blocks == 1)
211 mrq->stop = NULL;
212 else {
213 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
214 mrq->stop->arg = 0;
215 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
216 }
217
218 mrq->data->blksz = blksz;
219 mrq->data->blocks = blocks;
220 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
221 mrq->data->sg = sg;
222 mrq->data->sg_len = sg_len;
223
224 mmc_set_data_timeout(mrq->data, test->card);
225 }
226
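/*
 * A card is busy while the R1 READY_FOR_DATA bit is clear or the card is
 * still in the programming state following a write.
 */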
227 static int mmc_test_busy(struct mmc_command *cmd)
228 {
229 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
230 (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
231 }
232
233 /*
234 * Wait for the card to finish the busy state
235 */
236 static int mmc_test_wait_busy(struct mmc_test_card *test)
237 {
238 int ret, busy;
239 struct mmc_command cmd = {0};
240
241 busy = 0;
242 do {
243 memset(&cmd, 0, sizeof(struct mmc_command));
244
245 cmd.opcode = MMC_SEND_STATUS;
246 cmd.arg = test->card->rca << 16;
247 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
248
249 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
250 if (ret)
251 break;
252
253 if (!busy && mmc_test_busy(&cmd)) {
254 busy = 1;
255 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
256 pr_info("%s: Warning: Host did not "
257 "wait for busy state to end.\n",
258 mmc_hostname(test->card->host));
259 }
260 } while (mmc_test_busy(&cmd));
261
262 return ret;
263 }
264
265 /*
266 * Transfer a single sector of kernel addressable data
267 */
268 static int mmc_test_buffer_transfer(struct mmc_test_card *test,
269 u8 *buffer, unsigned addr, unsigned blksz, int write)
270 {
271 struct mmc_request mrq = {0};
272 struct mmc_command cmd = {0};
273 struct mmc_command stop = {0};
274 struct mmc_data data = {0};
275
276 struct scatterlist sg;
277
278 mrq.cmd = &cmd;
279 mrq.data = &data;
280 mrq.stop = &stop;
281
282 sg_init_one(&sg, buffer, blksz);
283
284 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
285
286 mmc_wait_for_req(test->card->host, &mrq);
287
288 if (cmd.error)
289 return cmd.error;
290 if (data.error)
291 return data.error;
292
293 return mmc_test_wait_busy(test);
294 }
295
296 static void mmc_test_free_mem(struct mmc_test_mem *mem)
297 {
298 if (!mem)
299 return;
300 while (mem->cnt--)
301 __free_pages(mem->arr[mem->cnt].page,
302 mem->arr[mem->cnt].order);
303 kfree(mem->arr);
304 kfree(mem);
305 }
306
307 /*
308 * Allocate a lot of memory, preferably max_sz bytes but at least min_sz. If
309 * memory is scarce, do not exceed 1/16th of the total lowmem pages. Also do
310 * not exceed a maximum number of segments and try not to make segments much
311 * bigger than maximum segment size.
312 */
313 static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
314 unsigned long max_sz,
315 unsigned int max_segs,
316 unsigned int max_seg_sz)
317 {
318 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
319 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
320 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
321 unsigned long page_cnt = 0;
322 unsigned long limit = nr_free_buffer_pages() >> 4;
323 struct mmc_test_mem *mem;
324
325 if (max_page_cnt > limit)
326 max_page_cnt = limit;
327 if (min_page_cnt > max_page_cnt)
328 min_page_cnt = max_page_cnt;
329
330 if (max_seg_page_cnt > max_page_cnt)
331 max_seg_page_cnt = max_page_cnt;
332
333 if (max_segs > max_page_cnt)
334 max_segs = max_page_cnt;
335
336 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
337 if (!mem)
338 return NULL;
339
340 	mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
342 if (!mem->arr)
343 goto out_free;
344
345 while (max_page_cnt) {
346 struct page *page;
347 unsigned int order;
348 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
349 __GFP_NORETRY;
350
351 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
352 while (1) {
353 page = alloc_pages(flags, order);
354 if (page || !order)
355 break;
356 order -= 1;
357 }
358 if (!page) {
359 if (page_cnt < min_page_cnt)
360 goto out_free;
361 break;
362 }
363 mem->arr[mem->cnt].page = page;
364 mem->arr[mem->cnt].order = order;
365 mem->cnt += 1;
366 if (max_page_cnt <= (1UL << order))
367 break;
368 max_page_cnt -= 1UL << order;
369 page_cnt += 1UL << order;
370 if (mem->cnt >= max_segs) {
371 if (page_cnt < min_page_cnt)
372 goto out_free;
373 break;
374 }
375 }
376
377 return mem;
378
379 out_free:
380 mmc_test_free_mem(mem);
381 return NULL;
382 }
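
/*
 * Minimal usage sketch (illustrative only; the sizes are assumptions):
 *
 *	struct mmc_test_mem *mem;
 *
 *	mem = mmc_test_alloc_mem(64 * 1024, 1024 * 1024, 32, 64 * 1024);
 *	if (mem) {
 *		... map with mmc_test_map_sg() and transfer ...
 *		mmc_test_free_mem(mem);
 *	}
 */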
383
384 /*
385 * Map memory into a scatterlist. Optionally allow the same memory to be
386 * mapped more than once.
387 */
388 static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
389 struct scatterlist *sglist, int repeat,
390 unsigned int max_segs, unsigned int max_seg_sz,
391 unsigned int *sg_len, int min_sg_len)
392 {
393 struct scatterlist *sg = NULL;
394 unsigned int i;
395 unsigned long sz = size;
396
397 sg_init_table(sglist, max_segs);
398 if (min_sg_len > max_segs)
399 min_sg_len = max_segs;
400
401 *sg_len = 0;
402 do {
403 for (i = 0; i < mem->cnt; i++) {
404 unsigned long len = PAGE_SIZE << mem->arr[i].order;
405
406 if (min_sg_len && (size / min_sg_len < len))
407 len = ALIGN(size / min_sg_len, 512);
408 if (len > sz)
409 len = sz;
410 if (len > max_seg_sz)
411 len = max_seg_sz;
412 if (sg)
413 sg = sg_next(sg);
414 else
415 sg = sglist;
416 if (!sg)
417 return -EINVAL;
418 sg_set_page(sg, mem->arr[i].page, len, 0);
419 sz -= len;
420 *sg_len += 1;
421 if (!sz)
422 break;
423 }
424 } while (sz && repeat);
425
426 if (sz)
427 return -EINVAL;
428
429 if (sg)
430 sg_mark_end(sg);
431
432 return 0;
433 }
434
435 /*
436 * Map memory into a scatterlist so that no pages are contiguous. Allow the
437 * same memory to be mapped more than once.
438 */
439 static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
440 unsigned long sz,
441 struct scatterlist *sglist,
442 unsigned int max_segs,
443 unsigned int max_seg_sz,
444 unsigned int *sg_len)
445 {
446 struct scatterlist *sg = NULL;
447 unsigned int i = mem->cnt, cnt;
448 unsigned long len;
449 void *base, *addr, *last_addr = NULL;
450
451 sg_init_table(sglist, max_segs);
452
453 *sg_len = 0;
454 while (sz) {
455 base = page_address(mem->arr[--i].page);
456 cnt = 1 << mem->arr[i].order;
457 while (sz && cnt) {
458 addr = base + PAGE_SIZE * --cnt;
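			/* Skip pages contiguous with the previous segment to force scatter */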
459 if (last_addr && last_addr + PAGE_SIZE == addr)
460 continue;
461 last_addr = addr;
462 len = PAGE_SIZE;
463 if (len > max_seg_sz)
464 len = max_seg_sz;
465 if (len > sz)
466 len = sz;
467 if (sg)
468 sg = sg_next(sg);
469 else
470 sg = sglist;
471 if (!sg)
472 return -EINVAL;
473 sg_set_page(sg, virt_to_page(addr), len, 0);
474 sz -= len;
475 *sg_len += 1;
476 }
477 if (i == 0)
478 i = mem->cnt;
479 }
480
481 if (sg)
482 sg_mark_end(sg);
483
484 return 0;
485 }
486
487 /*
488 * Calculate transfer rate in bytes per second.
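 * Worked example (numbers chosen for illustration): 524288 bytes in
 * 0.25 s gives ns = 250000000, so the result is
 * 524288 * 1000000000 / 250000000 = 2097152 bytes/s, i.e. 2 MiB/s.
 *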
489 */
490 static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
491 {
492 uint64_t ns;
493
494 ns = ts->tv_sec;
495 ns *= 1000000000;
496 ns += ts->tv_nsec;
497
498 bytes *= 1000000000;
499
500 while (ns > UINT_MAX) {
501 bytes >>= 1;
502 ns >>= 1;
503 }
504
505 if (!ns)
506 return 0;
507
508 do_div(bytes, (uint32_t)ns);
509
510 return bytes;
511 }
512
513 /*
514 * Save transfer results for future usage
515 */
516 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
517 unsigned int count, unsigned int sectors, struct timespec ts,
518 unsigned int rate, unsigned int iops)
519 {
520 struct mmc_test_transfer_result *tr;
521
522 if (!test->gr)
523 return;
524
525 tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
526 if (!tr)
527 return;
528
529 tr->count = count;
530 tr->sectors = sectors;
531 tr->ts = ts;
532 tr->rate = rate;
533 tr->iops = iops;
534
535 list_add_tail(&tr->link, &test->gr->tr_lst);
536 }
537
538 /*
539 * Print the transfer rate.
540 */
541 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
542 struct timespec *ts1, struct timespec *ts2)
543 {
544 unsigned int rate, iops, sectors = bytes >> 9;
545 struct timespec ts;
546
547 ts = timespec_sub(*ts2, *ts1);
548
549 rate = mmc_test_rate(bytes, &ts);
550 iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
551
552 pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
553 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
554 mmc_hostname(test->card->host), sectors, sectors >> 1,
555 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
556 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
557 iops / 100, iops % 100);
558
559 mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
560 }
561
562 /*
563 * Print the average transfer rate.
564 */
565 static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
566 unsigned int count, struct timespec *ts1,
567 struct timespec *ts2)
568 {
569 unsigned int rate, iops, sectors = bytes >> 9;
570 uint64_t tot = bytes * count;
571 struct timespec ts;
572
573 ts = timespec_sub(*ts2, *ts1);
574
575 rate = mmc_test_rate(tot, &ts);
576 iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
577
578 pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
579 "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
580 "%u.%02u IOPS, sg_len %d)\n",
581 mmc_hostname(test->card->host), count, sectors, count,
582 sectors >> 1, (sectors & 1 ? ".5" : ""),
583 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
584 rate / 1000, rate / 1024, iops / 100, iops % 100,
585 test->area.sg_len);
586
587 mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
588 }
589
590 /*
591 * Return the card size in sectors.
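 * For example (values assumed for illustration): a byte-addressed card
 * with csd.capacity = 0x3c00 and read_blkbits = 10 reports
 * 0x3c00 << 1 = 30720 sectors, i.e. 15 MiB.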
592 */
593 static unsigned int mmc_test_capacity(struct mmc_card *card)
594 {
595 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
596 return card->ext_csd.sectors;
597 else
598 return card->csd.capacity << (card->csd.read_blkbits - 9);
599 }
600
601 /*******************************************************************/
602 /* Test preparation and cleanup */
603 /*******************************************************************/
604
605 /*
606 * Fill the first BUFFER_SIZE / 512 sectors of the card with known data
607 * so that bad reads/writes can be detected.
608 */
609 static int __mmc_test_prepare(struct mmc_test_card *test, int write)
610 {
611 int ret, i;
612
613 ret = mmc_test_set_blksize(test, 512);
614 if (ret)
615 return ret;
616
617 	if (write) {
618 		memset(test->buffer, 0xDF, 512);
619 	} else {
620 		for (i = 0; i < 512; i++)
621 			test->buffer[i] = i;
622 	}
623
624 	for (i = 0; i < BUFFER_SIZE / 512; i++) {
625 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
626 if (ret)
627 return ret;
628 }
629
630 return 0;
631 }
632
633 static int mmc_test_prepare_write(struct mmc_test_card *test)
634 {
635 return __mmc_test_prepare(test, 1);
636 }
637
638 static int mmc_test_prepare_read(struct mmc_test_card *test)
639 {
640 return __mmc_test_prepare(test, 0);
641 }
642
643 static int mmc_test_cleanup(struct mmc_test_card *test)
644 {
645 int ret, i;
646
647 ret = mmc_test_set_blksize(test, 512);
648 if (ret)
649 return ret;
650
651 memset(test->buffer, 0, 512);
652
653 	for (i = 0; i < BUFFER_SIZE / 512; i++) {
654 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
655 if (ret)
656 return ret;
657 }
658
659 return 0;
660 }
661
662 /*******************************************************************/
663 /* Test execution helpers */
664 /*******************************************************************/
665
666 /*
667 * Modifies the mmc_request to perform the "short transfer" tests
668 */
669 static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
670 struct mmc_request *mrq, int write)
671 {
672 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
673
674 if (mrq->data->blocks > 1) {
675 mrq->cmd->opcode = write ?
676 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
677 mrq->stop = NULL;
678 } else {
679 mrq->cmd->opcode = MMC_SEND_STATUS;
680 mrq->cmd->arg = test->card->rca << 16;
681 }
682 }
683
684 /*
685 * Checks that a normal transfer didn't have any errors
686 */
687 static int mmc_test_check_result(struct mmc_test_card *test,
688 struct mmc_request *mrq)
689 {
690 int ret;
691
692 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
693
694 ret = 0;
695
696 if (!ret && mrq->cmd->error)
697 ret = mrq->cmd->error;
698 if (!ret && mrq->data->error)
699 ret = mrq->data->error;
700 if (!ret && mrq->stop && mrq->stop->error)
701 ret = mrq->stop->error;
702 if (!ret && mrq->data->bytes_xfered !=
703 mrq->data->blocks * mrq->data->blksz)
704 ret = RESULT_FAIL;
705
706 if (ret == -EINVAL)
707 ret = RESULT_UNSUP_HOST;
708
709 return ret;
710 }
711
712 static int mmc_test_check_result_async(struct mmc_card *card,
713 struct mmc_async_req *areq)
714 {
715 struct mmc_test_async_req *test_async =
716 container_of(areq, struct mmc_test_async_req, areq);
717
718 mmc_test_wait_busy(test_async->test);
719
720 return mmc_test_check_result(test_async->test, areq->mrq);
721 }
722
723 /*
724 * Checks that a "short transfer" behaved as expected
725 */
726 static int mmc_test_check_broken_result(struct mmc_test_card *test,
727 struct mmc_request *mrq)
728 {
729 int ret;
730
731 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
732
733 ret = 0;
734
735 if (!ret && mrq->cmd->error)
736 ret = mrq->cmd->error;
737 if (!ret && mrq->data->error == 0)
738 ret = RESULT_FAIL;
739 if (!ret && mrq->data->error != -ETIMEDOUT)
740 ret = mrq->data->error;
741 if (!ret && mrq->stop && mrq->stop->error)
742 ret = mrq->stop->error;
743 if (mrq->data->blocks > 1) {
744 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
745 ret = RESULT_FAIL;
746 } else {
747 if (!ret && mrq->data->bytes_xfered > 0)
748 ret = RESULT_FAIL;
749 }
750
751 if (ret == -EINVAL)
752 ret = RESULT_UNSUP_HOST;
753
754 return ret;
755 }
756
757 /*
758 * Reset the request structures used by the non-blocking transfer test
759 */
760 static void mmc_test_nonblock_reset(struct mmc_request *mrq,
761 struct mmc_command *cmd,
762 struct mmc_command *stop,
763 struct mmc_data *data)
764 {
765 memset(mrq, 0, sizeof(struct mmc_request));
766 memset(cmd, 0, sizeof(struct mmc_command));
767 memset(data, 0, sizeof(struct mmc_data));
768 memset(stop, 0, sizeof(struct mmc_command));
769
770 mrq->cmd = cmd;
771 mrq->data = data;
772 mrq->stop = stop;
773 }
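
/*
 * Run non-blocking transfers back to back, keeping two requests in
 * flight so one can be prepared while the other is in progress.
 */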
774 static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
775 struct scatterlist *sg, unsigned sg_len,
776 unsigned dev_addr, unsigned blocks,
777 unsigned blksz, int write, int count)
778 {
779 struct mmc_request mrq1;
780 struct mmc_command cmd1;
781 struct mmc_command stop1;
782 struct mmc_data data1;
783
784 struct mmc_request mrq2;
785 struct mmc_command cmd2;
786 struct mmc_command stop2;
787 struct mmc_data data2;
788
789 struct mmc_test_async_req test_areq[2];
790 struct mmc_async_req *done_areq;
791 struct mmc_async_req *cur_areq = &test_areq[0].areq;
792 struct mmc_async_req *other_areq = &test_areq[1].areq;
793 int i;
794 int ret;
795
796 test_areq[0].test = test;
797 test_areq[1].test = test;
798
799 mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
800 mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
801
802 cur_areq->mrq = &mrq1;
803 cur_areq->err_check = mmc_test_check_result_async;
804 other_areq->mrq = &mrq2;
805 other_areq->err_check = mmc_test_check_result_async;
806
807 for (i = 0; i < count; i++) {
808 mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
809 blocks, blksz, write);
810 done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
811
812 if (ret || (!done_areq && i > 0))
813 goto err;
814
815 if (done_areq) {
816 if (done_areq->mrq == &mrq2)
817 mmc_test_nonblock_reset(&mrq2, &cmd2,
818 &stop2, &data2);
819 else
820 mmc_test_nonblock_reset(&mrq1, &cmd1,
821 &stop1, &data1);
822 }
823 swap(cur_areq, other_areq);
824 dev_addr += blocks;
825 }
826
827 	mmc_start_req(test->card->host, NULL, &ret);
828 
829 err:
830 	return ret;
832 }
833
834 /*
835 * Tests a basic transfer with certain parameters
836 */
837 static int mmc_test_simple_transfer(struct mmc_test_card *test,
838 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
839 unsigned blocks, unsigned blksz, int write)
840 {
841 struct mmc_request mrq = {0};
842 struct mmc_command cmd = {0};
843 struct mmc_command stop = {0};
844 struct mmc_data data = {0};
845
846 mrq.cmd = &cmd;
847 mrq.data = &data;
848 mrq.stop = &stop;
849
850 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
851 blocks, blksz, write);
852
853 mmc_wait_for_req(test->card->host, &mrq);
854
855 mmc_test_wait_busy(test);
856
857 return mmc_test_check_result(test, &mrq);
858 }
859
860 /*
861 * Tests a transfer where the card will fail completely or partly
862 */
863 static int mmc_test_broken_transfer(struct mmc_test_card *test,
864 unsigned blocks, unsigned blksz, int write)
865 {
866 struct mmc_request mrq = {0};
867 struct mmc_command cmd = {0};
868 struct mmc_command stop = {0};
869 struct mmc_data data = {0};
870
871 struct scatterlist sg;
872
873 mrq.cmd = &cmd;
874 mrq.data = &data;
875 mrq.stop = &stop;
876
877 sg_init_one(&sg, test->buffer, blocks * blksz);
878
879 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
880 mmc_test_prepare_broken_mrq(test, &mrq, write);
881
882 mmc_wait_for_req(test->card->host, &mrq);
883
884 mmc_test_wait_busy(test);
885
886 return mmc_test_check_broken_result(test, &mrq);
887 }
888
889 /*
890 * Does a complete transfer test where data is also validated
891 *
892 * Note: mmc_test_prepare() must have been done before this call
893 */
894 static int mmc_test_transfer(struct mmc_test_card *test,
895 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
896 unsigned blocks, unsigned blksz, int write)
897 {
898 int ret, i;
899 unsigned long flags;
900
901 if (write) {
902 		for (i = 0; i < blocks * blksz; i++)
903 test->scratch[i] = i;
904 } else {
905 memset(test->scratch, 0, BUFFER_SIZE);
906 }
907 local_irq_save(flags);
908 sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
909 local_irq_restore(flags);
910
911 ret = mmc_test_set_blksize(test, blksz);
912 if (ret)
913 return ret;
914
915 ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
916 blocks, blksz, write);
917 if (ret)
918 return ret;
919
920 if (write) {
921 int sectors;
922
923 ret = mmc_test_set_blksize(test, 512);
924 if (ret)
925 return ret;
926
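		/*
		 * If the data ends exactly on a sector boundary, check one
		 * extra sector so the 0xDF filler written by
		 * mmc_test_prepare_write() is verified beyond the transfer.
		 */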
927 sectors = (blocks * blksz + 511) / 512;
928 if ((sectors * 512) == (blocks * blksz))
929 sectors++;
930
931 if ((sectors * 512) > BUFFER_SIZE)
932 return -EINVAL;
933
934 memset(test->buffer, 0, sectors * 512);
935
936 		for (i = 0; i < sectors; i++) {
937 ret = mmc_test_buffer_transfer(test,
938 test->buffer + i * 512,
939 dev_addr + i, 512, 0);
940 if (ret)
941 return ret;
942 }
943
944 		for (i = 0; i < blocks * blksz; i++) {
945 			if (test->buffer[i] != (u8)i)
946 				return RESULT_FAIL;
947 		}
948 
949 		for (; i < sectors * 512; i++) {
950 if (test->buffer[i] != 0xDF)
951 return RESULT_FAIL;
952 }
953 } else {
954 local_irq_save(flags);
955 sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
956 local_irq_restore(flags);
957 		for (i = 0; i < blocks * blksz; i++) {
958 if (test->scratch[i] != (u8)i)
959 return RESULT_FAIL;
960 }
961 }
962
963 return 0;
964 }
965
966 /*******************************************************************/
967 /* Tests */
968 /*******************************************************************/
969
970 struct mmc_test_case {
971 const char *name;
972
973 int (*prepare)(struct mmc_test_card *);
974 int (*run)(struct mmc_test_card *);
975 int (*cleanup)(struct mmc_test_card *);
976 };
977
978 static int mmc_test_basic_write(struct mmc_test_card *test)
979 {
980 int ret;
981 struct scatterlist sg;
982
983 ret = mmc_test_set_blksize(test, 512);
984 if (ret)
985 return ret;
986
987 sg_init_one(&sg, test->buffer, 512);
988
989 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
990 }
991
992 static int mmc_test_basic_read(struct mmc_test_card *test)
993 {
994 int ret;
995 struct scatterlist sg;
996
997 ret = mmc_test_set_blksize(test, 512);
998 if (ret)
999 return ret;
1000
1001 sg_init_one(&sg, test->buffer, 512);
1002
1003 return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
1004 }
1005
1006 static int mmc_test_verify_write(struct mmc_test_card *test)
1007 {
1008 struct scatterlist sg;
1009
1010 sg_init_one(&sg, test->buffer, 512);
1011
1012 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1013 }
1014
1015 static int mmc_test_verify_read(struct mmc_test_card *test)
1016 {
1017 struct scatterlist sg;
1018
1019 sg_init_one(&sg, test->buffer, 512);
1020
1021 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1022 }
1023
1024 static int mmc_test_multi_write(struct mmc_test_card *test)
1025 {
1026 unsigned int size;
1027 struct scatterlist sg;
1028
1029 if (test->card->host->max_blk_count == 1)
1030 return RESULT_UNSUP_HOST;
1031
1032 size = PAGE_SIZE * 2;
1033 size = min(size, test->card->host->max_req_size);
1034 size = min(size, test->card->host->max_seg_size);
1035 size = min(size, test->card->host->max_blk_count * 512);
1036
1037 if (size < 1024)
1038 return RESULT_UNSUP_HOST;
1039
1040 sg_init_one(&sg, test->buffer, size);
1041
1042 	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1043 }
1044
1045 static int mmc_test_multi_read(struct mmc_test_card *test)
1046 {
1047 unsigned int size;
1048 struct scatterlist sg;
1049
1050 if (test->card->host->max_blk_count == 1)
1051 return RESULT_UNSUP_HOST;
1052
1053 size = PAGE_SIZE * 2;
1054 size = min(size, test->card->host->max_req_size);
1055 size = min(size, test->card->host->max_seg_size);
1056 size = min(size, test->card->host->max_blk_count * 512);
1057
1058 if (size < 1024)
1059 return RESULT_UNSUP_HOST;
1060
1061 sg_init_one(&sg, test->buffer, size);
1062
1063 	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1064 }
1065
1066 static int mmc_test_pow2_write(struct mmc_test_card *test)
1067 {
1068 int ret, i;
1069 struct scatterlist sg;
1070
1071 if (!test->card->csd.write_partial)
1072 return RESULT_UNSUP_CARD;
1073
1074 	for (i = 1; i < 512; i <<= 1) {
1075 sg_init_one(&sg, test->buffer, i);
1076 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1077 if (ret)
1078 return ret;
1079 }
1080
1081 return 0;
1082 }
1083
1084 static int mmc_test_pow2_read(struct mmc_test_card *test)
1085 {
1086 int ret, i;
1087 struct scatterlist sg;
1088
1089 if (!test->card->csd.read_partial)
1090 return RESULT_UNSUP_CARD;
1091
1092 	for (i = 1; i < 512; i <<= 1) {
1093 sg_init_one(&sg, test->buffer, i);
1094 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1095 if (ret)
1096 return ret;
1097 }
1098
1099 return 0;
1100 }
1101
1102 static int mmc_test_weird_write(struct mmc_test_card *test)
1103 {
1104 int ret, i;
1105 struct scatterlist sg;
1106
1107 if (!test->card->csd.write_partial)
1108 return RESULT_UNSUP_CARD;
1109
1110 	for (i = 3; i < 512; i += 7) {
1111 sg_init_one(&sg, test->buffer, i);
1112 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
1113 if (ret)
1114 return ret;
1115 }
1116
1117 return 0;
1118 }
1119
1120 static int mmc_test_weird_read(struct mmc_test_card *test)
1121 {
1122 int ret, i;
1123 struct scatterlist sg;
1124
1125 if (!test->card->csd.read_partial)
1126 return RESULT_UNSUP_CARD;
1127
1128 	for (i = 3; i < 512; i += 7) {
1129 sg_init_one(&sg, test->buffer, i);
1130 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
1131 if (ret)
1132 return ret;
1133 }
1134
1135 return 0;
1136 }
1137
1138 static int mmc_test_align_write(struct mmc_test_card *test)
1139 {
1140 int ret, i;
1141 struct scatterlist sg;
1142
1143 for (i = 1; i < TEST_ALIGN_END; i++) {
1144 sg_init_one(&sg, test->buffer + i, 512);
1145 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1146 if (ret)
1147 return ret;
1148 }
1149
1150 return 0;
1151 }
1152
1153 static int mmc_test_align_read(struct mmc_test_card *test)
1154 {
1155 int ret, i;
1156 struct scatterlist sg;
1157
1158 for (i = 1; i < TEST_ALIGN_END; i++) {
1159 sg_init_one(&sg, test->buffer + i, 512);
1160 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1161 if (ret)
1162 return ret;
1163 }
1164
1165 return 0;
1166 }
1167
1168 static int mmc_test_align_multi_write(struct mmc_test_card *test)
1169 {
1170 int ret, i;
1171 unsigned int size;
1172 struct scatterlist sg;
1173
1174 if (test->card->host->max_blk_count == 1)
1175 return RESULT_UNSUP_HOST;
1176
1177 size = PAGE_SIZE * 2;
1178 size = min(size, test->card->host->max_req_size);
1179 size = min(size, test->card->host->max_seg_size);
1180 size = min(size, test->card->host->max_blk_count * 512);
1181
1182 if (size < 1024)
1183 return RESULT_UNSUP_HOST;
1184
1185 for (i = 1; i < TEST_ALIGN_END; i++) {
1186 sg_init_one(&sg, test->buffer + i, size);
1187 		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1188 if (ret)
1189 return ret;
1190 }
1191
1192 return 0;
1193 }
1194
1195 static int mmc_test_align_multi_read(struct mmc_test_card *test)
1196 {
1197 int ret, i;
1198 unsigned int size;
1199 struct scatterlist sg;
1200
1201 if (test->card->host->max_blk_count == 1)
1202 return RESULT_UNSUP_HOST;
1203
1204 size = PAGE_SIZE * 2;
1205 size = min(size, test->card->host->max_req_size);
1206 size = min(size, test->card->host->max_seg_size);
1207 size = min(size, test->card->host->max_blk_count * 512);
1208
1209 if (size < 1024)
1210 return RESULT_UNSUP_HOST;
1211
1212 for (i = 1; i < TEST_ALIGN_END; i++) {
1213 sg_init_one(&sg, test->buffer + i, size);
1214 		ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1215 if (ret)
1216 return ret;
1217 }
1218
1219 return 0;
1220 }
1221
1222 static int mmc_test_xfersize_write(struct mmc_test_card *test)
1223 {
1224 int ret;
1225
1226 ret = mmc_test_set_blksize(test, 512);
1227 if (ret)
1228 return ret;
1229
1230 return mmc_test_broken_transfer(test, 1, 512, 1);
1231 }
1232
1233 static int mmc_test_xfersize_read(struct mmc_test_card *test)
1234 {
1235 int ret;
1236
1237 ret = mmc_test_set_blksize(test, 512);
1238 if (ret)
1239 return ret;
1240
1241 return mmc_test_broken_transfer(test, 1, 512, 0);
1242 }
1243
1244 static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1245 {
1246 int ret;
1247
1248 if (test->card->host->max_blk_count == 1)
1249 return RESULT_UNSUP_HOST;
1250
1251 ret = mmc_test_set_blksize(test, 512);
1252 if (ret)
1253 return ret;
1254
1255 return mmc_test_broken_transfer(test, 2, 512, 1);
1256 }
1257
1258 static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1259 {
1260 int ret;
1261
1262 if (test->card->host->max_blk_count == 1)
1263 return RESULT_UNSUP_HOST;
1264
1265 ret = mmc_test_set_blksize(test, 512);
1266 if (ret)
1267 return ret;
1268
1269 return mmc_test_broken_transfer(test, 2, 512, 0);
1270 }
1271
1272 #ifdef CONFIG_HIGHMEM
1273
1274 static int mmc_test_write_high(struct mmc_test_card *test)
1275 {
1276 struct scatterlist sg;
1277
1278 sg_init_table(&sg, 1);
1279 sg_set_page(&sg, test->highmem, 512, 0);
1280
1281 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1282 }
1283
1284 static int mmc_test_read_high(struct mmc_test_card *test)
1285 {
1286 struct scatterlist sg;
1287
1288 sg_init_table(&sg, 1);
1289 sg_set_page(&sg, test->highmem, 512, 0);
1290
1291 return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1292 }
1293
1294 static int mmc_test_multi_write_high(struct mmc_test_card *test)
1295 {
1296 unsigned int size;
1297 struct scatterlist sg;
1298
1299 if (test->card->host->max_blk_count == 1)
1300 return RESULT_UNSUP_HOST;
1301
1302 size = PAGE_SIZE * 2;
1303 size = min(size, test->card->host->max_req_size);
1304 size = min(size, test->card->host->max_seg_size);
1305 size = min(size, test->card->host->max_blk_count * 512);
1306
1307 if (size < 1024)
1308 return RESULT_UNSUP_HOST;
1309
1310 sg_init_table(&sg, 1);
1311 sg_set_page(&sg, test->highmem, size, 0);
1312
1313 	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
1314 }
1315
1316 static int mmc_test_multi_read_high(struct mmc_test_card *test)
1317 {
1318 unsigned int size;
1319 struct scatterlist sg;
1320
1321 if (test->card->host->max_blk_count == 1)
1322 return RESULT_UNSUP_HOST;
1323
1324 size = PAGE_SIZE * 2;
1325 size = min(size, test->card->host->max_req_size);
1326 size = min(size, test->card->host->max_seg_size);
1327 size = min(size, test->card->host->max_blk_count * 512);
1328
1329 if (size < 1024)
1330 return RESULT_UNSUP_HOST;
1331
1332 sg_init_table(&sg, 1);
1333 sg_set_page(&sg, test->highmem, size, 0);
1334
1335 	return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
1336 }
1337
1338 #else
1339
1340 static int mmc_test_no_highmem(struct mmc_test_card *test)
1341 {
1342 pr_info("%s: Highmem not configured - test skipped\n",
1343 mmc_hostname(test->card->host));
1344 return 0;
1345 }
1346
1347 #endif /* CONFIG_HIGHMEM */
1348
1349 /*
1350 * Map sz bytes so that it can be transferred.
1351 */
1352 static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
1353 int max_scatter, int min_sg_len)
1354 {
1355 struct mmc_test_area *t = &test->area;
1356 int err;
1357
1358 t->blocks = sz >> 9;
1359
1360 if (max_scatter) {
1361 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1362 t->max_segs, t->max_seg_sz,
1363 &t->sg_len);
1364 } else {
1365 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1366 t->max_seg_sz, &t->sg_len, min_sg_len);
1367 }
1368 if (err)
1369 pr_info("%s: Failed to map sg list\n",
1370 mmc_hostname(test->card->host));
1371 return err;
1372 }
1373
1374 /*
1375 * Transfer bytes mapped by mmc_test_area_map().
1376 */
1377 static int mmc_test_area_transfer(struct mmc_test_card *test,
1378 unsigned int dev_addr, int write)
1379 {
1380 struct mmc_test_area *t = &test->area;
1381
1382 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1383 t->blocks, 512, write);
1384 }
1385
1386 /*
1387 * Map and transfer bytes for multiple transfers.
1388 */
1389 static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1390 unsigned int dev_addr, int write,
1391 int max_scatter, int timed, int count,
1392 bool nonblock, int min_sg_len)
1393 {
1394 struct timespec ts1, ts2;
1395 int ret = 0;
1396 int i;
1397 struct mmc_test_area *t = &test->area;
1398
1399 /*
1400 * In the case of a maximally scattered transfer, the maximum transfer
1401 * size is further limited by using PAGE_SIZE segments.
1402 */
1403 if (max_scatter) {
1404 		unsigned long max_tfr;
1405 
1407 if (t->max_seg_sz >= PAGE_SIZE)
1408 max_tfr = t->max_segs * PAGE_SIZE;
1409 else
1410 max_tfr = t->max_segs * t->max_seg_sz;
1411 if (sz > max_tfr)
1412 sz = max_tfr;
1413 }
1414
1415 ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
1416 if (ret)
1417 return ret;
1418
1419 if (timed)
1420 getnstimeofday(&ts1);
1421 if (nonblock)
1422 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1423 dev_addr, t->blocks, 512, write, count);
1424 else
1425 for (i = 0; i < count && ret == 0; i++) {
1426 ret = mmc_test_area_transfer(test, dev_addr, write);
1427 dev_addr += sz >> 9;
1428 }
1429
1430 if (ret)
1431 return ret;
1432
1433 if (timed)
1434 getnstimeofday(&ts2);
1435
1436 if (timed)
1437 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
1438
1439 return 0;
1440 }
1441
1442 static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1443 unsigned int dev_addr, int write, int max_scatter,
1444 int timed)
1445 {
1446 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1447 timed, 1, false, 0);
1448 }
1449
1450 /*
1451 * Write the test area entirely.
1452 */
1453 static int mmc_test_area_fill(struct mmc_test_card *test)
1454 {
1455 struct mmc_test_area *t = &test->area;
1456
1457 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
1458 }
1459
1460 /*
1461 * Erase the test area entirely.
1462 */
1463 static int mmc_test_area_erase(struct mmc_test_card *test)
1464 {
1465 struct mmc_test_area *t = &test->area;
1466
1467 if (!mmc_can_erase(test->card))
1468 return 0;
1469
1470 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
1471 MMC_ERASE_ARG);
1472 }
1473
1474 /*
1475 * Cleanup struct mmc_test_area.
1476 */
1477 static int mmc_test_area_cleanup(struct mmc_test_card *test)
1478 {
1479 struct mmc_test_area *t = &test->area;
1480
1481 kfree(t->sg);
1482 mmc_test_free_mem(t->mem);
1483
1484 return 0;
1485 }
1486
1487 /*
1488 * Initialize an area for testing large transfers. The test area is set to the
1489 * middle of the card because cards may have different characteristics at the
1490 * front (for FAT file system optimization). Optionally, the area is erased
1491 * (if the card supports it) which may improve write performance. Optionally,
1492 * the area is filled with data for subsequent read tests.
1493 */
1494 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1495 {
1496 struct mmc_test_area *t = &test->area;
1497 unsigned long min_sz = 64 * 1024, sz;
1498 int ret;
1499
1500 ret = mmc_test_set_blksize(test, 512);
1501 if (ret)
1502 return ret;
1503
1504 /* Make the test area size about 4MiB */
1505 sz = (unsigned long)test->card->pref_erase << 9;
1506 t->max_sz = sz;
1507 while (t->max_sz < 4 * 1024 * 1024)
1508 t->max_sz += sz;
1509 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1510 t->max_sz -= sz;
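	/*
	 * Example (pref_erase value assumed): pref_erase = 1024 sectors
	 * gives sz = 512 KiB, the first loop grows max_sz to exactly
	 * 4 MiB, and the second loop leaves it unchanged since 4 MiB is
	 * well below TEST_AREA_MAX_SIZE.
	 */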
1511
1512 t->max_segs = test->card->host->max_segs;
1513 t->max_seg_sz = test->card->host->max_seg_size;
1514 t->max_seg_sz -= t->max_seg_sz % 512;
1515
1516 t->max_tfr = t->max_sz;
1517 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1518 t->max_tfr = test->card->host->max_blk_count << 9;
1519 if (t->max_tfr > test->card->host->max_req_size)
1520 t->max_tfr = test->card->host->max_req_size;
1521 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1522 t->max_tfr = t->max_segs * t->max_seg_sz;
1523
1524 /*
1525 * Try to allocate enough memory for a maximum-sized transfer. Less is OK
1526 * because the same memory can be mapped into the scatterlist more than
1527 * once. Also, take into account the limits imposed on scatterlist
1528 * segments by the host driver.
1529 */
1530 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
1531 t->max_seg_sz);
1532 if (!t->mem)
1533 return -ENOMEM;
1534
1535 	t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
1536 if (!t->sg) {
1537 ret = -ENOMEM;
1538 goto out_free;
1539 }
1540
1541 t->dev_addr = mmc_test_capacity(test->card) / 2;
1542 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1543
1544 if (erase) {
1545 ret = mmc_test_area_erase(test);
1546 if (ret)
1547 goto out_free;
1548 }
1549
1550 if (fill) {
1551 ret = mmc_test_area_fill(test);
1552 if (ret)
1553 goto out_free;
1554 }
1555
1556 return 0;
1557
1558 out_free:
1559 mmc_test_area_cleanup(test);
1560 return ret;
1561 }
1562
1563 /*
1564 * Prepare for large transfers. Do not erase the test area.
1565 */
1566 static int mmc_test_area_prepare(struct mmc_test_card *test)
1567 {
1568 return mmc_test_area_init(test, 0, 0);
1569 }
1570
1571 /*
1572 * Prepare for large transfers. Do erase the test area.
1573 */
1574 static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
1575 {
1576 return mmc_test_area_init(test, 1, 0);
1577 }
1578
1579 /*
1580 * Prepare for large transfers. Erase and fill the test area.
1581 */
1582 static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
1583 {
1584 return mmc_test_area_init(test, 1, 1);
1585 }
1586
1587 /*
1588 * Test best-case performance. Best-case performance is expected from
1589 * a single large transfer.
1590 *
1591 * An additional option (max_scatter) allows the measurement of the same
1592 * transfer but with no contiguous pages in the scatter list. This tests
1593 * the efficiency of DMA to handle scattered pages.
1594 */
1595 static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1596 int max_scatter)
1597 {
1598 struct mmc_test_area *t = &test->area;
1599
1600 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1601 max_scatter, 1);
1602 }
1603
1604 /*
1605 * Best-case read performance.
1606 */
1607 static int mmc_test_best_read_performance(struct mmc_test_card *test)
1608 {
1609 return mmc_test_best_performance(test, 0, 0);
1610 }
1611
1612 /*
1613 * Best-case write performance.
1614 */
1615 static int mmc_test_best_write_performance(struct mmc_test_card *test)
1616 {
1617 return mmc_test_best_performance(test, 1, 0);
1618 }
1619
1620 /*
1621 * Best-case read performance into scattered pages.
1622 */
1623 static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1624 {
1625 return mmc_test_best_performance(test, 0, 1);
1626 }
1627
1628 /*
1629 * Best-case write performance from scattered pages.
1630 */
1631 static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1632 {
1633 return mmc_test_best_performance(test, 1, 1);
1634 }
1635
1636 /*
1637 * Single read performance by transfer size.
1638 */
1639 static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1640 {
1641 struct mmc_test_area *t = &test->area;
1642 unsigned long sz;
1643 unsigned int dev_addr;
1644 int ret;
1645
1646 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1647 dev_addr = t->dev_addr + (sz >> 9);
1648 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1649 if (ret)
1650 return ret;
1651 }
1652 sz = t->max_tfr;
1653 dev_addr = t->dev_addr;
1654 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1655 }
1656
1657 /*
1658 * Single write performance by transfer size.
1659 */
1660 static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1661 {
1662 struct mmc_test_area *t = &test->area;
1663 unsigned long sz;
1664 unsigned int dev_addr;
1665 int ret;
1666
1667 ret = mmc_test_area_erase(test);
1668 if (ret)
1669 return ret;
1670 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1671 dev_addr = t->dev_addr + (sz >> 9);
1672 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1673 if (ret)
1674 return ret;
1675 }
1676 ret = mmc_test_area_erase(test);
1677 if (ret)
1678 return ret;
1679 sz = t->max_tfr;
1680 dev_addr = t->dev_addr;
1681 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1682 }
1683
1684 /*
1685 * Single trim performance by transfer size.
1686 */
1687 static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1688 {
1689 struct mmc_test_area *t = &test->area;
1690 unsigned long sz;
1691 unsigned int dev_addr;
1692 struct timespec ts1, ts2;
1693 int ret;
1694
1695 if (!mmc_can_trim(test->card))
1696 return RESULT_UNSUP_CARD;
1697
1698 if (!mmc_can_erase(test->card))
1699 return RESULT_UNSUP_HOST;
1700
1701 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1702 dev_addr = t->dev_addr + (sz >> 9);
1703 getnstimeofday(&ts1);
1704 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1705 if (ret)
1706 return ret;
1707 getnstimeofday(&ts2);
1708 mmc_test_print_rate(test, sz, &ts1, &ts2);
1709 }
1710 dev_addr = t->dev_addr;
1711 getnstimeofday(&ts1);
1712 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1713 if (ret)
1714 return ret;
1715 getnstimeofday(&ts2);
1716 mmc_test_print_rate(test, sz, &ts1, &ts2);
1717 return 0;
1718 }
1719
1720 static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1721 {
1722 struct mmc_test_area *t = &test->area;
1723 unsigned int dev_addr, i, cnt;
1724 struct timespec ts1, ts2;
1725 int ret;
1726
1727 cnt = t->max_sz / sz;
1728 dev_addr = t->dev_addr;
1729 getnstimeofday(&ts1);
1730 for (i = 0; i < cnt; i++) {
1731 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1732 if (ret)
1733 return ret;
1734 dev_addr += (sz >> 9);
1735 }
1736 getnstimeofday(&ts2);
1737 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1738 return 0;
1739 }
1740
1741 /*
1742 * Consecutive read performance by transfer size.
1743 */
1744 static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1745 {
1746 struct mmc_test_area *t = &test->area;
1747 unsigned long sz;
1748 int ret;
1749
1750 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1751 ret = mmc_test_seq_read_perf(test, sz);
1752 if (ret)
1753 return ret;
1754 }
1755 sz = t->max_tfr;
1756 return mmc_test_seq_read_perf(test, sz);
1757 }
1758
1759 static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1760 {
1761 struct mmc_test_area *t = &test->area;
1762 unsigned int dev_addr, i, cnt;
1763 struct timespec ts1, ts2;
1764 int ret;
1765
1766 ret = mmc_test_area_erase(test);
1767 if (ret)
1768 return ret;
1769 cnt = t->max_sz / sz;
1770 dev_addr = t->dev_addr;
1771 getnstimeofday(&ts1);
1772 for (i = 0; i < cnt; i++) {
1773 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1774 if (ret)
1775 return ret;
1776 dev_addr += (sz >> 9);
1777 }
1778 getnstimeofday(&ts2);
1779 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1780 return 0;
1781 }
1782
1783 /*
1784 * Consecutive write performance by transfer size.
1785 */
1786 static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1787 {
1788 struct mmc_test_area *t = &test->area;
1789 unsigned long sz;
1790 int ret;
1791
1792 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1793 ret = mmc_test_seq_write_perf(test, sz);
1794 if (ret)
1795 return ret;
1796 }
1797 sz = t->max_tfr;
1798 return mmc_test_seq_write_perf(test, sz);
1799 }
1800
1801 /*
1802 * Consecutive trim performance by transfer size.
1803 */
1804 static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1805 {
1806 struct mmc_test_area *t = &test->area;
1807 unsigned long sz;
1808 unsigned int dev_addr, i, cnt;
1809 struct timespec ts1, ts2;
1810 int ret;
1811
1812 if (!mmc_can_trim(test->card))
1813 return RESULT_UNSUP_CARD;
1814
1815 if (!mmc_can_erase(test->card))
1816 return RESULT_UNSUP_HOST;
1817
1818 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1819 ret = mmc_test_area_erase(test);
1820 if (ret)
1821 return ret;
1822 ret = mmc_test_area_fill(test);
1823 if (ret)
1824 return ret;
1825 cnt = t->max_sz / sz;
1826 dev_addr = t->dev_addr;
1827 getnstimeofday(&ts1);
1828 for (i = 0; i < cnt; i++) {
1829 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1830 MMC_TRIM_ARG);
1831 if (ret)
1832 return ret;
1833 dev_addr += (sz >> 9);
1834 }
1835 getnstimeofday(&ts2);
1836 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1837 }
1838 return 0;
1839 }
1840
1841 static unsigned int rnd_next = 1;
1842
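/*
 * Pseudo-random number in the range [0, rnd_cnt), using the classic
 * rand(3)-style linear congruential generator. Worked example: with
 * rnd_next = 1 and rnd_cnt = 100, rnd_next becomes 1103527590,
 * r = (1103527590 >> 16) & 0x7fff = 16838, and the result is
 * (16838 * 100) >> 15 = 51.
 */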
1843 static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1844 {
1845 uint64_t r;
1846
1847 rnd_next = rnd_next * 1103515245 + 12345;
1848 r = (rnd_next >> 16) & 0x7fff;
1849 return (r * rnd_cnt) >> 15;
1850 }
1851
1852 static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1853 unsigned long sz)
1854 {
1855 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1856 unsigned int ssz;
1857 struct timespec ts1, ts2, ts;
1858 int ret;
1859
1860 ssz = sz >> 9;
1861
1862 rnd_addr = mmc_test_capacity(test->card) / 4;
1863 range1 = rnd_addr / test->card->pref_erase;
1864 range2 = range1 / ssz;
1865
1866 getnstimeofday(&ts1);
1867 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1868 getnstimeofday(&ts2);
1869 ts = timespec_sub(ts2, ts1);
1870 if (ts.tv_sec >= 10)
1871 break;
1872 ea = mmc_test_rnd_num(range1);
1873 if (ea == last_ea)
1874 ea -= 1;
1875 last_ea = ea;
1876 dev_addr = rnd_addr + test->card->pref_erase * ea +
1877 ssz * mmc_test_rnd_num(range2);
1878 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1879 if (ret)
1880 return ret;
1881 }
1882 if (print)
1883 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1884 return 0;
1885 }
1886
1887 static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1888 {
1889 struct mmc_test_area *t = &test->area;
1890 unsigned int next;
1891 unsigned long sz;
1892 int ret;
1893
1894 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1895 /*
1896 * When writing, try to get more consistent results by running
1897 * the test twice with exactly the same I/O but outputting the
1898 * results only for the 2nd run.
1899 */
1900 if (write) {
1901 next = rnd_next;
1902 ret = mmc_test_rnd_perf(test, write, 0, sz);
1903 if (ret)
1904 return ret;
1905 rnd_next = next;
1906 }
1907 ret = mmc_test_rnd_perf(test, write, 1, sz);
1908 if (ret)
1909 return ret;
1910 }
1911 sz = t->max_tfr;
1912 if (write) {
1913 next = rnd_next;
1914 ret = mmc_test_rnd_perf(test, write, 0, sz);
1915 if (ret)
1916 return ret;
1917 rnd_next = next;
1918 }
1919 return mmc_test_rnd_perf(test, write, 1, sz);
1920 }
1921
1922 /*
1923 * Random read performance by transfer size.
1924 */
1925 static int mmc_test_random_read_perf(struct mmc_test_card *test)
1926 {
1927 return mmc_test_random_perf(test, 0);
1928 }
1929
1930 /*
1931 * Random write performance by transfer size.
1932 */
1933 static int mmc_test_random_write_perf(struct mmc_test_card *test)
1934 {
1935 return mmc_test_random_perf(test, 1);
1936 }
1937
1938 static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1939 unsigned int tot_sz, int max_scatter)
1940 {
1941 struct mmc_test_area *t = &test->area;
1942 unsigned int dev_addr, i, cnt, sz, ssz;
1943 struct timespec ts1, ts2;
1944 int ret;
1945
1946 sz = t->max_tfr;
1947
1948 /*
1949 * In the case of a maximally scattered transfer, the maximum transfer
1950 * size is further limited by using PAGE_SIZE segments.
1951 */
1952 if (max_scatter) {
1953 unsigned long max_tfr;
1954
1955 if (t->max_seg_sz >= PAGE_SIZE)
1956 max_tfr = t->max_segs * PAGE_SIZE;
1957 else
1958 max_tfr = t->max_segs * t->max_seg_sz;
1959 if (sz > max_tfr)
1960 sz = max_tfr;
1961 }
1962
1963 ssz = sz >> 9;
1964 dev_addr = mmc_test_capacity(test->card) / 4;
1965 if (tot_sz > dev_addr << 9)
1966 tot_sz = dev_addr << 9;
1967 cnt = tot_sz / sz;
1968 	dev_addr &= 0xffff0000; /* Round to 32MiB (0x10000-sector) boundary */
1969
1970 getnstimeofday(&ts1);
1971 for (i = 0; i < cnt; i++) {
1972 ret = mmc_test_area_io(test, sz, dev_addr, write,
1973 max_scatter, 0);
1974 if (ret)
1975 return ret;
1976 dev_addr += ssz;
1977 }
1978 getnstimeofday(&ts2);
1979
1980 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1981
1982 return 0;
1983 }
1984
1985 static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
1986 {
1987 int ret, i;
1988
1989 for (i = 0; i < 10; i++) {
1990 ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
1991 if (ret)
1992 return ret;
1993 }
1994 for (i = 0; i < 5; i++) {
1995 ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
1996 if (ret)
1997 return ret;
1998 }
1999 for (i = 0; i < 3; i++) {
2000 ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2001 if (ret)
2002 return ret;
2003 }
2004
2005 return ret;
2006 }
2007
2008 /*
2009 * Large sequential read performance.
2010 */
2011 static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2012 {
2013 return mmc_test_large_seq_perf(test, 0);
2014 }
2015
2016 /*
2017 * Large sequential write performance.
2018 */
2019 static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2020 {
2021 return mmc_test_large_seq_perf(test, 1);
2022 }
2023
2024 static int mmc_test_rw_multiple(struct mmc_test_card *test,
2025 struct mmc_test_multiple_rw *tdata,
2026 unsigned int reqsize, unsigned int size,
2027 int min_sg_len)
2028 {
2029 unsigned int dev_addr;
2030 struct mmc_test_area *t = &test->area;
2031 int ret = 0;
2032
2033 /* Set up test area */
2034 if (size > mmc_test_capacity(test->card) / 2 * 512)
2035 size = mmc_test_capacity(test->card) / 2 * 512;
2036 if (reqsize > t->max_tfr)
2037 reqsize = t->max_tfr;
2038 dev_addr = mmc_test_capacity(test->card) / 4;
2039 if ((dev_addr & 0xffff0000))
2040 		dev_addr &= 0xffff0000; /* Round to 32MiB (0x10000-sector) boundary */
2041 else
2042 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2043 if (!dev_addr)
2044 goto err;
2045
2046 if (reqsize > size)
2047 return 0;
2048
2049 /* prepare test area */
2050 if (mmc_can_erase(test->card) &&
2051 tdata->prepare & MMC_TEST_PREP_ERASE) {
2052 ret = mmc_erase(test->card, dev_addr,
2053 size / 512, MMC_SECURE_ERASE_ARG);
2054 if (ret)
2055 ret = mmc_erase(test->card, dev_addr,
2056 size / 512, MMC_ERASE_ARG);
2057 if (ret)
2058 goto err;
2059 }
2060
2061 /* Run test */
2062 ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2063 tdata->do_write, 0, 1, size / reqsize,
2064 tdata->do_nonblock_req, min_sg_len);
2065 if (ret)
2066 goto err;
2067
2068 return ret;
2069 err:
2070 pr_info("[%s] error\n", __func__);
2071 return ret;
2072 }
2073
2074 static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2075 struct mmc_test_multiple_rw *rw)
2076 {
2077 int ret = 0;
2078 int i;
2079 void *pre_req = test->card->host->ops->pre_req;
2080 void *post_req = test->card->host->ops->post_req;
2081
2082 if (rw->do_nonblock_req &&
2083 ((!pre_req && post_req) || (pre_req && !post_req))) {
2084 pr_info("error: only one of pre/post is defined\n");
2085 return -EINVAL;
2086 }
2087
2088 	for (i = 0; i < rw->len && ret == 0; i++) {
2089 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2090 if (ret)
2091 break;
2092 }
2093 return ret;
2094 }
2095
2096 static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2097 struct mmc_test_multiple_rw *rw)
2098 {
2099 int ret = 0;
2100 int i;
2101
2102 	for (i = 0; i < rw->len && ret == 0; i++) {
2103 		ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
2104 rw->sg_len[i]);
2105 if (ret)
2106 break;
2107 }
2108 return ret;
2109 }
2110
2111 /*
2112 * Multiple blocking write, 4 KiB to 4 MiB chunks
2113 */
2114 static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2115 {
2116 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2117 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2118 struct mmc_test_multiple_rw test_data = {
2119 .bs = bs,
2120 .size = TEST_AREA_MAX_SIZE,
2121 .len = ARRAY_SIZE(bs),
2122 .do_write = true,
2123 .do_nonblock_req = false,
2124 .prepare = MMC_TEST_PREP_ERASE,
2125 };
2126
2127 return mmc_test_rw_multiple_size(test, &test_data);
2128 }
2129
2130 /*
2131 * Multiple non-blocking write, 4 KiB to 4 MiB chunks
2132 */
2133 static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2134 {
2135 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2136 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2137 struct mmc_test_multiple_rw test_data = {
2138 .bs = bs,
2139 .size = TEST_AREA_MAX_SIZE,
2140 .len = ARRAY_SIZE(bs),
2141 .do_write = true,
2142 .do_nonblock_req = true,
2143 .prepare = MMC_TEST_PREP_ERASE,
2144 };
2145
2146 return mmc_test_rw_multiple_size(test, &test_data);
2147 }
2148
2149 /*
2150 * Multiple blocking read, 4 KiB to 4 MiB chunks
2151 */
2152 static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2153 {
2154 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2155 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2156 struct mmc_test_multiple_rw test_data = {
2157 .bs = bs,
2158 .size = TEST_AREA_MAX_SIZE,
2159 .len = ARRAY_SIZE(bs),
2160 .do_write = false,
2161 .do_nonblock_req = false,
2162 .prepare = MMC_TEST_PREP_NONE,
2163 };
2164
2165 return mmc_test_rw_multiple_size(test, &test_data);
2166 }
2167
2168 /*
2169 * Multiple non-blocking read 4k to 4 MB chunks
2170 */
2171 static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2172 {
2173 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2174 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2175 struct mmc_test_multiple_rw test_data = {
2176 .bs = bs,
2177 .size = TEST_AREA_MAX_SIZE,
2178 .len = ARRAY_SIZE(bs),
2179 .do_write = false,
2180 .do_nonblock_req = true,
2181 .prepare = MMC_TEST_PREP_NONE,
2182 };
2183
2184 return mmc_test_rw_multiple_size(test, &test_data);
2185 }
2186
2187 /*
2188 * Multiple blocking write 1 to 512 sg elements
2189 */
2190 static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2191 {
2192 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2193 1 << 7, 1 << 8, 1 << 9};
2194 struct mmc_test_multiple_rw test_data = {
2195 .sg_len = sg_len,
2196 .size = TEST_AREA_MAX_SIZE,
2197 .len = ARRAY_SIZE(sg_len),
2198 .do_write = true,
2199 .do_nonblock_req = false,
2200 .prepare = MMC_TEST_PREP_ERASE,
2201 };
2202
2203 return mmc_test_rw_multiple_sg_len(test, &test_data);
2204 }
2205
2206 /*
2207 * Multiple non-blocking write 1 to 512 sg elements
2208 */
2209 static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2210 {
2211 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2212 1 << 7, 1 << 8, 1 << 9};
2213 struct mmc_test_multiple_rw test_data = {
2214 .sg_len = sg_len,
2215 .size = TEST_AREA_MAX_SIZE,
2216 .len = ARRAY_SIZE(sg_len),
2217 .do_write = true,
2218 .do_nonblock_req = true,
2219 .prepare = MMC_TEST_PREP_ERASE,
2220 };
2221
2222 return mmc_test_rw_multiple_sg_len(test, &test_data);
2223 }
2224
2225 /*
2226 * Multiple blocking read 1 to 512 sg elements
2227 */
2228 static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2229 {
2230 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2231 1 << 7, 1 << 8, 1 << 9};
2232 struct mmc_test_multiple_rw test_data = {
2233 .sg_len = sg_len,
2234 .size = TEST_AREA_MAX_SIZE,
2235 .len = ARRAY_SIZE(sg_len),
2236 .do_write = false,
2237 .do_nonblock_req = false,
2238 .prepare = MMC_TEST_PREP_NONE,
2239 };
2240
2241 return mmc_test_rw_multiple_sg_len(test, &test_data);
2242 }
2243
2244 /*
2245 * Multiple non-blocking read 1 to 512 sg elements
2246 */
2247 static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2248 {
2249 unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2250 1 << 7, 1 << 8, 1 << 9};
2251 struct mmc_test_multiple_rw test_data = {
2252 .sg_len = sg_len,
2253 .size = TEST_AREA_MAX_SIZE,
2254 .len = ARRAY_SIZE(sg_len),
2255 .do_write = false,
2256 .do_nonblock_req = true,
2257 .prepare = MMC_TEST_PREP_NONE,
2258 };
2259
2260 return mmc_test_rw_multiple_sg_len(test, &test_data);
2261 }
2262
2263 /*
2264 * eMMC hardware reset.
2265 */
2266 static int mmc_test_reset(struct mmc_test_card *test)
2267 {
2268 struct mmc_card *card = test->card;
2269 struct mmc_host *host = card->host;
2270 int err;
2271
2272 err = mmc_hw_reset(host);
2273 if (!err)
2274 return RESULT_OK;
2275 else if (err == -EOPNOTSUPP)
2276 return RESULT_UNSUP_HOST;
2277
2278 return RESULT_FAIL;
2279 }
2280
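/*
 * The test catalogue. Entries run in array order, and index + 1 is the
 * test number exposed through the debugfs "testlist" file. Optional
 * prepare()/cleanup() hooks bracket each run(). A new test is added by
 * appending an entry, e.g. (hypothetical run function name):
 *
 *	{
 *		.name = "My new test",
 *		.prepare = mmc_test_area_prepare,
 *		.run = mmc_test_my_new_test,
 *		.cleanup = mmc_test_area_cleanup,
 *	},
 */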
2281 static const struct mmc_test_case mmc_test_cases[] = {
2282 {
2283 .name = "Basic write (no data verification)",
2284 .run = mmc_test_basic_write,
2285 },
2286
2287 {
2288 .name = "Basic read (no data verification)",
2289 .run = mmc_test_basic_read,
2290 },
2291
2292 {
2293 .name = "Basic write (with data verification)",
2294 .prepare = mmc_test_prepare_write,
2295 .run = mmc_test_verify_write,
2296 .cleanup = mmc_test_cleanup,
2297 },
2298
2299 {
2300 .name = "Basic read (with data verification)",
2301 .prepare = mmc_test_prepare_read,
2302 .run = mmc_test_verify_read,
2303 .cleanup = mmc_test_cleanup,
2304 },
2305
2306 {
2307 .name = "Multi-block write",
2308 .prepare = mmc_test_prepare_write,
2309 .run = mmc_test_multi_write,
2310 .cleanup = mmc_test_cleanup,
2311 },
2312
2313 {
2314 .name = "Multi-block read",
2315 .prepare = mmc_test_prepare_read,
2316 .run = mmc_test_multi_read,
2317 .cleanup = mmc_test_cleanup,
2318 },
2319
2320 {
2321 .name = "Power of two block writes",
2322 .prepare = mmc_test_prepare_write,
2323 .run = mmc_test_pow2_write,
2324 .cleanup = mmc_test_cleanup,
2325 },
2326
2327 {
2328 .name = "Power of two block reads",
2329 .prepare = mmc_test_prepare_read,
2330 .run = mmc_test_pow2_read,
2331 .cleanup = mmc_test_cleanup,
2332 },
2333
2334 {
2335 .name = "Weird sized block writes",
2336 .prepare = mmc_test_prepare_write,
2337 .run = mmc_test_weird_write,
2338 .cleanup = mmc_test_cleanup,
2339 },
2340
2341 {
2342 .name = "Weird sized block reads",
2343 .prepare = mmc_test_prepare_read,
2344 .run = mmc_test_weird_read,
2345 .cleanup = mmc_test_cleanup,
2346 },
2347
2348 {
2349 .name = "Badly aligned write",
2350 .prepare = mmc_test_prepare_write,
2351 .run = mmc_test_align_write,
2352 .cleanup = mmc_test_cleanup,
2353 },
2354
2355 {
2356 .name = "Badly aligned read",
2357 .prepare = mmc_test_prepare_read,
2358 .run = mmc_test_align_read,
2359 .cleanup = mmc_test_cleanup,
2360 },
2361
2362 {
2363 .name = "Badly aligned multi-block write",
2364 .prepare = mmc_test_prepare_write,
2365 .run = mmc_test_align_multi_write,
2366 .cleanup = mmc_test_cleanup,
2367 },
2368
2369 {
2370 .name = "Badly aligned multi-block read",
2371 .prepare = mmc_test_prepare_read,
2372 .run = mmc_test_align_multi_read,
2373 .cleanup = mmc_test_cleanup,
2374 },
2375
2376 {
2377 .name = "Correct xfer_size at write (start failure)",
2378 .run = mmc_test_xfersize_write,
2379 },
2380
2381 {
2382 .name = "Correct xfer_size at read (start failure)",
2383 .run = mmc_test_xfersize_read,
2384 },
2385
2386 {
2387 .name = "Correct xfer_size at write (midway failure)",
2388 .run = mmc_test_multi_xfersize_write,
2389 },
2390
2391 {
2392 .name = "Correct xfer_size at read (midway failure)",
2393 .run = mmc_test_multi_xfersize_read,
2394 },
2395
2396 #ifdef CONFIG_HIGHMEM
2397
2398 {
2399 .name = "Highmem write",
2400 .prepare = mmc_test_prepare_write,
2401 .run = mmc_test_write_high,
2402 .cleanup = mmc_test_cleanup,
2403 },
2404
2405 {
2406 .name = "Highmem read",
2407 .prepare = mmc_test_prepare_read,
2408 .run = mmc_test_read_high,
2409 .cleanup = mmc_test_cleanup,
2410 },
2411
2412 {
2413 .name = "Multi-block highmem write",
2414 .prepare = mmc_test_prepare_write,
2415 .run = mmc_test_multi_write_high,
2416 .cleanup = mmc_test_cleanup,
2417 },
2418
2419 {
2420 .name = "Multi-block highmem read",
2421 .prepare = mmc_test_prepare_read,
2422 .run = mmc_test_multi_read_high,
2423 .cleanup = mmc_test_cleanup,
2424 },
2425
2426 #else
2427
2428 {
2429 .name = "Highmem write",
2430 .run = mmc_test_no_highmem,
2431 },
2432
2433 {
2434 .name = "Highmem read",
2435 .run = mmc_test_no_highmem,
2436 },
2437
2438 {
2439 .name = "Multi-block highmem write",
2440 .run = mmc_test_no_highmem,
2441 },
2442
2443 {
2444 .name = "Multi-block highmem read",
2445 .run = mmc_test_no_highmem,
2446 },
2447
2448 #endif /* CONFIG_HIGHMEM */
2449
2450 {
2451 .name = "Best-case read performance",
2452 .prepare = mmc_test_area_prepare_fill,
2453 .run = mmc_test_best_read_performance,
2454 .cleanup = mmc_test_area_cleanup,
2455 },
2456
2457 {
2458 .name = "Best-case write performance",
2459 .prepare = mmc_test_area_prepare_erase,
2460 .run = mmc_test_best_write_performance,
2461 .cleanup = mmc_test_area_cleanup,
2462 },
2463
2464 {
2465 .name = "Best-case read performance into scattered pages",
2466 .prepare = mmc_test_area_prepare_fill,
2467 .run = mmc_test_best_read_perf_max_scatter,
2468 .cleanup = mmc_test_area_cleanup,
2469 },
2470
2471 {
2472 .name = "Best-case write performance from scattered pages",
2473 .prepare = mmc_test_area_prepare_erase,
2474 .run = mmc_test_best_write_perf_max_scatter,
2475 .cleanup = mmc_test_area_cleanup,
2476 },
2477
2478 {
2479 .name = "Single read performance by transfer size",
2480 .prepare = mmc_test_area_prepare_fill,
2481 .run = mmc_test_profile_read_perf,
2482 .cleanup = mmc_test_area_cleanup,
2483 },
2484
2485 {
2486 .name = "Single write performance by transfer size",
2487 .prepare = mmc_test_area_prepare,
2488 .run = mmc_test_profile_write_perf,
2489 .cleanup = mmc_test_area_cleanup,
2490 },
2491
2492 {
2493 .name = "Single trim performance by transfer size",
2494 .prepare = mmc_test_area_prepare_fill,
2495 .run = mmc_test_profile_trim_perf,
2496 .cleanup = mmc_test_area_cleanup,
2497 },
2498
2499 {
2500 .name = "Consecutive read performance by transfer size",
2501 .prepare = mmc_test_area_prepare_fill,
2502 .run = mmc_test_profile_seq_read_perf,
2503 .cleanup = mmc_test_area_cleanup,
2504 },
2505
2506 {
2507 .name = "Consecutive write performance by transfer size",
2508 .prepare = mmc_test_area_prepare,
2509 .run = mmc_test_profile_seq_write_perf,
2510 .cleanup = mmc_test_area_cleanup,
2511 },
2512
2513 {
2514 .name = "Consecutive trim performance by transfer size",
2515 .prepare = mmc_test_area_prepare,
2516 .run = mmc_test_profile_seq_trim_perf,
2517 .cleanup = mmc_test_area_cleanup,
2518 },
2519
2520 {
2521 .name = "Random read performance by transfer size",
2522 .prepare = mmc_test_area_prepare,
2523 .run = mmc_test_random_read_perf,
2524 .cleanup = mmc_test_area_cleanup,
2525 },
2526
2527 {
2528 .name = "Random write performance by transfer size",
2529 .prepare = mmc_test_area_prepare,
2530 .run = mmc_test_random_write_perf,
2531 .cleanup = mmc_test_area_cleanup,
2532 },
2533
2534 {
2535 .name = "Large sequential read into scattered pages",
2536 .prepare = mmc_test_area_prepare,
2537 .run = mmc_test_large_seq_read_perf,
2538 .cleanup = mmc_test_area_cleanup,
2539 },
2540
2541 {
2542 .name = "Large sequential write from scattered pages",
2543 .prepare = mmc_test_area_prepare,
2544 .run = mmc_test_large_seq_write_perf,
2545 .cleanup = mmc_test_area_cleanup,
2546 },
2547
2548 {
2549 .name = "Write performance with blocking req 4k to 4MB",
2550 .prepare = mmc_test_area_prepare,
2551 .run = mmc_test_profile_mult_write_blocking_perf,
2552 .cleanup = mmc_test_area_cleanup,
2553 },
2554
2555 {
2556 .name = "Write performance with non-blocking req 4k to 4MB",
2557 .prepare = mmc_test_area_prepare,
2558 .run = mmc_test_profile_mult_write_nonblock_perf,
2559 .cleanup = mmc_test_area_cleanup,
2560 },
2561
2562 {
2563 .name = "Read performance with blocking req 4k to 4MB",
2564 .prepare = mmc_test_area_prepare,
2565 .run = mmc_test_profile_mult_read_blocking_perf,
2566 .cleanup = mmc_test_area_cleanup,
2567 },
2568
2569 {
2570 .name = "Read performance with non-blocking req 4k to 4MB",
2571 .prepare = mmc_test_area_prepare,
2572 .run = mmc_test_profile_mult_read_nonblock_perf,
2573 .cleanup = mmc_test_area_cleanup,
2574 },
2575
2576 {
2577 .name = "Write performance blocking req 1 to 512 sg elems",
2578 .prepare = mmc_test_area_prepare,
2579 .run = mmc_test_profile_sglen_wr_blocking_perf,
2580 .cleanup = mmc_test_area_cleanup,
2581 },
2582
2583 {
2584 .name = "Write performance non-blocking req 1 to 512 sg elems",
2585 .prepare = mmc_test_area_prepare,
2586 .run = mmc_test_profile_sglen_wr_nonblock_perf,
2587 .cleanup = mmc_test_area_cleanup,
2588 },
2589
2590 {
2591 .name = "Read performance blocking req 1 to 512 sg elems",
2592 .prepare = mmc_test_area_prepare,
2593 .run = mmc_test_profile_sglen_r_blocking_perf,
2594 .cleanup = mmc_test_area_cleanup,
2595 },
2596
2597 {
2598 .name = "Read performance non-blocking req 1 to 512 sg elems",
2599 .prepare = mmc_test_area_prepare,
2600 .run = mmc_test_profile_sglen_r_nonblock_perf,
2601 .cleanup = mmc_test_area_cleanup,
2602 },
2603
2604 {
2605 .name = "Reset test",
2606 .run = mmc_test_reset,
2607 },
2608 };
2609
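/*
 * mmc_test_lock serialises test runs and protects the global result and
 * debugfs file lists below.
 */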
2610 static DEFINE_MUTEX(mmc_test_lock);
2611
2612 static LIST_HEAD(mmc_test_result);
2613
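/*
 * Run one test case against the card, or all of them when testcase is 0.
 * The host is claimed for the whole sweep, and a result container is
 * allocated (best effort) per case so measurements can be read back
 * later through debugfs.
 */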
2614 static void mmc_test_run(struct mmc_test_card *test, int testcase)
2615 {
2616 int i, ret;
2617
2618 pr_info("%s: Starting tests of card %s...\n",
2619 mmc_hostname(test->card->host), mmc_card_id(test->card));
2620
2621 mmc_claim_host(test->card->host);
2622
2623 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
2624 struct mmc_test_general_result *gr;
2625
2626 if (testcase && ((i + 1) != testcase))
2627 continue;
2628
2629 pr_info("%s: Test case %d. %s...\n",
2630 mmc_hostname(test->card->host), i + 1,
2631 mmc_test_cases[i].name);
2632
2633 if (mmc_test_cases[i].prepare) {
2634 ret = mmc_test_cases[i].prepare(test);
2635 if (ret) {
2636 pr_info("%s: Result: Prepare stage failed! (%d)\n",
2637 mmc_hostname(test->card->host), ret);
2640 continue;
2641 }
2642 }
2643
2644 gr = kzalloc(sizeof(struct mmc_test_general_result),
2645 GFP_KERNEL);
2646 if (gr) {
2647 INIT_LIST_HEAD(&gr->tr_lst);
2648
2649 /* Fill in what we already know */
2650 gr->card = test->card;
2651 gr->testcase = i;
2652
2653 /* Append the container to the global result list */
2654 list_add_tail(&gr->link, &mmc_test_result);
2655
2656 /*
2657 * Save a pointer to the created container in our private
2658 * structure.
2659 */
2660 test->gr = gr;
2661 }
2662
2663 ret = mmc_test_cases[i].run(test);
2664 switch (ret) {
2665 case RESULT_OK:
2666 pr_info("%s: Result: OK\n",
2667 mmc_hostname(test->card->host));
2668 break;
2669 case RESULT_FAIL:
2670 pr_info("%s: Result: FAILED\n",
2671 mmc_hostname(test->card->host));
2672 break;
2673 case RESULT_UNSUP_HOST:
2674 pr_info("%s: Result: UNSUPPORTED (by host)\n",
2675 mmc_hostname(test->card->host));
2677 break;
2678 case RESULT_UNSUP_CARD:
2679 pr_info("%s: Result: UNSUPPORTED (by card)\n",
2680 mmc_hostname(test->card->host));
2682 break;
2683 default:
2684 pr_info("%s: Result: ERROR (%d)\n",
2685 mmc_hostname(test->card->host), ret);
2686 }
2687
2688 /* Save the result */
2689 if (gr)
2690 gr->result = ret;
2691
2692 if (mmc_test_cases[i].cleanup) {
2693 ret = mmc_test_cases[i].cleanup(test);
2694 if (ret) {
2695 pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
2696 mmc_hostname(test->card->host), ret);
2699 }
2700 }
2701 }
2702
2703 mmc_release_host(test->card->host);
2704
2705 pr_info("%s: Tests completed.\n",
2706 mmc_hostname(test->card->host));
2707 }
2708
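/*
 * Free the stored results for @card, including any per-transfer
 * measurements hanging off each container; a NULL @card frees the
 * results of every card.
 */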
2709 static void mmc_test_free_result(struct mmc_card *card)
2710 {
2711 struct mmc_test_general_result *gr, *grs;
2712
2713 mutex_lock(&mmc_test_lock);
2714
2715 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2716 struct mmc_test_transfer_result *tr, *trs;
2717
2718 if (card && gr->card != card)
2719 continue;
2720
2721 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2722 list_del(&tr->link);
2723 kfree(tr);
2724 }
2725
2726 list_del(&gr->link);
2727 kfree(gr);
2728 }
2729
2730 mutex_unlock(&mmc_test_lock);
2731 }
2732
2733 static LIST_HEAD(mmc_test_file_test);
2734
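/*
 * Read side of the debugfs "test" file: print one "Test <n>: <result>"
 * line per stored case, followed by any transfer measurements as
 * "count sectors seconds.nanoseconds rate iops" (iops is stored times
 * 100 and printed to two decimal places).
 */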
2735 static int mtf_test_show(struct seq_file *sf, void *data)
2736 {
2737 struct mmc_card *card = (struct mmc_card *)sf->private;
2738 struct mmc_test_general_result *gr;
2739
2740 mutex_lock(&mmc_test_lock);
2741
2742 list_for_each_entry(gr, &mmc_test_result, link) {
2743 struct mmc_test_transfer_result *tr;
2744
2745 if (gr->card != card)
2746 continue;
2747
2748 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
2749
2750 list_for_each_entry(tr, &gr->tr_lst, link) {
2751 seq_printf(sf, "%u %u %lu.%09lu %u %u.%02u\n",
2752 tr->count, tr->sectors,
2753 (unsigned long)tr->ts.tv_sec,
2754 (unsigned long)tr->ts.tv_nsec,
2755 tr->rate, tr->iops / 100, tr->iops % 100);
2756 }
2757 }
2758
2759 mutex_unlock(&mmc_test_lock);
2760
2761 return 0;
2762 }
2763
2764 static int mtf_test_open(struct inode *inode, struct file *file)
2765 {
2766 return single_open(file, mtf_test_show, inode->i_private);
2767 }
2768
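/*
 * Write side of the debugfs "test" file: writing a test number runs that
 * test and replaces the stored results. A usage sketch (the exact path
 * depends on how the host and card enumerate; mmc0:0001 is assumed
 * here):
 *
 *   echo 4 > /sys/kernel/debug/mmc0/mmc0:0001/test
 *   cat /sys/kernel/debug/mmc0/mmc0:0001/test
 *
 * Writing 0 runs every case listed by the sibling "testlist" file.
 */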
2769 static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2770 size_t count, loff_t *pos)
2771 {
2772 struct seq_file *sf = (struct seq_file *)file->private_data;
2773 struct mmc_card *card = (struct mmc_card *)sf->private;
2774 struct mmc_test_card *test;
2775 long testcase;
2776 int ret;
2777
2778 ret = kstrtol_from_user(buf, count, 10, &testcase);
2779 if (ret)
2780 return ret;
2781
2782 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2783 if (!test)
2784 return -ENOMEM;
2785
2786 /*
2787 * Remove all results associated with the given card, so that only
2788 * data from the most recent run is reported.
2789 */
2790 mmc_test_free_result(card);
2791
2792 test->card = card;
2793
2794 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2795 #ifdef CONFIG_HIGHMEM
2796 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2797 #endif
2798
2799 #ifdef CONFIG_HIGHMEM
2800 if (test->buffer && test->highmem) {
2801 #else
2802 if (test->buffer) {
2803 #endif
2804 mutex_lock(&mmc_test_lock);
2805 mmc_test_run(test, testcase);
2806 mutex_unlock(&mmc_test_lock);
2807 }
2808
2809 #ifdef CONFIG_HIGHMEM
2810 if (test->highmem) __free_pages(test->highmem, BUFFER_ORDER);
2811 #endif
2812 kfree(test->buffer);
2813 kfree(test);
2814
2815 return count;
2816 }
2817
2818 static const struct file_operations mmc_test_fops_test = {
2819 .open = mtf_test_open,
2820 .read = seq_read,
2821 .write = mtf_test_write,
2822 .llseek = seq_lseek,
2823 .release = single_release,
2824 };
2825
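/*
 * Read side of the read-only debugfs "testlist" file: enumerate the
 * available test numbers and names.
 */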
2826 static int mtf_testlist_show(struct seq_file *sf, void *data)
2827 {
2828 int i;
2829
2830 mutex_lock(&mmc_test_lock);
2831
2832 seq_puts(sf, "0:\tRun all tests\n");
2833 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
2834 seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
2835
2836 mutex_unlock(&mmc_test_lock);
2837
2838 return 0;
2839 }
2840
2841 static int mtf_testlist_open(struct inode *inode, struct file *file)
2842 {
2843 return single_open(file, mtf_testlist_show, inode->i_private);
2844 }
2845
2846 static const struct file_operations mmc_test_fops_testlist = {
2847 .open = mtf_testlist_open,
2848 .read = seq_read,
2849 .llseek = seq_lseek,
2850 .release = single_release,
2851 };
2852
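/*
 * Remove the debugfs files registered for @card (for all cards if @card
 * is NULL) and drop their bookkeeping entries.
 */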
2853 static void mmc_test_free_dbgfs_file(struct mmc_card *card)
2854 {
2855 struct mmc_test_dbgfs_file *df, *dfs;
2856
2857 mutex_lock(&mmc_test_lock);
2858
2859 list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
2860 if (card && df->card != card)
2861 continue;
2862 debugfs_remove(df->file);
2863 list_del(&df->link);
2864 kfree(df);
2865 }
2866
2867 mutex_unlock(&mmc_test_lock);
2868 }
2869
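/*
 * Create a single debugfs file under the card's debugfs directory and
 * remember it on mmc_test_file_test so it can be removed again on card
 * removal or module unload.
 */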
2870 static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
2871 const char *name, umode_t mode, const struct file_operations *fops)
2872 {
2873 struct dentry *file = NULL;
2874 struct mmc_test_dbgfs_file *df;
2875
2876 if (card->debugfs_root)
2877 file = debugfs_create_file(name, mode, card->debugfs_root,
2878 card, fops);
2879
2880 if (IS_ERR_OR_NULL(file)) {
2881 dev_err(&card->dev,
2882 "Can't create %s. Perhaps debugfs is disabled.\n",
2883 name);
2884 return -ENODEV;
2885 }
2886
2887 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2888 if (!df) {
2889 debugfs_remove(file);
2890 dev_err(&card->dev,
2891 "Can't allocate memory for internal usage.\n");
2892 return -ENOMEM;
2893 }
2894
2895 df->card = card;
2896 df->file = file;
2897
2898 list_add(&df->link, &mmc_test_file_test);
2899 return 0;
2900 }
2901
2902 static int mmc_test_register_dbgfs_file(struct mmc_card *card)
2903 {
2904 int ret;
2905
2906 mutex_lock(&mmc_test_lock);
2907
2908 ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
2909 &mmc_test_fops_test);
2910 if (ret)
2911 goto err;
2912
2913 ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
2914 &mmc_test_fops_testlist);
2915 if (ret)
2916 goto err;
2917
2918 err:
2919 mutex_unlock(&mmc_test_lock);
2920
2921 return ret;
2922 }
2923
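/*
 * Bus probe: only MMC and SD cards are claimed; for those, register the
 * "test" and "testlist" debugfs entries.
 */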
2924 static int mmc_test_probe(struct mmc_card *card)
2925 {
2926 int ret;
2927
2928 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
2929 return -ENODEV;
2930
2931 ret = mmc_test_register_dbgfs_file(card);
2932 if (ret)
2933 return ret;
2934
2935 dev_info(&card->dev, "Card claimed for testing.\n");
2936
2937 return 0;
2938 }
2939
2940 static void mmc_test_remove(struct mmc_card *card)
2941 {
2942 mmc_test_free_result(card);
2943 mmc_test_free_dbgfs_file(card);
2944 }
2945
2946 static void mmc_test_shutdown(struct mmc_card *card)
2947 {
2948 }
2949
2950 static struct mmc_driver mmc_driver = {
2951 .drv = {
2952 .name = "mmc_test",
2953 },
2954 .probe = mmc_test_probe,
2955 .remove = mmc_test_remove,
2956 .shutdown = mmc_test_shutdown,
2957 };
2958
2959 static int __init mmc_test_init(void)
2960 {
2961 return mmc_register_driver(&mmc_driver);
2962 }
2963
2964 static void __exit mmc_test_exit(void)
2965 {
2966 /* Clear stale data if a card is still plugged in */
2967 mmc_test_free_result(NULL);
2968 mmc_test_free_dbgfs_file(NULL);
2969
2970 mmc_unregister_driver(&mmc_driver);
2971 }
2972
2973 module_init(mmc_test_init);
2974 module_exit(mmc_test_exit);
2975
2976 MODULE_LICENSE("GPL");
2977 MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
2978 MODULE_AUTHOR("Pierre Ossman");