mmc: mmc_test: add test for non-blocking transfers
[deliverable/linux.git] / drivers / mmc / card / mmc_test.c
CommitLineData
88ae600d
PO
1/*
2 * linux/drivers/mmc/card/mmc_test.c
3 *
0121a982 4 * Copyright 2007-2008 Pierre Ossman
88ae600d
PO
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 */
11
12#include <linux/mmc/core.h>
13#include <linux/mmc/card.h>
14#include <linux/mmc/host.h>
15#include <linux/mmc/mmc.h>
5a0e3ad6 16#include <linux/slab.h>
88ae600d
PO
17
18#include <linux/scatterlist.h>
fec4dcce 19#include <linux/swap.h> /* For nr_free_buffer_pages() */
3183aa15 20#include <linux/list.h>
88ae600d 21
130067ed
AS
22#include <linux/debugfs.h>
23#include <linux/uaccess.h>
24#include <linux/seq_file.h>
25
88ae600d
PO
/* Outcome codes returned by the individual testcases. */
#define RESULT_OK		0	/* test passed */
#define RESULT_FAIL		1	/* test ran but failed */
#define RESULT_UNSUP_HOST	2	/* host controller lacks a capability */
#define RESULT_UNSUP_CARD	3	/* card lacks a capability */

/* Bounce/scratch buffer size: 2^BUFFER_ORDER pages (4 pages). */
#define BUFFER_ORDER 2
#define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER)

/*
 * Limit the test area size to the maximum MMC HC erase group size. Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)
39
64f7120d
AH
/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the number of pages allocated
 */
struct mmc_test_pages {
	struct page *page;
	unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
	struct mmc_test_pages *arr;
	unsigned int cnt;
};
59
/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 */
struct mmc_test_area {
	unsigned long max_sz;
	unsigned int dev_addr;
	unsigned int max_tfr;
	unsigned int max_segs;
	unsigned int max_seg_sz;
	unsigned int blocks;
	unsigned int sg_len;
	struct mmc_test_mem *mem;
	struct scatterlist *sg;
};
83
3183aa15
AS
/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: amount of group of sectors to check
 * @sectors: amount of sectors to check in one group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
	struct list_head link;
	unsigned int count;
	unsigned int sectors;
	struct timespec ts;
	unsigned int rate;
	unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
	struct list_head link;
	struct mmc_card *card;
	int testcase;
	int result;
	struct list_head tr_lst;
};
117
130067ed
AS
/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
	struct list_head link;
	struct mmc_card *card;
	struct dentry *file;
};
129
64f7120d
AH
/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
	struct mmc_card *card;

	u8 scratch[BUFFER_SIZE];
	u8 *buffer;
#ifdef CONFIG_HIGHMEM
	struct page *highmem;
#endif
	struct mmc_test_area area;
	struct mmc_test_general_result *gr;
};
150
9f9c4180
PF
/**
 * enum mmc_test_prep_media - how to prepare the media before a rw test.
 * @MMC_TEST_PREP_NONE: no preparation
 * @MMC_TEST_PREP_WRITE_FULL: pre-write the test area
 * @MMC_TEST_PREP_ERASE: erase the test area
 */
enum mmc_test_prep_media {
	MMC_TEST_PREP_NONE = 0,
	MMC_TEST_PREP_WRITE_FULL = 1 << 0,
	MMC_TEST_PREP_ERASE = 1 << 1,
};

/**
 * struct mmc_test_multiple_rw - parameters for a multiple read/write test.
 * @bs: presumably an array of block sizes to iterate over — confirm at users
 * @len: number of entries in @bs
 * @size: total transfer size in bytes
 * @do_write: true for write tests, false for read tests
 * @do_nonblock_req: use the non-blocking (asynchronous) request API
 * @prepare: media preparation flags (see enum mmc_test_prep_media)
 */
struct mmc_test_multiple_rw {
	unsigned int *bs;
	unsigned int len;
	unsigned int size;
	bool do_write;
	bool do_nonblock_req;
	enum mmc_test_prep_media prepare;
};

/**
 * struct mmc_test_async_req - context for asynchronous test requests.
 * @areq: the core asynchronous request
 * @test: back pointer used by the error-check callback
 */
struct mmc_test_async_req {
	struct mmc_async_req areq;
	struct mmc_test_card *test;
};
170
88ae600d 171/*******************************************************************/
6b174931 172/* General helper functions */
88ae600d
PO
173/*******************************************************************/
174
6b174931
PO
175/*
176 * Configure correct block size in card
177 */
88ae600d
PO
178static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
179{
0f8d8ea6 180 return mmc_set_blocklen(test->card, size);
88ae600d
PO
181}
182
6b174931
PO
183/*
184 * Fill in the mmc_request structure given a set of transfer parameters.
185 */
186static void mmc_test_prepare_mrq(struct mmc_test_card *test,
187 struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
188 unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
88ae600d 189{
6b174931 190 BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
88ae600d 191
6b174931
PO
192 if (blocks > 1) {
193 mrq->cmd->opcode = write ?
194 MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
88ae600d 195 } else {
6b174931
PO
196 mrq->cmd->opcode = write ?
197 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
88ae600d
PO
198 }
199
6b174931 200 mrq->cmd->arg = dev_addr;
c286d03c
JK
201 if (!mmc_card_blockaddr(test->card))
202 mrq->cmd->arg <<= 9;
203
6b174931 204 mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
88ae600d 205
6b174931
PO
206 if (blocks == 1)
207 mrq->stop = NULL;
208 else {
209 mrq->stop->opcode = MMC_STOP_TRANSMISSION;
210 mrq->stop->arg = 0;
211 mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
88ae600d
PO
212 }
213
6b174931
PO
214 mrq->data->blksz = blksz;
215 mrq->data->blocks = blocks;
216 mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
217 mrq->data->sg = sg;
218 mrq->data->sg_len = sg_len;
88ae600d 219
6b174931
PO
220 mmc_set_data_timeout(mrq->data, test->card);
221}
88ae600d 222
64f7120d
AH
223static int mmc_test_busy(struct mmc_command *cmd)
224{
225 return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
226 (R1_CURRENT_STATE(cmd->resp[0]) == 7);
227}
228
6b174931
PO
229/*
230 * Wait for the card to finish the busy state
231 */
232static int mmc_test_wait_busy(struct mmc_test_card *test)
233{
234 int ret, busy;
1278dba1 235 struct mmc_command cmd = {0};
88ae600d
PO
236
237 busy = 0;
238 do {
88ae600d
PO
239 memset(&cmd, 0, sizeof(struct mmc_command));
240
241 cmd.opcode = MMC_SEND_STATUS;
242 cmd.arg = test->card->rca << 16;
243 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
244
6b174931
PO
245 ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
246 if (ret)
88ae600d
PO
247 break;
248
64f7120d 249 if (!busy && mmc_test_busy(&cmd)) {
88ae600d 250 busy = 1;
54d6b44a
PM
251 if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
252 printk(KERN_INFO "%s: Warning: Host did not "
253 "wait for busy state to end.\n",
254 mmc_hostname(test->card->host));
88ae600d 255 }
64f7120d 256 } while (mmc_test_busy(&cmd));
88ae600d
PO
257
258 return ret;
259}
260
6b174931
PO
261/*
262 * Transfer a single sector of kernel addressable data
263 */
264static int mmc_test_buffer_transfer(struct mmc_test_card *test,
265 u8 *buffer, unsigned addr, unsigned blksz, int write)
88ae600d 266{
6b174931
PO
267 int ret;
268
24f5b53b 269 struct mmc_request mrq = {0};
1278dba1
CB
270 struct mmc_command cmd = {0};
271 struct mmc_command stop = {0};
a61ad2b4 272 struct mmc_data data = {0};
6b174931
PO
273
274 struct scatterlist sg;
275
6b174931
PO
276 mrq.cmd = &cmd;
277 mrq.data = &data;
278 mrq.stop = &stop;
279
280 sg_init_one(&sg, buffer, blksz);
281
282 mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
283
284 mmc_wait_for_req(test->card->host, &mrq);
285
286 if (cmd.error)
287 return cmd.error;
288 if (data.error)
289 return data.error;
290
291 ret = mmc_test_wait_busy(test);
292 if (ret)
293 return ret;
294
295 return 0;
88ae600d
PO
296}
297
64f7120d
AH
298static void mmc_test_free_mem(struct mmc_test_mem *mem)
299{
300 if (!mem)
301 return;
302 while (mem->cnt--)
303 __free_pages(mem->arr[mem->cnt].page,
304 mem->arr[mem->cnt].order);
305 kfree(mem->arr);
306 kfree(mem);
307}
308
309/*
25985edc 310 * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
c8c8c1bd
AH
311 * there isn't much memory do not exceed 1/16th total lowmem pages. Also do
312 * not exceed a maximum number of segments and try not to make segments much
313 * bigger than maximum segment size.
64f7120d 314 */
fec4dcce 315static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
c8c8c1bd
AH
316 unsigned long max_sz,
317 unsigned int max_segs,
318 unsigned int max_seg_sz)
64f7120d 319{
fec4dcce
AH
320 unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
321 unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
c8c8c1bd 322 unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
fec4dcce
AH
323 unsigned long page_cnt = 0;
324 unsigned long limit = nr_free_buffer_pages() >> 4;
64f7120d 325 struct mmc_test_mem *mem;
64f7120d 326
fec4dcce
AH
327 if (max_page_cnt > limit)
328 max_page_cnt = limit;
3d203be8
AH
329 if (min_page_cnt > max_page_cnt)
330 min_page_cnt = max_page_cnt;
64f7120d 331
c8c8c1bd
AH
332 if (max_seg_page_cnt > max_page_cnt)
333 max_seg_page_cnt = max_page_cnt;
334
335 if (max_segs > max_page_cnt)
336 max_segs = max_page_cnt;
337
64f7120d
AH
338 mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL);
339 if (!mem)
340 return NULL;
341
c8c8c1bd 342 mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs,
64f7120d
AH
343 GFP_KERNEL);
344 if (!mem->arr)
345 goto out_free;
346
347 while (max_page_cnt) {
348 struct page *page;
349 unsigned int order;
350 gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
351 __GFP_NORETRY;
352
c8c8c1bd 353 order = get_order(max_seg_page_cnt << PAGE_SHIFT);
64f7120d
AH
354 while (1) {
355 page = alloc_pages(flags, order);
356 if (page || !order)
357 break;
358 order -= 1;
359 }
360 if (!page) {
361 if (page_cnt < min_page_cnt)
362 goto out_free;
363 break;
364 }
365 mem->arr[mem->cnt].page = page;
366 mem->arr[mem->cnt].order = order;
367 mem->cnt += 1;
fec4dcce
AH
368 if (max_page_cnt <= (1UL << order))
369 break;
3d203be8
AH
370 max_page_cnt -= 1UL << order;
371 page_cnt += 1UL << order;
c8c8c1bd
AH
372 if (mem->cnt >= max_segs) {
373 if (page_cnt < min_page_cnt)
374 goto out_free;
375 break;
376 }
64f7120d
AH
377 }
378
379 return mem;
380
381out_free:
382 mmc_test_free_mem(mem);
383 return NULL;
384}
385
/*
 * Map memory into a scatterlist. Optionally allow the same memory to be
 * mapped more than once.
 *
 * Fills @sglist until @sz bytes are covered; *@sg_len receives the number
 * of entries used.  Returns -EINVAL if @sz cannot be covered within
 * @max_segs entries (or, without @repeat, within one pass over @mem).
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
	struct scatterlist *sglist, int repeat,
	unsigned int max_segs, unsigned int max_seg_sz,
	unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	do {
		for (i = 0; i < mem->cnt; i++) {
			unsigned long len = PAGE_SIZE << mem->arr[i].order;

			/* Clamp each entry to the remaining size and to the
			 * driver's maximum segment size. */
			if (len > sz)
				len = sz;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			/* Ran out of table entries before covering sz. */
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, mem->arr[i].page, len, 0);
			sz -= len;
			*sg_len += 1;
			if (!sz)
				break;
		}
	} while (sz && repeat);

	if (sz)
		return -EINVAL;

	if (sg)
		sg_mark_end(sg);

	return 0;
}
431
/*
 * Map memory into a scatterlist so that no pages are contiguous. Allow the
 * same memory to be mapped more than once.
 *
 * Walks the allocations and their pages backwards, skipping any page that
 * is virtually adjacent to the previously mapped one, to maximise scatter.
 * NOTE(review): assumes mem->cnt > 0 and that @sglist has enough entries;
 * if adjacency skips exhaust all pages while sz remains, the outer loop
 * relies on wrap-around (i = mem->cnt) to make progress — confirm callers
 * always satisfy these preconditions.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
	unsigned long sz,
	struct scatterlist *sglist,
	unsigned int max_segs,
	unsigned int max_seg_sz,
	unsigned int *sg_len)
{
	struct scatterlist *sg = NULL;
	unsigned int i = mem->cnt, cnt;
	unsigned long len;
	void *base, *addr, *last_addr = NULL;

	sg_init_table(sglist, max_segs);

	*sg_len = 0;
	while (sz) {
		base = page_address(mem->arr[--i].page);
		cnt = 1 << mem->arr[i].order;
		while (sz && cnt) {
			addr = base + PAGE_SIZE * --cnt;
			/* Skip pages contiguous with the previous mapping. */
			if (last_addr && last_addr + PAGE_SIZE == addr)
				continue;
			last_addr = addr;
			len = PAGE_SIZE;
			if (len > max_seg_sz)
				len = max_seg_sz;
			if (len > sz)
				len = sz;
			if (sg)
				sg = sg_next(sg);
			else
				sg = sglist;
			if (!sg)
				return -EINVAL;
			sg_set_page(sg, virt_to_page(addr), len, 0);
			sz -= len;
			*sg_len += 1;
		}
		/* Wrap around so the same memory may be mapped again. */
		if (i == 0)
			i = mem->cnt;
	}

	if (sg)
		sg_mark_end(sg);

	return 0;
}
483
/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
{
	uint64_t ns;

	ns = ts->tv_sec;
	ns *= 1000000000;
	ns += ts->tv_nsec;

	bytes *= 1000000000;

	/*
	 * Scale both numerator and divisor down together until the divisor
	 * fits in 32 bits, as required by the do_div() cast below.
	 */
	while (ns > UINT_MAX) {
		bytes >>= 1;
		ns >>= 1;
	}

	if (!ns)
		return 0;

	do_div(bytes, (uint32_t)ns);

	return bytes;
}
509
3183aa15
AS
/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
	unsigned int count, unsigned int sectors, struct timespec ts,
	unsigned int rate, unsigned int iops)
{
	struct mmc_test_transfer_result *tr;

	/* Results are only recorded when a testcase result is attached. */
	if (!test->gr)
		return;

	tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL);
	if (!tr)
		return;	/* best effort: silently drop the data point on OOM */

	tr->count = count;
	tr->sectors = sectors;
	tr->ts = ts;
	tr->rate = rate;
	tr->iops = iops;	/* stored as IOPS x 100 (fixed point) */

	list_add_tail(&tr->link, &test->gr->tr_lst);
}
534
64f7120d
AH
/*
 * Print the transfer rate.
 *
 * Rates are reported both in kB/s (decimal) and KiB/s (binary); iops is
 * carried as a x100 fixed-point value and printed as "%u.%02u".
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
	struct timespec *ts1, struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(bytes, &ts);
	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
		"seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
		mmc_hostname(test->card->host), sectors, sectors >> 1,
		(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
		(unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
		iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
558
/*
 * Print the average transfer rate.
 *
 * Like mmc_test_print_rate() but for @count repeated transfers of @bytes
 * each; the rate is computed over the total amount moved.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
	unsigned int count, struct timespec *ts1,
	struct timespec *ts2)
{
	unsigned int rate, iops, sectors = bytes >> 9;
	uint64_t tot = bytes * count;
	struct timespec ts;

	ts = timespec_sub(*ts2, *ts1);

	rate = mmc_test_rate(tot, &ts);
	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
		"%lu.%09lu seconds (%u kB/s, %u KiB/s, "
		"%u.%02u IOPS)\n",
		mmc_hostname(test->card->host), count, sectors, count,
		sectors >> 1, (sectors & 1 ? ".5" : ""),
		(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
		rate / 1000, rate / 1024, iops / 100, iops % 100);

	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
585
586/*
587 * Return the card size in sectors.
588 */
589static unsigned int mmc_test_capacity(struct mmc_card *card)
590{
591 if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
592 return card->ext_csd.sectors;
593 else
594 return card->csd.capacity << (card->csd.read_blkbits - 9);
595}
596
6b174931
PO
597/*******************************************************************/
598/* Test preparation and cleanup */
599/*******************************************************************/
600
601/*
602 * Fill the first couple of sectors of the card with known data
603 * so that bad reads/writes can be detected
604 */
605static int __mmc_test_prepare(struct mmc_test_card *test, int write)
88ae600d
PO
606{
607 int ret, i;
608
609 ret = mmc_test_set_blksize(test, 512);
610 if (ret)
611 return ret;
612
613 if (write)
6b174931 614 memset(test->buffer, 0xDF, 512);
88ae600d 615 else {
6b174931 616 for (i = 0;i < 512;i++)
88ae600d
PO
617 test->buffer[i] = i;
618 }
619
620 for (i = 0;i < BUFFER_SIZE / 512;i++) {
c286d03c 621 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
88ae600d
PO
622 if (ret)
623 return ret;
624 }
625
626 return 0;
627}
628
6b174931
PO
/* Prepare for a write test: pre-fill the area with the 0xDF pattern. */
static int mmc_test_prepare_write(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 1);
}
633
/* Prepare for a read test: fill the area with the counting byte pattern. */
static int mmc_test_prepare_read(struct mmc_test_card *test)
{
	return __mmc_test_prepare(test, 0);
}
638
639static int mmc_test_cleanup(struct mmc_test_card *test)
640{
641 int ret, i;
642
643 ret = mmc_test_set_blksize(test, 512);
644 if (ret)
645 return ret;
646
647 memset(test->buffer, 0, 512);
648
649 for (i = 0;i < BUFFER_SIZE / 512;i++) {
c286d03c 650 ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
6b174931
PO
651 if (ret)
652 return ret;
653 }
654
655 return 0;
656}
657
658/*******************************************************************/
659/* Test execution helpers */
660/*******************************************************************/
661
662/*
663 * Modifies the mmc_request to perform the "short transfer" tests
664 */
665static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
666 struct mmc_request *mrq, int write)
667{
668 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
669
670 if (mrq->data->blocks > 1) {
671 mrq->cmd->opcode = write ?
672 MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
673 mrq->stop = NULL;
674 } else {
675 mrq->cmd->opcode = MMC_SEND_STATUS;
676 mrq->cmd->arg = test->card->rca << 16;
677 }
678}
679
680/*
681 * Checks that a normal transfer didn't have any errors
682 */
683static int mmc_test_check_result(struct mmc_test_card *test,
9f9c4180 684 struct mmc_request *mrq)
88ae600d 685{
6b174931
PO
686 int ret;
687
688 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
689
690 ret = 0;
691
692 if (!ret && mrq->cmd->error)
693 ret = mrq->cmd->error;
694 if (!ret && mrq->data->error)
695 ret = mrq->data->error;
696 if (!ret && mrq->stop && mrq->stop->error)
697 ret = mrq->stop->error;
698 if (!ret && mrq->data->bytes_xfered !=
699 mrq->data->blocks * mrq->data->blksz)
700 ret = RESULT_FAIL;
701
702 if (ret == -EINVAL)
703 ret = RESULT_UNSUP_HOST;
704
705 return ret;
88ae600d
PO
706}
707
9f9c4180
PF
/*
 * Error-check callback for asynchronous requests: wait out any busy
 * state, then apply the normal result checks.
 * NOTE(review): the mmc_test_wait_busy() return value is ignored here —
 * confirm a polling failure should not fail the request.
 */
static int mmc_test_check_result_async(struct mmc_card *card,
	struct mmc_async_req *areq)
{
	struct mmc_test_async_req *test_async =
		container_of(areq, struct mmc_test_async_req, areq);

	mmc_test_wait_busy(test_async->test);

	return mmc_test_check_result(test_async->test, areq->mrq);
}
718
6b174931
PO
719/*
720 * Checks that a "short transfer" behaved as expected
721 */
722static int mmc_test_check_broken_result(struct mmc_test_card *test,
723 struct mmc_request *mrq)
88ae600d 724{
6b174931
PO
725 int ret;
726
727 BUG_ON(!mrq || !mrq->cmd || !mrq->data);
728
729 ret = 0;
730
731 if (!ret && mrq->cmd->error)
732 ret = mrq->cmd->error;
733 if (!ret && mrq->data->error == 0)
734 ret = RESULT_FAIL;
735 if (!ret && mrq->data->error != -ETIMEDOUT)
736 ret = mrq->data->error;
737 if (!ret && mrq->stop && mrq->stop->error)
738 ret = mrq->stop->error;
739 if (mrq->data->blocks > 1) {
740 if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
741 ret = RESULT_FAIL;
742 } else {
743 if (!ret && mrq->data->bytes_xfered > 0)
744 ret = RESULT_FAIL;
745 }
746
747 if (ret == -EINVAL)
748 ret = RESULT_UNSUP_HOST;
749
750 return ret;
88ae600d
PO
751}
752
9f9c4180
PF
753/*
754 * Tests nonblock transfer with certain parameters
755 */
756static void mmc_test_nonblock_reset(struct mmc_request *mrq,
757 struct mmc_command *cmd,
758 struct mmc_command *stop,
759 struct mmc_data *data)
760{
761 memset(mrq, 0, sizeof(struct mmc_request));
762 memset(cmd, 0, sizeof(struct mmc_command));
763 memset(data, 0, sizeof(struct mmc_data));
764 memset(stop, 0, sizeof(struct mmc_command));
765
766 mrq->cmd = cmd;
767 mrq->data = data;
768 mrq->stop = stop;
769}
/*
 * Tests non-blocking transfers with certain parameters: keeps two requests
 * in flight, preparing the next one while the host processes the previous.
 */
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len,
	unsigned dev_addr, unsigned blocks,
	unsigned blksz, int write, int count)
{
	struct mmc_request mrq1;
	struct mmc_command cmd1;
	struct mmc_command stop1;
	struct mmc_data data1;

	struct mmc_request mrq2;
	struct mmc_command cmd2;
	struct mmc_command stop2;
	struct mmc_data data2;

	struct mmc_test_async_req test_areq[2];
	struct mmc_async_req *done_areq;
	struct mmc_async_req *cur_areq = &test_areq[0].areq;
	struct mmc_async_req *other_areq = &test_areq[1].areq;
	int i;
	int ret;

	test_areq[0].test = test;
	test_areq[1].test = test;

	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);

	cur_areq->mrq = &mrq1;
	cur_areq->err_check = mmc_test_check_result_async;
	other_areq->mrq = &mrq2;
	other_areq->err_check = mmc_test_check_result_async;

	for (i = 0; i < count; i++) {
		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
				     blocks, blksz, write);
		/*
		 * mmc_start_req() queues cur_areq and returns the previously
		 * completed request (NULL on the first iteration).
		 */
		done_areq = mmc_start_req(test->card->host, cur_areq, &ret);

		if (ret || (!done_areq && i > 0))
			goto err;

		if (done_areq) {
			/* Re-zero the finished request so it can be reused. */
			if (done_areq->mrq == &mrq2)
				mmc_test_nonblock_reset(&mrq2, &cmd2,
							&stop2, &data2);
			else
				mmc_test_nonblock_reset(&mrq1, &cmd1,
							&stop1, &data1);
		}
		/* Swap roles: the request just queued becomes "other". */
		done_areq = cur_areq;
		cur_areq = other_areq;
		other_areq = done_areq;
		dev_addr += blocks;
	}

	/* Passing NULL waits for the last outstanding request to finish. */
	done_areq = mmc_start_req(test->card->host, NULL, &ret);

	return ret;
err:
	/*
	 * NOTE(review): on this path a request may still be outstanding in
	 * the host and is never reclaimed via mmc_start_req(host, NULL, ..) —
	 * verify against the mmc_start_req() contract.
	 */
	return ret;
}
831
6b174931
PO
832/*
833 * Tests a basic transfer with certain parameters
834 */
835static int mmc_test_simple_transfer(struct mmc_test_card *test,
836 struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
837 unsigned blocks, unsigned blksz, int write)
88ae600d 838{
24f5b53b 839 struct mmc_request mrq = {0};
1278dba1
CB
840 struct mmc_command cmd = {0};
841 struct mmc_command stop = {0};
a61ad2b4 842 struct mmc_data data = {0};
88ae600d 843
6b174931
PO
844 mrq.cmd = &cmd;
845 mrq.data = &data;
846 mrq.stop = &stop;
847
848 mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
849 blocks, blksz, write);
850
851 mmc_wait_for_req(test->card->host, &mrq);
88ae600d 852
6b174931
PO
853 mmc_test_wait_busy(test);
854
855 return mmc_test_check_result(test, &mrq);
856}
857
858/*
859 * Tests a transfer where the card will fail completely or partly
860 */
861static int mmc_test_broken_transfer(struct mmc_test_card *test,
862 unsigned blocks, unsigned blksz, int write)
863{
24f5b53b 864 struct mmc_request mrq = {0};
1278dba1
CB
865 struct mmc_command cmd = {0};
866 struct mmc_command stop = {0};
a61ad2b4 867 struct mmc_data data = {0};
6b174931
PO
868
869 struct scatterlist sg;
870
6b174931
PO
871 mrq.cmd = &cmd;
872 mrq.data = &data;
873 mrq.stop = &stop;
874
875 sg_init_one(&sg, test->buffer, blocks * blksz);
876
877 mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
878 mmc_test_prepare_broken_mrq(test, &mrq, write);
879
880 mmc_wait_for_req(test->card->host, &mrq);
881
882 mmc_test_wait_busy(test);
883
884 return mmc_test_check_broken_result(test, &mrq);
885}
886
/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
	unsigned blocks, unsigned blksz, int write)
{
	int ret, i;
	unsigned long flags;

	if (write) {
		/* Seed scratch with the counting byte pattern 0,1,2,... */
		for (i = 0;i < blocks * blksz;i++)
			test->scratch[i] = i;
	} else {
		memset(test->scratch, 0, BUFFER_SIZE);
	}
	/*
	 * IRQs are disabled around the sg copy — presumably because the
	 * helpers may use atomic kmap on highmem pages; confirm.
	 */
	local_irq_save(flags);
	sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		/* Read everything back at 512-byte granularity and verify. */
		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		sectors = (blocks * blksz + 511) / 512;
		/*
		 * For a sector-aligned transfer read one extra sector so the
		 * 0xDF fill written by the prepare step can be verified past
		 * the end of the written data.
		 */
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0;i < sectors;i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		/* The written range must hold the counting pattern... */
		for (i = 0;i < blocks * blksz;i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		/* ...and everything after it must still be the 0xDF fill. */
		for (;i < sectors * 512;i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		/* Read test: the counting pattern must have come back. */
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0;i < blocks * blksz;i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}
963
88ae600d
PO
964/*******************************************************************/
965/* Tests */
966/*******************************************************************/
967
/**
 * struct mmc_test_case - one entry in the testcase table.
 * @name: human readable test name
 * @prepare: hook run before @run — presumably optional/NULL-checked by the
 *           runner; confirm at the call site
 * @run: the test body; returns 0 or a RESULT_* / errno code
 * @cleanup: hook run after @run — presumably optional as well
 */
struct mmc_test_case {
	const char *name;

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};
975
976static int mmc_test_basic_write(struct mmc_test_card *test)
977{
978 int ret;
6b174931 979 struct scatterlist sg;
88ae600d
PO
980
981 ret = mmc_test_set_blksize(test, 512);
982 if (ret)
983 return ret;
984
6b174931
PO
985 sg_init_one(&sg, test->buffer, 512);
986
987 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
88ae600d
PO
988 if (ret)
989 return ret;
990
991 return 0;
992}
993
994static int mmc_test_basic_read(struct mmc_test_card *test)
995{
996 int ret;
6b174931 997 struct scatterlist sg;
88ae600d
PO
998
999 ret = mmc_test_set_blksize(test, 512);
1000 if (ret)
1001 return ret;
1002
6b174931
PO
1003 sg_init_one(&sg, test->buffer, 512);
1004
58a5dd3e 1005 ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
88ae600d
PO
1006 if (ret)
1007 return ret;
1008
1009 return 0;
1010}
1011
1012static int mmc_test_verify_write(struct mmc_test_card *test)
1013{
1014 int ret;
6b174931
PO
1015 struct scatterlist sg;
1016
1017 sg_init_one(&sg, test->buffer, 512);
88ae600d 1018
6b174931 1019 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
88ae600d
PO
1020 if (ret)
1021 return ret;
1022
1023 return 0;
1024}
1025
1026static int mmc_test_verify_read(struct mmc_test_card *test)
1027{
1028 int ret;
6b174931
PO
1029 struct scatterlist sg;
1030
1031 sg_init_one(&sg, test->buffer, 512);
88ae600d 1032
6b174931 1033 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
88ae600d
PO
1034 if (ret)
1035 return ret;
1036
1037 return 0;
1038}
1039
1040static int mmc_test_multi_write(struct mmc_test_card *test)
1041{
1042 int ret;
1043 unsigned int size;
6b174931 1044 struct scatterlist sg;
88ae600d
PO
1045
1046 if (test->card->host->max_blk_count == 1)
1047 return RESULT_UNSUP_HOST;
1048
1049 size = PAGE_SIZE * 2;
1050 size = min(size, test->card->host->max_req_size);
1051 size = min(size, test->card->host->max_seg_size);
1052 size = min(size, test->card->host->max_blk_count * 512);
1053
1054 if (size < 1024)
1055 return RESULT_UNSUP_HOST;
1056
6b174931
PO
1057 sg_init_one(&sg, test->buffer, size);
1058
1059 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
88ae600d
PO
1060 if (ret)
1061 return ret;
1062
1063 return 0;
1064}
1065
1066static int mmc_test_multi_read(struct mmc_test_card *test)
1067{
1068 int ret;
1069 unsigned int size;
6b174931 1070 struct scatterlist sg;
88ae600d
PO
1071
1072 if (test->card->host->max_blk_count == 1)
1073 return RESULT_UNSUP_HOST;
1074
1075 size = PAGE_SIZE * 2;
1076 size = min(size, test->card->host->max_req_size);
1077 size = min(size, test->card->host->max_seg_size);
1078 size = min(size, test->card->host->max_blk_count * 512);
1079
1080 if (size < 1024)
1081 return RESULT_UNSUP_HOST;
1082
6b174931
PO
1083 sg_init_one(&sg, test->buffer, size);
1084
1085 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
88ae600d
PO
1086 if (ret)
1087 return ret;
1088
1089 return 0;
1090}
1091
1092static int mmc_test_pow2_write(struct mmc_test_card *test)
1093{
1094 int ret, i;
6b174931 1095 struct scatterlist sg;
88ae600d
PO
1096
1097 if (!test->card->csd.write_partial)
1098 return RESULT_UNSUP_CARD;
1099
1100 for (i = 1; i < 512;i <<= 1) {
6b174931
PO
1101 sg_init_one(&sg, test->buffer, i);
1102 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
88ae600d
PO
1103 if (ret)
1104 return ret;
1105 }
1106
1107 return 0;
1108}
1109
1110static int mmc_test_pow2_read(struct mmc_test_card *test)
1111{
1112 int ret, i;
6b174931 1113 struct scatterlist sg;
88ae600d
PO
1114
1115 if (!test->card->csd.read_partial)
1116 return RESULT_UNSUP_CARD;
1117
1118 for (i = 1; i < 512;i <<= 1) {
6b174931
PO
1119 sg_init_one(&sg, test->buffer, i);
1120 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
88ae600d
PO
1121 if (ret)
1122 return ret;
1123 }
1124
1125 return 0;
1126}
1127
1128static int mmc_test_weird_write(struct mmc_test_card *test)
1129{
1130 int ret, i;
6b174931 1131 struct scatterlist sg;
88ae600d
PO
1132
1133 if (!test->card->csd.write_partial)
1134 return RESULT_UNSUP_CARD;
1135
1136 for (i = 3; i < 512;i += 7) {
6b174931
PO
1137 sg_init_one(&sg, test->buffer, i);
1138 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
88ae600d
PO
1139 if (ret)
1140 return ret;
1141 }
1142
1143 return 0;
1144}
1145
1146static int mmc_test_weird_read(struct mmc_test_card *test)
1147{
1148 int ret, i;
6b174931 1149 struct scatterlist sg;
88ae600d
PO
1150
1151 if (!test->card->csd.read_partial)
1152 return RESULT_UNSUP_CARD;
1153
1154 for (i = 3; i < 512;i += 7) {
6b174931
PO
1155 sg_init_one(&sg, test->buffer, i);
1156 ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
88ae600d
PO
1157 if (ret)
1158 return ret;
1159 }
1160
1161 return 0;
1162}
1163
1164static int mmc_test_align_write(struct mmc_test_card *test)
1165{
1166 int ret, i;
6b174931 1167 struct scatterlist sg;
88ae600d
PO
1168
1169 for (i = 1;i < 4;i++) {
6b174931
PO
1170 sg_init_one(&sg, test->buffer + i, 512);
1171 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
88ae600d
PO
1172 if (ret)
1173 return ret;
1174 }
1175
1176 return 0;
1177}
1178
1179static int mmc_test_align_read(struct mmc_test_card *test)
1180{
1181 int ret, i;
6b174931 1182 struct scatterlist sg;
88ae600d
PO
1183
1184 for (i = 1;i < 4;i++) {
6b174931
PO
1185 sg_init_one(&sg, test->buffer + i, 512);
1186 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
88ae600d
PO
1187 if (ret)
1188 return ret;
1189 }
1190
1191 return 0;
1192}
1193
1194static int mmc_test_align_multi_write(struct mmc_test_card *test)
1195{
1196 int ret, i;
1197 unsigned int size;
6b174931 1198 struct scatterlist sg;
88ae600d
PO
1199
1200 if (test->card->host->max_blk_count == 1)
1201 return RESULT_UNSUP_HOST;
1202
1203 size = PAGE_SIZE * 2;
1204 size = min(size, test->card->host->max_req_size);
1205 size = min(size, test->card->host->max_seg_size);
1206 size = min(size, test->card->host->max_blk_count * 512);
1207
1208 if (size < 1024)
1209 return RESULT_UNSUP_HOST;
1210
1211 for (i = 1;i < 4;i++) {
6b174931
PO
1212 sg_init_one(&sg, test->buffer + i, size);
1213 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
88ae600d
PO
1214 if (ret)
1215 return ret;
1216 }
1217
1218 return 0;
1219}
1220
1221static int mmc_test_align_multi_read(struct mmc_test_card *test)
1222{
1223 int ret, i;
1224 unsigned int size;
6b174931 1225 struct scatterlist sg;
88ae600d
PO
1226
1227 if (test->card->host->max_blk_count == 1)
1228 return RESULT_UNSUP_HOST;
1229
1230 size = PAGE_SIZE * 2;
1231 size = min(size, test->card->host->max_req_size);
1232 size = min(size, test->card->host->max_seg_size);
1233 size = min(size, test->card->host->max_blk_count * 512);
1234
1235 if (size < 1024)
1236 return RESULT_UNSUP_HOST;
1237
1238 for (i = 1;i < 4;i++) {
6b174931
PO
1239 sg_init_one(&sg, test->buffer + i, size);
1240 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
88ae600d
PO
1241 if (ret)
1242 return ret;
1243 }
1244
1245 return 0;
1246}
1247
1248static int mmc_test_xfersize_write(struct mmc_test_card *test)
1249{
1250 int ret;
1251
1252 ret = mmc_test_set_blksize(test, 512);
1253 if (ret)
1254 return ret;
1255
6b174931 1256 ret = mmc_test_broken_transfer(test, 1, 512, 1);
88ae600d
PO
1257 if (ret)
1258 return ret;
1259
1260 return 0;
1261}
1262
1263static int mmc_test_xfersize_read(struct mmc_test_card *test)
1264{
1265 int ret;
1266
1267 ret = mmc_test_set_blksize(test, 512);
1268 if (ret)
1269 return ret;
1270
6b174931 1271 ret = mmc_test_broken_transfer(test, 1, 512, 0);
88ae600d
PO
1272 if (ret)
1273 return ret;
1274
1275 return 0;
1276}
1277
1278static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
1279{
1280 int ret;
1281
1282 if (test->card->host->max_blk_count == 1)
1283 return RESULT_UNSUP_HOST;
1284
1285 ret = mmc_test_set_blksize(test, 512);
1286 if (ret)
1287 return ret;
1288
6b174931 1289 ret = mmc_test_broken_transfer(test, 2, 512, 1);
88ae600d
PO
1290 if (ret)
1291 return ret;
1292
1293 return 0;
1294}
1295
1296static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
1297{
1298 int ret;
1299
1300 if (test->card->host->max_blk_count == 1)
1301 return RESULT_UNSUP_HOST;
1302
1303 ret = mmc_test_set_blksize(test, 512);
1304 if (ret)
1305 return ret;
1306
6b174931 1307 ret = mmc_test_broken_transfer(test, 2, 512, 0);
88ae600d
PO
1308 if (ret)
1309 return ret;
1310
1311 return 0;
1312}
1313
2661081f
PO
1314#ifdef CONFIG_HIGHMEM
1315
1316static int mmc_test_write_high(struct mmc_test_card *test)
1317{
1318 int ret;
1319 struct scatterlist sg;
1320
1321 sg_init_table(&sg, 1);
1322 sg_set_page(&sg, test->highmem, 512, 0);
1323
1324 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
1325 if (ret)
1326 return ret;
1327
1328 return 0;
1329}
1330
1331static int mmc_test_read_high(struct mmc_test_card *test)
1332{
1333 int ret;
1334 struct scatterlist sg;
1335
1336 sg_init_table(&sg, 1);
1337 sg_set_page(&sg, test->highmem, 512, 0);
1338
1339 ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
1340 if (ret)
1341 return ret;
1342
1343 return 0;
1344}
1345
1346static int mmc_test_multi_write_high(struct mmc_test_card *test)
1347{
1348 int ret;
1349 unsigned int size;
1350 struct scatterlist sg;
1351
1352 if (test->card->host->max_blk_count == 1)
1353 return RESULT_UNSUP_HOST;
1354
1355 size = PAGE_SIZE * 2;
1356 size = min(size, test->card->host->max_req_size);
1357 size = min(size, test->card->host->max_seg_size);
1358 size = min(size, test->card->host->max_blk_count * 512);
1359
1360 if (size < 1024)
1361 return RESULT_UNSUP_HOST;
1362
1363 sg_init_table(&sg, 1);
1364 sg_set_page(&sg, test->highmem, size, 0);
1365
1366 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
1367 if (ret)
1368 return ret;
1369
1370 return 0;
1371}
1372
1373static int mmc_test_multi_read_high(struct mmc_test_card *test)
1374{
1375 int ret;
1376 unsigned int size;
1377 struct scatterlist sg;
1378
1379 if (test->card->host->max_blk_count == 1)
1380 return RESULT_UNSUP_HOST;
1381
1382 size = PAGE_SIZE * 2;
1383 size = min(size, test->card->host->max_req_size);
1384 size = min(size, test->card->host->max_seg_size);
1385 size = min(size, test->card->host->max_blk_count * 512);
1386
1387 if (size < 1024)
1388 return RESULT_UNSUP_HOST;
1389
1390 sg_init_table(&sg, 1);
1391 sg_set_page(&sg, test->highmem, size, 0);
1392
1393 ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
1394 if (ret)
1395 return ret;
1396
1397 return 0;
1398}
1399
64f7120d
AH
1400#else
1401
1402static int mmc_test_no_highmem(struct mmc_test_card *test)
1403{
1404 printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
1405 mmc_hostname(test->card->host));
1406 return 0;
1407}
1408
2661081f
PO
1409#endif /* CONFIG_HIGHMEM */
1410
64f7120d
AH
1411/*
1412 * Map sz bytes so that it can be transferred.
1413 */
fec4dcce 1414static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
64f7120d
AH
1415 int max_scatter)
1416{
1417 struct mmc_test_area *t = &test->area;
c8c8c1bd 1418 int err;
64f7120d
AH
1419
1420 t->blocks = sz >> 9;
1421
1422 if (max_scatter) {
c8c8c1bd
AH
1423 err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
1424 t->max_segs, t->max_seg_sz,
64f7120d 1425 &t->sg_len);
c8c8c1bd
AH
1426 } else {
1427 err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
1428 t->max_seg_sz, &t->sg_len);
64f7120d 1429 }
c8c8c1bd
AH
1430 if (err)
1431 printk(KERN_INFO "%s: Failed to map sg list\n",
1432 mmc_hostname(test->card->host));
1433 return err;
64f7120d
AH
1434}
1435
1436/*
1437 * Transfer bytes mapped by mmc_test_area_map().
1438 */
1439static int mmc_test_area_transfer(struct mmc_test_card *test,
1440 unsigned int dev_addr, int write)
1441{
1442 struct mmc_test_area *t = &test->area;
1443
1444 return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
1445 t->blocks, 512, write);
1446}
1447
1448/*
9f9c4180 1449 * Map and transfer bytes for multiple transfers.
64f7120d 1450 */
9f9c4180
PF
1451static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
1452 unsigned int dev_addr, int write,
1453 int max_scatter, int timed, int count,
1454 bool nonblock)
64f7120d
AH
1455{
1456 struct timespec ts1, ts2;
9f9c4180
PF
1457 int ret = 0;
1458 int i;
1459 struct mmc_test_area *t = &test->area;
64f7120d 1460
c8c8c1bd
AH
1461 /*
1462 * In the case of a maximally scattered transfer, the maximum transfer
1463 * size is further limited by using PAGE_SIZE segments.
1464 */
1465 if (max_scatter) {
1466 struct mmc_test_area *t = &test->area;
1467 unsigned long max_tfr;
1468
1469 if (t->max_seg_sz >= PAGE_SIZE)
1470 max_tfr = t->max_segs * PAGE_SIZE;
1471 else
1472 max_tfr = t->max_segs * t->max_seg_sz;
1473 if (sz > max_tfr)
1474 sz = max_tfr;
1475 }
1476
64f7120d
AH
1477 ret = mmc_test_area_map(test, sz, max_scatter);
1478 if (ret)
1479 return ret;
1480
1481 if (timed)
1482 getnstimeofday(&ts1);
9f9c4180
PF
1483 if (nonblock)
1484 ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
1485 dev_addr, t->blocks, 512, write, count);
1486 else
1487 for (i = 0; i < count && ret == 0; i++) {
1488 ret = mmc_test_area_transfer(test, dev_addr, write);
1489 dev_addr += sz >> 9;
1490 }
64f7120d 1491
64f7120d
AH
1492 if (ret)
1493 return ret;
1494
1495 if (timed)
1496 getnstimeofday(&ts2);
1497
1498 if (timed)
9f9c4180 1499 mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
64f7120d
AH
1500
1501 return 0;
1502}
1503
9f9c4180
PF
1504static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
1505 unsigned int dev_addr, int write, int max_scatter,
1506 int timed)
1507{
1508 return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
1509 timed, 1, false);
1510}
1511
64f7120d
AH
1512/*
1513 * Write the test area entirely.
1514 */
1515static int mmc_test_area_fill(struct mmc_test_card *test)
1516{
253d6a28
AS
1517 struct mmc_test_area *t = &test->area;
1518
1519 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
64f7120d
AH
1520}
1521
1522/*
1523 * Erase the test area entirely.
1524 */
1525static int mmc_test_area_erase(struct mmc_test_card *test)
1526{
1527 struct mmc_test_area *t = &test->area;
1528
1529 if (!mmc_can_erase(test->card))
1530 return 0;
1531
253d6a28 1532 return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
64f7120d
AH
1533 MMC_ERASE_ARG);
1534}
1535
1536/*
1537 * Cleanup struct mmc_test_area.
1538 */
1539static int mmc_test_area_cleanup(struct mmc_test_card *test)
1540{
1541 struct mmc_test_area *t = &test->area;
1542
1543 kfree(t->sg);
1544 mmc_test_free_mem(t->mem);
1545
1546 return 0;
1547}
1548
1549/*
0532ff63
AH
1550 * Initialize an area for testing large transfers. The test area is set to the
1551 * middle of the card because cards may have different charateristics at the
1552 * front (for FAT file system optimization). Optionally, the area is erased
1553 * (if the card supports it) which may improve write performance. Optionally,
1554 * the area is filled with data for subsequent read tests.
64f7120d
AH
1555 */
1556static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
1557{
1558 struct mmc_test_area *t = &test->area;
0532ff63 1559 unsigned long min_sz = 64 * 1024, sz;
64f7120d
AH
1560 int ret;
1561
1562 ret = mmc_test_set_blksize(test, 512);
1563 if (ret)
1564 return ret;
1565
0532ff63
AH
1566 /* Make the test area size about 4MiB */
1567 sz = (unsigned long)test->card->pref_erase << 9;
1568 t->max_sz = sz;
1569 while (t->max_sz < 4 * 1024 * 1024)
1570 t->max_sz += sz;
1571 while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
1572 t->max_sz -= sz;
c8c8c1bd
AH
1573
1574 t->max_segs = test->card->host->max_segs;
1575 t->max_seg_sz = test->card->host->max_seg_size;
1576
1577 t->max_tfr = t->max_sz;
1578 if (t->max_tfr >> 9 > test->card->host->max_blk_count)
1579 t->max_tfr = test->card->host->max_blk_count << 9;
1580 if (t->max_tfr > test->card->host->max_req_size)
1581 t->max_tfr = test->card->host->max_req_size;
1582 if (t->max_tfr / t->max_seg_sz > t->max_segs)
1583 t->max_tfr = t->max_segs * t->max_seg_sz;
1584
64f7120d 1585 /*
3d203be8 1586 * Try to allocate enough memory for a max. sized transfer. Less is OK
64f7120d 1587 * because the same memory can be mapped into the scatterlist more than
c8c8c1bd
AH
1588 * once. Also, take into account the limits imposed on scatterlist
1589 * segments by the host driver.
64f7120d 1590 */
3d203be8 1591 t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
c8c8c1bd 1592 t->max_seg_sz);
64f7120d
AH
1593 if (!t->mem)
1594 return -ENOMEM;
1595
64f7120d
AH
1596 t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL);
1597 if (!t->sg) {
1598 ret = -ENOMEM;
1599 goto out_free;
1600 }
1601
1602 t->dev_addr = mmc_test_capacity(test->card) / 2;
1603 t->dev_addr -= t->dev_addr % (t->max_sz >> 9);
1604
1605 if (erase) {
1606 ret = mmc_test_area_erase(test);
1607 if (ret)
1608 goto out_free;
1609 }
1610
1611 if (fill) {
1612 ret = mmc_test_area_fill(test);
1613 if (ret)
1614 goto out_free;
1615 }
1616
1617 return 0;
1618
1619out_free:
1620 mmc_test_area_cleanup(test);
1621 return ret;
1622}
1623
/*
 * Prepare for large transfers. Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers. Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers. Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
	return mmc_test_area_init(test, 1, 1);
}
1647
1648/*
1649 * Test best-case performance. Best-case performance is expected from
1650 * a single large transfer.
1651 *
1652 * An additional option (max_scatter) allows the measurement of the same
1653 * transfer but with no contiguous pages in the scatter list. This tests
1654 * the efficiency of DMA to handle scattered pages.
1655 */
1656static int mmc_test_best_performance(struct mmc_test_card *test, int write,
1657 int max_scatter)
1658{
253d6a28
AS
1659 struct mmc_test_area *t = &test->area;
1660
1661 return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
1662 max_scatter, 1);
64f7120d
AH
1663}
1664
1665/*
1666 * Best-case read performance.
1667 */
1668static int mmc_test_best_read_performance(struct mmc_test_card *test)
1669{
1670 return mmc_test_best_performance(test, 0, 0);
1671}
1672
1673/*
1674 * Best-case write performance.
1675 */
1676static int mmc_test_best_write_performance(struct mmc_test_card *test)
1677{
1678 return mmc_test_best_performance(test, 1, 0);
1679}
1680
1681/*
1682 * Best-case read performance into scattered pages.
1683 */
1684static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
1685{
1686 return mmc_test_best_performance(test, 0, 1);
1687}
1688
1689/*
1690 * Best-case write performance from scattered pages.
1691 */
1692static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
1693{
1694 return mmc_test_best_performance(test, 1, 1);
1695}
1696
1697/*
1698 * Single read performance by transfer size.
1699 */
1700static int mmc_test_profile_read_perf(struct mmc_test_card *test)
1701{
253d6a28 1702 struct mmc_test_area *t = &test->area;
fec4dcce
AH
1703 unsigned long sz;
1704 unsigned int dev_addr;
64f7120d
AH
1705 int ret;
1706
253d6a28
AS
1707 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1708 dev_addr = t->dev_addr + (sz >> 9);
64f7120d
AH
1709 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1710 if (ret)
1711 return ret;
1712 }
253d6a28
AS
1713 sz = t->max_tfr;
1714 dev_addr = t->dev_addr;
64f7120d
AH
1715 return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
1716}
1717
1718/*
1719 * Single write performance by transfer size.
1720 */
1721static int mmc_test_profile_write_perf(struct mmc_test_card *test)
1722{
253d6a28 1723 struct mmc_test_area *t = &test->area;
fec4dcce
AH
1724 unsigned long sz;
1725 unsigned int dev_addr;
64f7120d
AH
1726 int ret;
1727
1728 ret = mmc_test_area_erase(test);
1729 if (ret)
1730 return ret;
253d6a28
AS
1731 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1732 dev_addr = t->dev_addr + (sz >> 9);
64f7120d
AH
1733 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1734 if (ret)
1735 return ret;
1736 }
1737 ret = mmc_test_area_erase(test);
1738 if (ret)
1739 return ret;
253d6a28
AS
1740 sz = t->max_tfr;
1741 dev_addr = t->dev_addr;
64f7120d
AH
1742 return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
1743}
1744
1745/*
1746 * Single trim performance by transfer size.
1747 */
1748static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
1749{
253d6a28 1750 struct mmc_test_area *t = &test->area;
fec4dcce
AH
1751 unsigned long sz;
1752 unsigned int dev_addr;
64f7120d
AH
1753 struct timespec ts1, ts2;
1754 int ret;
1755
1756 if (!mmc_can_trim(test->card))
1757 return RESULT_UNSUP_CARD;
1758
1759 if (!mmc_can_erase(test->card))
1760 return RESULT_UNSUP_HOST;
1761
253d6a28
AS
1762 for (sz = 512; sz < t->max_sz; sz <<= 1) {
1763 dev_addr = t->dev_addr + (sz >> 9);
64f7120d
AH
1764 getnstimeofday(&ts1);
1765 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1766 if (ret)
1767 return ret;
1768 getnstimeofday(&ts2);
1769 mmc_test_print_rate(test, sz, &ts1, &ts2);
1770 }
253d6a28 1771 dev_addr = t->dev_addr;
64f7120d
AH
1772 getnstimeofday(&ts1);
1773 ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
1774 if (ret)
1775 return ret;
1776 getnstimeofday(&ts2);
1777 mmc_test_print_rate(test, sz, &ts1, &ts2);
1778 return 0;
1779}
1780
c8c8c1bd
AH
1781static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1782{
253d6a28 1783 struct mmc_test_area *t = &test->area;
c8c8c1bd
AH
1784 unsigned int dev_addr, i, cnt;
1785 struct timespec ts1, ts2;
1786 int ret;
1787
253d6a28
AS
1788 cnt = t->max_sz / sz;
1789 dev_addr = t->dev_addr;
c8c8c1bd
AH
1790 getnstimeofday(&ts1);
1791 for (i = 0; i < cnt; i++) {
1792 ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1793 if (ret)
1794 return ret;
1795 dev_addr += (sz >> 9);
1796 }
1797 getnstimeofday(&ts2);
1798 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1799 return 0;
1800}
1801
64f7120d
AH
1802/*
1803 * Consecutive read performance by transfer size.
1804 */
1805static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1806{
253d6a28 1807 struct mmc_test_area *t = &test->area;
fec4dcce 1808 unsigned long sz;
c8c8c1bd
AH
1809 int ret;
1810
253d6a28 1811 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
c8c8c1bd
AH
1812 ret = mmc_test_seq_read_perf(test, sz);
1813 if (ret)
1814 return ret;
1815 }
253d6a28 1816 sz = t->max_tfr;
c8c8c1bd
AH
1817 return mmc_test_seq_read_perf(test, sz);
1818}
1819
1820static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1821{
253d6a28 1822 struct mmc_test_area *t = &test->area;
fec4dcce 1823 unsigned int dev_addr, i, cnt;
64f7120d
AH
1824 struct timespec ts1, ts2;
1825 int ret;
1826
c8c8c1bd
AH
1827 ret = mmc_test_area_erase(test);
1828 if (ret)
1829 return ret;
253d6a28
AS
1830 cnt = t->max_sz / sz;
1831 dev_addr = t->dev_addr;
c8c8c1bd
AH
1832 getnstimeofday(&ts1);
1833 for (i = 0; i < cnt; i++) {
1834 ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1835 if (ret)
1836 return ret;
1837 dev_addr += (sz >> 9);
64f7120d 1838 }
c8c8c1bd
AH
1839 getnstimeofday(&ts2);
1840 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
64f7120d
AH
1841 return 0;
1842}
1843
1844/*
1845 * Consecutive write performance by transfer size.
1846 */
1847static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1848{
253d6a28 1849 struct mmc_test_area *t = &test->area;
fec4dcce 1850 unsigned long sz;
64f7120d
AH
1851 int ret;
1852
253d6a28 1853 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
c8c8c1bd 1854 ret = mmc_test_seq_write_perf(test, sz);
64f7120d
AH
1855 if (ret)
1856 return ret;
64f7120d 1857 }
253d6a28 1858 sz = t->max_tfr;
c8c8c1bd 1859 return mmc_test_seq_write_perf(test, sz);
64f7120d
AH
1860}
1861
1862/*
1863 * Consecutive trim performance by transfer size.
1864 */
1865static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1866{
253d6a28 1867 struct mmc_test_area *t = &test->area;
fec4dcce
AH
1868 unsigned long sz;
1869 unsigned int dev_addr, i, cnt;
64f7120d
AH
1870 struct timespec ts1, ts2;
1871 int ret;
1872
1873 if (!mmc_can_trim(test->card))
1874 return RESULT_UNSUP_CARD;
1875
1876 if (!mmc_can_erase(test->card))
1877 return RESULT_UNSUP_HOST;
1878
253d6a28 1879 for (sz = 512; sz <= t->max_sz; sz <<= 1) {
64f7120d
AH
1880 ret = mmc_test_area_erase(test);
1881 if (ret)
1882 return ret;
1883 ret = mmc_test_area_fill(test);
1884 if (ret)
1885 return ret;
253d6a28
AS
1886 cnt = t->max_sz / sz;
1887 dev_addr = t->dev_addr;
64f7120d
AH
1888 getnstimeofday(&ts1);
1889 for (i = 0; i < cnt; i++) {
1890 ret = mmc_erase(test->card, dev_addr, sz >> 9,
1891 MMC_TRIM_ARG);
1892 if (ret)
1893 return ret;
1894 dev_addr += (sz >> 9);
1895 }
1896 getnstimeofday(&ts2);
1897 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1898 }
1899 return 0;
1900}
1901
b6056d12
AH
static unsigned int rnd_next = 1;

/*
 * Minimal LCG pseudo-random generator (classic rand()-style constants).
 * Returns a value in the range [0, rnd_cnt).
 */
static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}
1912
1913static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1914 unsigned long sz)
1915{
1916 unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1917 unsigned int ssz;
1918 struct timespec ts1, ts2, ts;
1919 int ret;
1920
1921 ssz = sz >> 9;
1922
1923 rnd_addr = mmc_test_capacity(test->card) / 4;
1924 range1 = rnd_addr / test->card->pref_erase;
1925 range2 = range1 / ssz;
1926
1927 getnstimeofday(&ts1);
1928 for (cnt = 0; cnt < UINT_MAX; cnt++) {
1929 getnstimeofday(&ts2);
1930 ts = timespec_sub(ts2, ts1);
1931 if (ts.tv_sec >= 10)
1932 break;
1933 ea = mmc_test_rnd_num(range1);
1934 if (ea == last_ea)
1935 ea -= 1;
1936 last_ea = ea;
1937 dev_addr = rnd_addr + test->card->pref_erase * ea +
1938 ssz * mmc_test_rnd_num(range2);
1939 ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1940 if (ret)
1941 return ret;
1942 }
1943 if (print)
1944 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1945 return 0;
1946}
1947
1948static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1949{
253d6a28 1950 struct mmc_test_area *t = &test->area;
b6056d12
AH
1951 unsigned int next;
1952 unsigned long sz;
1953 int ret;
1954
253d6a28 1955 for (sz = 512; sz < t->max_tfr; sz <<= 1) {
b6056d12
AH
1956 /*
1957 * When writing, try to get more consistent results by running
1958 * the test twice with exactly the same I/O but outputting the
1959 * results only for the 2nd run.
1960 */
1961 if (write) {
1962 next = rnd_next;
1963 ret = mmc_test_rnd_perf(test, write, 0, sz);
1964 if (ret)
1965 return ret;
1966 rnd_next = next;
1967 }
1968 ret = mmc_test_rnd_perf(test, write, 1, sz);
1969 if (ret)
1970 return ret;
1971 }
253d6a28 1972 sz = t->max_tfr;
b6056d12
AH
1973 if (write) {
1974 next = rnd_next;
1975 ret = mmc_test_rnd_perf(test, write, 0, sz);
1976 if (ret)
1977 return ret;
1978 rnd_next = next;
1979 }
1980 return mmc_test_rnd_perf(test, write, 1, sz);
1981}
1982
/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}
1998
a803d551
AH
1999static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
2000 unsigned int tot_sz, int max_scatter)
2001{
253d6a28 2002 struct mmc_test_area *t = &test->area;
a803d551 2003 unsigned int dev_addr, i, cnt, sz, ssz;
5a8fba52 2004 struct timespec ts1, ts2;
a803d551
AH
2005 int ret;
2006
253d6a28
AS
2007 sz = t->max_tfr;
2008
a803d551
AH
2009 /*
2010 * In the case of a maximally scattered transfer, the maximum transfer
2011 * size is further limited by using PAGE_SIZE segments.
2012 */
2013 if (max_scatter) {
a803d551
AH
2014 unsigned long max_tfr;
2015
2016 if (t->max_seg_sz >= PAGE_SIZE)
2017 max_tfr = t->max_segs * PAGE_SIZE;
2018 else
2019 max_tfr = t->max_segs * t->max_seg_sz;
2020 if (sz > max_tfr)
2021 sz = max_tfr;
2022 }
2023
2024 ssz = sz >> 9;
2025 dev_addr = mmc_test_capacity(test->card) / 4;
2026 if (tot_sz > dev_addr << 9)
2027 tot_sz = dev_addr << 9;
2028 cnt = tot_sz / sz;
2029 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2030
2031 getnstimeofday(&ts1);
2032 for (i = 0; i < cnt; i++) {
2033 ret = mmc_test_area_io(test, sz, dev_addr, write,
2034 max_scatter, 0);
2035 if (ret)
2036 return ret;
2037 dev_addr += ssz;
2038 }
2039 getnstimeofday(&ts2);
2040
a803d551
AH
2041 mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2042
2043 return 0;
2044}
2045
/*
 * Repeated large sequential transfers: 10 x 10MiB, 5 x 100MiB, 3 x 1000MiB,
 * all with maximally scattered pages.
 */
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
 */
static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 1);
}
2084
9f9c4180
PF
2085static int mmc_test_rw_multiple(struct mmc_test_card *test,
2086 struct mmc_test_multiple_rw *tdata,
2087 unsigned int reqsize, unsigned int size)
2088{
2089 unsigned int dev_addr;
2090 struct mmc_test_area *t = &test->area;
2091 int ret = 0;
2092
2093 /* Set up test area */
2094 if (size > mmc_test_capacity(test->card) / 2 * 512)
2095 size = mmc_test_capacity(test->card) / 2 * 512;
2096 if (reqsize > t->max_tfr)
2097 reqsize = t->max_tfr;
2098 dev_addr = mmc_test_capacity(test->card) / 4;
2099 if ((dev_addr & 0xffff0000))
2100 dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
2101 else
2102 dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2103 if (!dev_addr)
2104 goto err;
2105
2106 if (reqsize > size)
2107 return 0;
2108
2109 /* prepare test area */
2110 if (mmc_can_erase(test->card) &&
2111 tdata->prepare & MMC_TEST_PREP_ERASE) {
2112 ret = mmc_erase(test->card, dev_addr,
2113 size / 512, MMC_SECURE_ERASE_ARG);
2114 if (ret)
2115 ret = mmc_erase(test->card, dev_addr,
2116 size / 512, MMC_ERASE_ARG);
2117 if (ret)
2118 goto err;
2119 }
2120
2121 /* Run test */
2122 ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2123 tdata->do_write, 0, 1, size / reqsize,
2124 tdata->do_nonblock_req);
2125 if (ret)
2126 goto err;
2127
2128 return ret;
2129 err:
2130 printk(KERN_INFO "[%s] error\n", __func__);
2131 return ret;
2132}
2133
2134static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2135 struct mmc_test_multiple_rw *rw)
2136{
2137 int ret = 0;
2138 int i;
2139 void *pre_req = test->card->host->ops->pre_req;
2140 void *post_req = test->card->host->ops->post_req;
2141
2142 if (rw->do_nonblock_req &&
2143 ((!pre_req && post_req) || (pre_req && !post_req))) {
2144 printk(KERN_INFO "error: only one of pre/post is defined\n");
2145 return -EINVAL;
2146 }
2147
2148 for (i = 0 ; i < rw->len && ret == 0; i++) {
2149 ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size);
2150 if (ret)
2151 break;
2152 }
2153 return ret;
2154}
2155
2156/*
2157 * Multiple blocking write 4k to 4 MB chunks
2158 */
2159static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2160{
2161 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2162 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2163 struct mmc_test_multiple_rw test_data = {
2164 .bs = bs,
2165 .size = TEST_AREA_MAX_SIZE,
2166 .len = ARRAY_SIZE(bs),
2167 .do_write = true,
2168 .do_nonblock_req = false,
2169 .prepare = MMC_TEST_PREP_ERASE,
2170 };
2171
2172 return mmc_test_rw_multiple_size(test, &test_data);
2173};
2174
2175/*
2176 * Multiple non-blocking write 4k to 4 MB chunks
2177 */
2178static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2179{
2180 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2181 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2182 struct mmc_test_multiple_rw test_data = {
2183 .bs = bs,
2184 .size = TEST_AREA_MAX_SIZE,
2185 .len = ARRAY_SIZE(bs),
2186 .do_write = true,
2187 .do_nonblock_req = true,
2188 .prepare = MMC_TEST_PREP_ERASE,
2189 };
2190
2191 return mmc_test_rw_multiple_size(test, &test_data);
2192}
2193
2194/*
2195 * Multiple blocking read 4k to 4 MB chunks
2196 */
2197static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2198{
2199 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2200 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2201 struct mmc_test_multiple_rw test_data = {
2202 .bs = bs,
2203 .size = TEST_AREA_MAX_SIZE,
2204 .len = ARRAY_SIZE(bs),
2205 .do_write = false,
2206 .do_nonblock_req = false,
2207 .prepare = MMC_TEST_PREP_NONE,
2208 };
2209
2210 return mmc_test_rw_multiple_size(test, &test_data);
2211}
2212
2213/*
2214 * Multiple non-blocking read 4k to 4 MB chunks
2215 */
2216static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2217{
2218 unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2219 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2220 struct mmc_test_multiple_rw test_data = {
2221 .bs = bs,
2222 .size = TEST_AREA_MAX_SIZE,
2223 .len = ARRAY_SIZE(bs),
2224 .do_write = false,
2225 .do_nonblock_req = true,
2226 .prepare = MMC_TEST_PREP_NONE,
2227 };
2228
2229 return mmc_test_rw_multiple_size(test, &test_data);
2230}
2231
88ae600d
PO
2232static const struct mmc_test_case mmc_test_cases[] = {
2233 {
2234 .name = "Basic write (no data verification)",
2235 .run = mmc_test_basic_write,
2236 },
2237
2238 {
2239 .name = "Basic read (no data verification)",
2240 .run = mmc_test_basic_read,
2241 },
2242
2243 {
2244 .name = "Basic write (with data verification)",
6b174931 2245 .prepare = mmc_test_prepare_write,
88ae600d 2246 .run = mmc_test_verify_write,
6b174931 2247 .cleanup = mmc_test_cleanup,
88ae600d
PO
2248 },
2249
2250 {
2251 .name = "Basic read (with data verification)",
6b174931 2252 .prepare = mmc_test_prepare_read,
88ae600d 2253 .run = mmc_test_verify_read,
6b174931 2254 .cleanup = mmc_test_cleanup,
88ae600d
PO
2255 },
2256
2257 {
2258 .name = "Multi-block write",
6b174931 2259 .prepare = mmc_test_prepare_write,
88ae600d 2260 .run = mmc_test_multi_write,
6b174931 2261 .cleanup = mmc_test_cleanup,
88ae600d
PO
2262 },
2263
2264 {
2265 .name = "Multi-block read",
6b174931 2266 .prepare = mmc_test_prepare_read,
88ae600d 2267 .run = mmc_test_multi_read,
6b174931 2268 .cleanup = mmc_test_cleanup,
88ae600d
PO
2269 },
2270
2271 {
2272 .name = "Power of two block writes",
6b174931 2273 .prepare = mmc_test_prepare_write,
88ae600d 2274 .run = mmc_test_pow2_write,
6b174931 2275 .cleanup = mmc_test_cleanup,
88ae600d
PO
2276 },
2277
2278 {
2279 .name = "Power of two block reads",
6b174931 2280 .prepare = mmc_test_prepare_read,
88ae600d 2281 .run = mmc_test_pow2_read,
6b174931 2282 .cleanup = mmc_test_cleanup,
88ae600d
PO
2283 },
2284
2285 {
2286 .name = "Weird sized block writes",
6b174931 2287 .prepare = mmc_test_prepare_write,
88ae600d 2288 .run = mmc_test_weird_write,
6b174931 2289 .cleanup = mmc_test_cleanup,
88ae600d
PO
2290 },
2291
2292 {
2293 .name = "Weird sized block reads",
6b174931 2294 .prepare = mmc_test_prepare_read,
88ae600d 2295 .run = mmc_test_weird_read,
6b174931 2296 .cleanup = mmc_test_cleanup,
88ae600d
PO
2297 },
2298
2299 {
2300 .name = "Badly aligned write",
6b174931 2301 .prepare = mmc_test_prepare_write,
88ae600d 2302 .run = mmc_test_align_write,
6b174931 2303 .cleanup = mmc_test_cleanup,
88ae600d
PO
2304 },
2305
2306 {
2307 .name = "Badly aligned read",
6b174931 2308 .prepare = mmc_test_prepare_read,
88ae600d 2309 .run = mmc_test_align_read,
6b174931 2310 .cleanup = mmc_test_cleanup,
88ae600d
PO
2311 },
2312
2313 {
2314 .name = "Badly aligned multi-block write",
6b174931 2315 .prepare = mmc_test_prepare_write,
88ae600d 2316 .run = mmc_test_align_multi_write,
6b174931 2317 .cleanup = mmc_test_cleanup,
88ae600d
PO
2318 },
2319
2320 {
2321 .name = "Badly aligned multi-block read",
6b174931 2322 .prepare = mmc_test_prepare_read,
88ae600d 2323 .run = mmc_test_align_multi_read,
6b174931 2324 .cleanup = mmc_test_cleanup,
88ae600d
PO
2325 },
2326
2327 {
2328 .name = "Correct xfer_size at write (start failure)",
2329 .run = mmc_test_xfersize_write,
2330 },
2331
2332 {
2333 .name = "Correct xfer_size at read (start failure)",
2334 .run = mmc_test_xfersize_read,
2335 },
2336
2337 {
2338 .name = "Correct xfer_size at write (midway failure)",
2339 .run = mmc_test_multi_xfersize_write,
2340 },
2341
2342 {
2343 .name = "Correct xfer_size at read (midway failure)",
2344 .run = mmc_test_multi_xfersize_read,
2345 },
2661081f
PO
2346
2347#ifdef CONFIG_HIGHMEM
2348
2349 {
2350 .name = "Highmem write",
2351 .prepare = mmc_test_prepare_write,
2352 .run = mmc_test_write_high,
2353 .cleanup = mmc_test_cleanup,
2354 },
2355
2356 {
2357 .name = "Highmem read",
2358 .prepare = mmc_test_prepare_read,
2359 .run = mmc_test_read_high,
2360 .cleanup = mmc_test_cleanup,
2361 },
2362
2363 {
2364 .name = "Multi-block highmem write",
2365 .prepare = mmc_test_prepare_write,
2366 .run = mmc_test_multi_write_high,
2367 .cleanup = mmc_test_cleanup,
2368 },
2369
2370 {
2371 .name = "Multi-block highmem read",
2372 .prepare = mmc_test_prepare_read,
2373 .run = mmc_test_multi_read_high,
2374 .cleanup = mmc_test_cleanup,
2375 },
2376
64f7120d
AH
2377#else
2378
2379 {
2380 .name = "Highmem write",
2381 .run = mmc_test_no_highmem,
2382 },
2383
2384 {
2385 .name = "Highmem read",
2386 .run = mmc_test_no_highmem,
2387 },
2388
2389 {
2390 .name = "Multi-block highmem write",
2391 .run = mmc_test_no_highmem,
2392 },
2393
2394 {
2395 .name = "Multi-block highmem read",
2396 .run = mmc_test_no_highmem,
2397 },
2398
2661081f
PO
2399#endif /* CONFIG_HIGHMEM */
2400
64f7120d
AH
2401 {
2402 .name = "Best-case read performance",
2403 .prepare = mmc_test_area_prepare_fill,
2404 .run = mmc_test_best_read_performance,
2405 .cleanup = mmc_test_area_cleanup,
2406 },
2407
2408 {
2409 .name = "Best-case write performance",
2410 .prepare = mmc_test_area_prepare_erase,
2411 .run = mmc_test_best_write_performance,
2412 .cleanup = mmc_test_area_cleanup,
2413 },
2414
2415 {
2416 .name = "Best-case read performance into scattered pages",
2417 .prepare = mmc_test_area_prepare_fill,
2418 .run = mmc_test_best_read_perf_max_scatter,
2419 .cleanup = mmc_test_area_cleanup,
2420 },
2421
2422 {
2423 .name = "Best-case write performance from scattered pages",
2424 .prepare = mmc_test_area_prepare_erase,
2425 .run = mmc_test_best_write_perf_max_scatter,
2426 .cleanup = mmc_test_area_cleanup,
2427 },
2428
2429 {
2430 .name = "Single read performance by transfer size",
2431 .prepare = mmc_test_area_prepare_fill,
2432 .run = mmc_test_profile_read_perf,
2433 .cleanup = mmc_test_area_cleanup,
2434 },
2435
2436 {
2437 .name = "Single write performance by transfer size",
2438 .prepare = mmc_test_area_prepare,
2439 .run = mmc_test_profile_write_perf,
2440 .cleanup = mmc_test_area_cleanup,
2441 },
2442
2443 {
2444 .name = "Single trim performance by transfer size",
2445 .prepare = mmc_test_area_prepare_fill,
2446 .run = mmc_test_profile_trim_perf,
2447 .cleanup = mmc_test_area_cleanup,
2448 },
2449
2450 {
2451 .name = "Consecutive read performance by transfer size",
2452 .prepare = mmc_test_area_prepare_fill,
2453 .run = mmc_test_profile_seq_read_perf,
2454 .cleanup = mmc_test_area_cleanup,
2455 },
2456
2457 {
2458 .name = "Consecutive write performance by transfer size",
2459 .prepare = mmc_test_area_prepare,
2460 .run = mmc_test_profile_seq_write_perf,
2461 .cleanup = mmc_test_area_cleanup,
2462 },
2463
2464 {
2465 .name = "Consecutive trim performance by transfer size",
2466 .prepare = mmc_test_area_prepare,
2467 .run = mmc_test_profile_seq_trim_perf,
2468 .cleanup = mmc_test_area_cleanup,
2469 },
2470
b6056d12
AH
2471 {
2472 .name = "Random read performance by transfer size",
2473 .prepare = mmc_test_area_prepare,
2474 .run = mmc_test_random_read_perf,
2475 .cleanup = mmc_test_area_cleanup,
2476 },
2477
2478 {
2479 .name = "Random write performance by transfer size",
2480 .prepare = mmc_test_area_prepare,
2481 .run = mmc_test_random_write_perf,
2482 .cleanup = mmc_test_area_cleanup,
2483 },
2484
a803d551
AH
2485 {
2486 .name = "Large sequential read into scattered pages",
2487 .prepare = mmc_test_area_prepare,
2488 .run = mmc_test_large_seq_read_perf,
2489 .cleanup = mmc_test_area_cleanup,
2490 },
2491
2492 {
2493 .name = "Large sequential write from scattered pages",
2494 .prepare = mmc_test_area_prepare,
2495 .run = mmc_test_large_seq_write_perf,
2496 .cleanup = mmc_test_area_cleanup,
2497 },
2498
9f9c4180
PF
2499 {
2500 .name = "Write performance with blocking req 4k to 4MB",
2501 .prepare = mmc_test_area_prepare,
2502 .run = mmc_test_profile_mult_write_blocking_perf,
2503 .cleanup = mmc_test_area_cleanup,
2504 },
2505
2506 {
2507 .name = "Write performance with non-blocking req 4k to 4MB",
2508 .prepare = mmc_test_area_prepare,
2509 .run = mmc_test_profile_mult_write_nonblock_perf,
2510 .cleanup = mmc_test_area_cleanup,
2511 },
2512
2513 {
2514 .name = "Read performance with blocking req 4k to 4MB",
2515 .prepare = mmc_test_area_prepare,
2516 .run = mmc_test_profile_mult_read_blocking_perf,
2517 .cleanup = mmc_test_area_cleanup,
2518 },
2519
2520 {
2521 .name = "Read performance with non-blocking req 4k to 4MB",
2522 .prepare = mmc_test_area_prepare,
2523 .run = mmc_test_profile_mult_read_nonblock_perf,
2524 .cleanup = mmc_test_area_cleanup,
2525 },
88ae600d
PO
2526};
2527
/* Serializes test runs and all access to the global result/file lists. */
static DEFINE_MUTEX(mmc_test_lock);
/* Results of completed runs, one mmc_test_general_result per test case. */
static LIST_HEAD(mmc_test_result);
/*
 * Run test cases against a card.  @testcase is 1-based; 0 means "run all
 * test cases".  For each executed case a result container is appended to
 * the global mmc_test_result list so it can be read back via debugfs.
 * Caller must hold mmc_test_lock.
 */
static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
	int i, ret;

	printk(KERN_INFO "%s: Starting tests of card %s...\n",
		mmc_hostname(test->card->host), mmc_card_id(test->card));

	/* Claim the host for the whole run so the card is ours exclusively. */
	mmc_claim_host(test->card->host);

	for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) {
		struct mmc_test_general_result *gr;

		/* Skip everything but the requested case (if any). */
		if (testcase && ((i + 1) != testcase))
			continue;

		printk(KERN_INFO "%s: Test case %d. %s...\n",
			mmc_hostname(test->card->host), i + 1,
			mmc_test_cases[i].name);

		if (mmc_test_cases[i].prepare) {
			ret = mmc_test_cases[i].prepare(test);
			if (ret) {
				printk(KERN_INFO "%s: Result: Prepare "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
				continue;
			}
		}

		/*
		 * Allocation failure is non-fatal: the test still runs,
		 * we just cannot record its result afterwards.
		 */
		gr = kzalloc(sizeof(struct mmc_test_general_result),
			GFP_KERNEL);
		if (gr) {
			INIT_LIST_HEAD(&gr->tr_lst);

			/* Assign data what we know already */
			gr->card = test->card;
			gr->testcase = i;

			/* Append container to global one */
			list_add_tail(&gr->link, &mmc_test_result);

			/*
			 * Save the pointer to created container in our private
			 * structure.
			 */
			test->gr = gr;
		}

		ret = mmc_test_cases[i].run(test);
		switch (ret) {
		case RESULT_OK:
			printk(KERN_INFO "%s: Result: OK\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_FAIL:
			printk(KERN_INFO "%s: Result: FAILED\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_HOST:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by host)\n",
				mmc_hostname(test->card->host));
			break;
		case RESULT_UNSUP_CARD:
			printk(KERN_INFO "%s: Result: UNSUPPORTED "
				"(by card)\n",
				mmc_hostname(test->card->host));
			break;
		default:
			printk(KERN_INFO "%s: Result: ERROR (%d)\n",
				mmc_hostname(test->card->host), ret);
		}

		/* Save the result */
		if (gr)
			gr->result = ret;

		/* Cleanup failure is only a warning; keep going. */
		if (mmc_test_cases[i].cleanup) {
			ret = mmc_test_cases[i].cleanup(test);
			if (ret) {
				printk(KERN_INFO "%s: Warning: Cleanup "
					"stage failed! (%d)\n",
					mmc_hostname(test->card->host),
					ret);
			}
		}
	}

	mmc_release_host(test->card->host);

	printk(KERN_INFO "%s: Tests completed.\n",
		mmc_hostname(test->card->host));
}
2626
3183aa15
AS
2627static void mmc_test_free_result(struct mmc_card *card)
2628{
2629 struct mmc_test_general_result *gr, *grs;
2630
2631 mutex_lock(&mmc_test_lock);
2632
2633 list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
2634 struct mmc_test_transfer_result *tr, *trs;
2635
2636 if (card && gr->card != card)
2637 continue;
2638
2639 list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
2640 list_del(&tr->link);
2641 kfree(tr);
2642 }
2643
2644 list_del(&gr->link);
2645 kfree(gr);
2646 }
2647
2648 mutex_unlock(&mmc_test_lock);
2649}
2650
/* Tracks the debugfs dentries created per card so they can be removed. */
static LIST_HEAD(mmc_test_file_test);
/*
 * seq_file show callback for the per-card debugfs "test" file: dumps the
 * saved result of every test case run on this card, followed by any
 * per-transfer performance records.
 */
static int mtf_test_show(struct seq_file *sf, void *data)
{
	struct mmc_card *card = (struct mmc_card *)sf->private;
	struct mmc_test_general_result *gr;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry(gr, &mmc_test_result, link) {
		struct mmc_test_transfer_result *tr;

		/* The result list is global; only print this card's entries. */
		if (gr->card != card)
			continue;

		/* Test numbers are reported 1-based, matching "testlist". */
		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);

		list_for_each_entry(tr, &gr->tr_lst, link) {
			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
				tr->count, tr->sectors,
				(unsigned long)tr->ts.tv_sec,
				(unsigned long)tr->ts.tv_nsec,
				tr->rate, tr->iops / 100, tr->iops % 100);
		}
	}

	mutex_unlock(&mmc_test_lock);

	return 0;
}
2681
/* debugfs open: bind mtf_test_show to the card stored in i_private. */
static int mtf_test_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_test_show, inode->i_private);
}
2686
2687static ssize_t mtf_test_write(struct file *file, const char __user *buf,
2688 size_t count, loff_t *pos)
2689{
2690 struct seq_file *sf = (struct seq_file *)file->private_data;
2691 struct mmc_card *card = (struct mmc_card *)sf->private;
88ae600d 2692 struct mmc_test_card *test;
130067ed 2693 char lbuf[12];
5c25aee5 2694 long testcase;
88ae600d 2695
130067ed
AS
2696 if (count >= sizeof(lbuf))
2697 return -EINVAL;
2698
2699 if (copy_from_user(lbuf, buf, count))
2700 return -EFAULT;
2701 lbuf[count] = '\0';
2702
2703 if (strict_strtol(lbuf, 10, &testcase))
5c25aee5 2704 return -EINVAL;
fd8c326c 2705
88ae600d
PO
2706 test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
2707 if (!test)
2708 return -ENOMEM;
2709
3183aa15
AS
2710 /*
2711 * Remove all test cases associated with given card. Thus we have only
2712 * actual data of the last run.
2713 */
2714 mmc_test_free_result(card);
2715
88ae600d
PO
2716 test->card = card;
2717
2718 test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
2661081f
PO
2719#ifdef CONFIG_HIGHMEM
2720 test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
2721#endif
2722
2723#ifdef CONFIG_HIGHMEM
2724 if (test->buffer && test->highmem) {
2725#else
88ae600d 2726 if (test->buffer) {
2661081f 2727#endif
88ae600d 2728 mutex_lock(&mmc_test_lock);
fd8c326c 2729 mmc_test_run(test, testcase);
88ae600d
PO
2730 mutex_unlock(&mmc_test_lock);
2731 }
2732
2661081f
PO
2733#ifdef CONFIG_HIGHMEM
2734 __free_pages(test->highmem, BUFFER_ORDER);
2735#endif
88ae600d
PO
2736 kfree(test->buffer);
2737 kfree(test);
2738
2739 return count;
2740}
2741
/* debugfs "test" file: read dumps saved results, write starts a run. */
static const struct file_operations mmc_test_fops_test = {
	.open		= mtf_test_open,
	.read		= seq_read,
	.write		= mtf_test_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2749
54f3caf5
PF
2750static int mtf_testlist_show(struct seq_file *sf, void *data)
2751{
2752 int i;
2753
2754 mutex_lock(&mmc_test_lock);
2755
2756 for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
2757 seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
2758
2759 mutex_unlock(&mmc_test_lock);
2760
2761 return 0;
2762}
2763
/* debugfs open: bind mtf_testlist_show (card pointer unused by show). */
static int mtf_testlist_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtf_testlist_show, inode->i_private);
}
2768
/* debugfs "testlist" file: read-only listing of the available tests. */
static const struct file_operations mmc_test_fops_testlist = {
	.open		= mtf_testlist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2775
/*
 * Remove tracked debugfs files.  A non-NULL @card restricts removal to
 * that card's files; NULL removes everything (module unload).
 */
static void mmc_test_free_file_test(struct mmc_card *card)
{
	struct mmc_test_dbgfs_file *df, *dfs;

	mutex_lock(&mmc_test_lock);

	list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
		if (card && df->card != card)
			continue;
		debugfs_remove(df->file);
		list_del(&df->link);
		kfree(df);
	}

	mutex_unlock(&mmc_test_lock);
}
2792
2793static int mmc_test_register_file_test(struct mmc_card *card)
2794{
2795 struct dentry *file = NULL;
2796 struct mmc_test_dbgfs_file *df;
2797 int ret = 0;
2798
2799 mutex_lock(&mmc_test_lock);
2800
2801 if (card->debugfs_root)
2802 file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
2803 card->debugfs_root, card, &mmc_test_fops_test);
2804
2805 if (IS_ERR_OR_NULL(file)) {
2806 dev_err(&card->dev,
54f3caf5
PF
2807 "Can't create test. Perhaps debugfs is disabled.\n");
2808 ret = -ENODEV;
2809 goto err;
2810 }
2811
2812 if (card->debugfs_root)
2813 file = debugfs_create_file("testlist", S_IRUGO,
2814 card->debugfs_root, card, &mmc_test_fops_testlist);
2815
2816 if (IS_ERR_OR_NULL(file)) {
2817 dev_err(&card->dev,
2818 "Can't create testlist. Perhaps debugfs is disabled.\n");
130067ed
AS
2819 ret = -ENODEV;
2820 goto err;
2821 }
2822
2823 df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
2824 if (!df) {
2825 debugfs_remove(file);
2826 dev_err(&card->dev,
2827 "Can't allocate memory for internal usage.\n");
2828 ret = -ENOMEM;
2829 goto err;
2830 }
2831
2832 df->card = card;
2833 df->file = file;
2834
2835 list_add(&df->link, &mmc_test_file_test);
2836
2837err:
2838 mutex_unlock(&mmc_test_lock);
2839
2840 return ret;
2841}
88ae600d
PO
2842
2843static int mmc_test_probe(struct mmc_card *card)
2844{
2845 int ret;
2846
63be54ce 2847 if (!mmc_card_mmc(card) && !mmc_card_sd(card))
0121a982
PO
2848 return -ENODEV;
2849
130067ed 2850 ret = mmc_test_register_file_test(card);
88ae600d
PO
2851 if (ret)
2852 return ret;
2853
60c9c7b1
PO
2854 dev_info(&card->dev, "Card claimed for testing.\n");
2855
88ae600d
PO
2856 return 0;
2857}
2858
/* Driver remove: drop this card's saved results and debugfs files. */
static void mmc_test_remove(struct mmc_card *card)
{
	mmc_test_free_result(card);
	mmc_test_free_file_test(card);
}
2864
/* MMC bus driver glue: binds probe/remove to cards named "mmc_test". */
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmc_test",
	},
	.probe		= mmc_test_probe,
	.remove		= mmc_test_remove,
};
2872
/* Module init: register the test driver on the MMC bus. */
static int __init mmc_test_init(void)
{
	return mmc_register_driver(&mmc_driver);
}
2877
/* Module exit: free all global state, then unregister the driver. */
static void __exit mmc_test_exit(void)
{
	/* Clear stalled data if card is still plugged */
	mmc_test_free_result(NULL);
	mmc_test_free_file_test(NULL);

	mmc_unregister_driver(&mmc_driver);
}
2886
/* Module entry/exit points and metadata. */
module_init(mmc_test_init);
module_exit(mmc_test_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
MODULE_AUTHOR("Pierre Ossman");
This page took 0.385674 seconds and 5 git commands to generate.