Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--   drivers/mmc/card/Kconfig    |   3
-rw-r--r--   drivers/mmc/card/block.c    |   1
-rw-r--r--   drivers/mmc/card/mmc_test.c | 271
3 files changed, 247 insertions, 28 deletions
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 2a876c4..3b1f783 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -58,12 +58,11 @@ config SDIO_UART
 
 config MMC_TEST
         tristate "MMC host test driver"
-        default n
         help
           Development driver that performs a series of reads and writes
           to a memory card in order to expose certain well known bugs
           in host controllers. The tests are executed by writing to the
-          "test" file in sysfs under each card. Note that whatever is
+          "test" file in debugfs under each card. Note that whatever is
           on your card will be overwritten by these tests.
 
           This driver is only of interest to those developing or
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index bfc8a8a..61d233a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -621,6 +621,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
         md->disk->private_data = md;
         md->disk->queue = md->queue.queue;
         md->disk->driverfs_dev = &card->dev;
+        set_disk_ro(md->disk, md->read_only);
 
         /*
          * As discussed on lkml, GENHD_FL_REMOVABLE should:
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 21adc27..5ec8edd 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -88,6 +88,7 @@ struct mmc_test_area {
  * @sectors: amount of sectors to check in one group
  * @ts: time values of transfer
  * @rate: calculated transfer rate
+ * @iops: I/O operations per second (times 100)
  */
 struct mmc_test_transfer_result {
         struct list_head link;
@@ -95,6 +96,7 @@ struct mmc_test_transfer_result {
         unsigned int sectors;
         struct timespec ts;
         unsigned int rate;
+        unsigned int iops;
 };
 
 /**
@@ -226,9 +228,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
 
                 if (!busy && mmc_test_busy(&cmd)) {
                         busy = 1;
-                        printk(KERN_INFO "%s: Warning: Host did not "
-                                "wait for busy state to end.\n",
-                                mmc_hostname(test->card->host));
+                        if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+                                printk(KERN_INFO "%s: Warning: Host did not "
+                                        "wait for busy state to end.\n",
+                                        mmc_hostname(test->card->host));
                 }
         } while (mmc_test_busy(&cmd));
 
@@ -494,7 +497,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
  */
 static void mmc_test_save_transfer_result(struct mmc_test_card *test,
         unsigned int count, unsigned int sectors, struct timespec ts,
-        unsigned int rate)
+        unsigned int rate, unsigned int iops)
 {
         struct mmc_test_transfer_result *tr;
 
@@ -509,6 +512,7 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
         tr->sectors = sectors;
         tr->ts = ts;
         tr->rate = rate;
+        tr->iops = iops;
 
         list_add_tail(&tr->link, &test->gr->tr_lst);
 }
@@ -519,20 +523,22 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
 static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
                                 struct timespec *ts1, struct timespec *ts2)
 {
-        unsigned int rate, sectors = bytes >> 9;
+        unsigned int rate, iops, sectors = bytes >> 9;
         struct timespec ts;
 
         ts = timespec_sub(*ts2, *ts1);
 
         rate = mmc_test_rate(bytes, &ts);
+        iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
 
         printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
-                         "seconds (%u kB/s, %u KiB/s)\n",
+                         "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
                          mmc_hostname(test->card->host), sectors, sectors >> 1,
                          (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
-                         (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+                         (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+                         iops / 100, iops % 100);
 
-        mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
+        mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
 }
 
 /*
@@ -542,22 +548,24 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
                                     unsigned int count, struct timespec *ts1,
                                     struct timespec *ts2)
 {
-        unsigned int rate, sectors = bytes >> 9;
+        unsigned int rate, iops, sectors = bytes >> 9;
         uint64_t tot = bytes * count;
         struct timespec ts;
 
         ts = timespec_sub(*ts2, *ts1);
 
         rate = mmc_test_rate(tot, &ts);
+        iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
 
         printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
-                         "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
+                         "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+                         "%u.%02u IOPS)\n",
                          mmc_hostname(test->card->host), count, sectors, count,
                          sectors >> 1, (sectors & 1 ? ".5" : ""),
                          (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
-                         rate / 1000, rate / 1024);
+                         rate / 1000, rate / 1024, iops / 100, iops % 100);
 
-        mmc_test_save_transfer_result(test, count, sectors, ts, rate);
+        mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
 }
 
 /*
@@ -1425,28 +1433,29 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
 }
 
 /*
- * Initialize an area for testing large transfers. The size of the area is the
- * preferred erase size which is a good size for optimal transfer speed. Note
- * that is typically 4MiB for modern cards. The test area is set to the middle
- * of the card because cards may have different charateristics at the front
- * (for FAT file system optimization). Optionally, the area is erased (if the
- * card supports it) which may improve write performance. Optionally, the area
- * is filled with data for subsequent read tests.
+ * Initialize an area for testing large transfers. The test area is set to the
+ * middle of the card because cards may have different charateristics at the
+ * front (for FAT file system optimization). Optionally, the area is erased
+ * (if the card supports it) which may improve write performance. Optionally,
+ * the area is filled with data for subsequent read tests.
  */
 static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
 {
         struct mmc_test_area *t = &test->area;
-        unsigned long min_sz = 64 * 1024;
+        unsigned long min_sz = 64 * 1024, sz;
         int ret;
 
         ret = mmc_test_set_blksize(test, 512);
         if (ret)
                 return ret;
 
-        if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
-                t->max_sz = TEST_AREA_MAX_SIZE;
-        else
-                t->max_sz = (unsigned long)test->card->pref_erase << 9;
+        /* Make the test area size about 4MiB */
+        sz = (unsigned long)test->card->pref_erase << 9;
+        t->max_sz = sz;
+        while (t->max_sz < 4 * 1024 * 1024)
+                t->max_sz += sz;
+        while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
+                t->max_sz -= sz;
 
         t->max_segs = test->card->host->max_segs;
         t->max_seg_sz = test->card->host->max_seg_size;
@@ -1766,6 +1775,188 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
         return 0;
 }
 
+static unsigned int rnd_next = 1;
+
+static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
+{
+        uint64_t r;
+
+        rnd_next = rnd_next * 1103515245 + 12345;
+        r = (rnd_next >> 16) & 0x7fff;
+        return (r * rnd_cnt) >> 15;
+}
+
+static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
+                             unsigned long sz)
+{
+        unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
+        unsigned int ssz;
+        struct timespec ts1, ts2, ts;
+        int ret;
+
+        ssz = sz >> 9;
+
+        rnd_addr = mmc_test_capacity(test->card) / 4;
+        range1 = rnd_addr / test->card->pref_erase;
+        range2 = range1 / ssz;
+
+        getnstimeofday(&ts1);
+        for (cnt = 0; cnt < UINT_MAX; cnt++) {
+                getnstimeofday(&ts2);
+                ts = timespec_sub(ts2, ts1);
+                if (ts.tv_sec >= 10)
+                        break;
+                ea = mmc_test_rnd_num(range1);
+                if (ea == last_ea)
+                        ea -= 1;
+                last_ea = ea;
+                dev_addr = rnd_addr + test->card->pref_erase * ea +
+                           ssz * mmc_test_rnd_num(range2);
+                ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
+                if (ret)
+                        return ret;
+        }
+        if (print)
+                mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+        return 0;
+}
+
+static int mmc_test_random_perf(struct mmc_test_card *test, int write)
+{
+        unsigned int next;
+        unsigned long sz;
+        int ret;
+
+        for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+                /*
+                 * When writing, try to get more consistent results by running
+                 * the test twice with exactly the same I/O but outputting the
+                 * results only for the 2nd run.
+                 */
+                if (write) {
+                        next = rnd_next;
+                        ret = mmc_test_rnd_perf(test, write, 0, sz);
+                        if (ret)
+                                return ret;
+                        rnd_next = next;
+                }
+                ret = mmc_test_rnd_perf(test, write, 1, sz);
+                if (ret)
+                        return ret;
+        }
+        sz = test->area.max_tfr;
+        if (write) {
+                next = rnd_next;
+                ret = mmc_test_rnd_perf(test, write, 0, sz);
+                if (ret)
+                        return ret;
+                rnd_next = next;
+        }
+        return mmc_test_rnd_perf(test, write, 1, sz);
+}
+
+/*
+ * Random read performance by transfer size.
+ */
+static int mmc_test_random_read_perf(struct mmc_test_card *test)
+{
+        return mmc_test_random_perf(test, 0);
+}
+
+/*
+ * Random write performance by transfer size.
+ */
+static int mmc_test_random_write_perf(struct mmc_test_card *test)
+{
+        return mmc_test_random_perf(test, 1);
+}
+
+static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
+                             unsigned int tot_sz, int max_scatter)
+{
+        unsigned int dev_addr, i, cnt, sz, ssz;
+        struct timespec ts1, ts2, ts;
+        int ret;
+
+        sz = test->area.max_tfr;
+        /*
+         * In the case of a maximally scattered transfer, the maximum transfer
+         * size is further limited by using PAGE_SIZE segments.
+         */
+        if (max_scatter) {
+                struct mmc_test_area *t = &test->area;
+                unsigned long max_tfr;
+
+                if (t->max_seg_sz >= PAGE_SIZE)
+                        max_tfr = t->max_segs * PAGE_SIZE;
+                else
+                        max_tfr = t->max_segs * t->max_seg_sz;
+                if (sz > max_tfr)
+                        sz = max_tfr;
+        }
+
+        ssz = sz >> 9;
+        dev_addr = mmc_test_capacity(test->card) / 4;
+        if (tot_sz > dev_addr << 9)
+                tot_sz = dev_addr << 9;
+        cnt = tot_sz / sz;
+        dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+
+        getnstimeofday(&ts1);
+        for (i = 0; i < cnt; i++) {
+                ret = mmc_test_area_io(test, sz, dev_addr, write,
+                                       max_scatter, 0);
+                if (ret)
+                        return ret;
+                dev_addr += ssz;
+        }
+        getnstimeofday(&ts2);
+
+        ts = timespec_sub(ts2, ts1);
+        mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+
+        return 0;
+}
+
+static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
+{
+        int ret, i;
+
+        for (i = 0; i < 10; i++) {
+                ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
+                if (ret)
+                        return ret;
+        }
+        for (i = 0; i < 5; i++) {
+                ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
+                if (ret)
+                        return ret;
+        }
+        for (i = 0; i < 3; i++) {
+                ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
+                if (ret)
+                        return ret;
+        }
+
+        return ret;
+}
+
+/*
+ * Large sequential read performance.
+ */
+static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
+{
+        return mmc_test_large_seq_perf(test, 0);
+}
+
+/*
+ * Large sequential write performance.
+ */
+static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
+{
+        return mmc_test_large_seq_perf(test, 1);
+}
+
 static const struct mmc_test_case mmc_test_cases[] = {
         {
                 .name = "Basic write (no data verification)",
@@ -2005,6 +2196,34 @@ static const struct mmc_test_case mmc_test_cases[] = {
                 .cleanup = mmc_test_area_cleanup,
         },
 
+        {
+                .name = "Random read performance by transfer size",
+                .prepare = mmc_test_area_prepare,
+                .run = mmc_test_random_read_perf,
+                .cleanup = mmc_test_area_cleanup,
+        },
+
+        {
+                .name = "Random write performance by transfer size",
+                .prepare = mmc_test_area_prepare,
+                .run = mmc_test_random_write_perf,
+                .cleanup = mmc_test_area_cleanup,
+        },
+
+        {
+                .name = "Large sequential read into scattered pages",
+                .prepare = mmc_test_area_prepare,
+                .run = mmc_test_large_seq_read_perf,
+                .cleanup = mmc_test_area_cleanup,
+        },
+
+        {
+                .name = "Large sequential write from scattered pages",
+                .prepare = mmc_test_area_prepare,
+                .run = mmc_test_large_seq_write_perf,
+                .cleanup = mmc_test_area_cleanup,
+        },
+
 };
 
 static DEFINE_MUTEX(mmc_test_lock);
@@ -2148,11 +2367,11 @@ static int mtf_test_show(struct seq_file *sf, void *data)
                 seq_printf(sf, "Test %d: %d\n", gr->testcase + 1,
                            gr->result);
 
                 list_for_each_entry(tr, &gr->tr_lst, link) {
-                        seq_printf(sf, "%u %d %lu.%09lu %u\n",
+                        seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
                                 tr->count, tr->sectors,
                                 (unsigned long)tr->ts.tv_sec,
                                 (unsigned long)tr->ts.tv_nsec,
-                                tr->rate);
+                                tr->rate, tr->iops / 100, tr->iops % 100);
                 }
         }
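
Note on the random address selection: the random I/O tests pick addresses with a small libc-style linear congruential generator seeded at a fixed value, which keeps runs repeatable (mmc_test_random_perf() saves and restores rnd_next so the write case replays exactly the same I/O twice). The stand-alone user-space sketch below mirrors mmc_test_rnd_num() and the range1/range2 address split from mmc_test_rnd_perf() so the address distribution can be inspected outside the kernel; the card geometry values in main() are made-up examples, not values taken from the patch.

#include <stdio.h>
#include <stdint.h>

/*
 * Stand-alone copy of the generator added in this patch: a libc-style
 * LCG whose top 15 bits are scaled into [0, rnd_cnt).
 */
static unsigned int rnd_next = 1;

static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
        uint64_t r;

        rnd_next = rnd_next * 1103515245 + 12345;
        r = (rnd_next >> 16) & 0x7fff;
        return (r * rnd_cnt) >> 15;
}

int main(void)
{
        /*
         * Example values only: a 4 GiB card (capacity in 512-byte
         * sectors), a 4 MiB preferred erase size and 4 KiB transfers,
         * mirroring the range1/range2 split in mmc_test_rnd_perf().
         */
        unsigned int capacity = 8 * 1024 * 1024;        /* sectors */
        unsigned int pref_erase = 8192;                 /* sectors */
        unsigned int ssz = 8;                           /* 4 KiB in sectors */
        unsigned int rnd_addr = capacity / 4;
        unsigned int range1 = rnd_addr / pref_erase;
        unsigned int range2 = range1 / ssz;
        int i;

        for (i = 0; i < 5; i++) {
                unsigned int ea = mmc_test_rnd_num(range1);
                unsigned int dev_addr = rnd_addr + pref_erase * ea +
                                        ssz * mmc_test_rnd_num(range2);
                printf("dev_addr = %u\n", dev_addr);
        }
        return 0;
}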
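Note on the result format: with this change, each per-transfer line written by mtf_test_show() to the card's "test" file in debugfs carries six fields: count, sectors, the elapsed time as seconds.nanoseconds, the transfer rate in bytes per second, and the IOPS value split into its integer and hundredths parts (tr->iops / 100 and tr->iops % 100). A minimal user-space sketch of decoding such a line follows; parse_result_line() is a hypothetical helper and the sample line is invented, only the field layout comes from the seq_printf() format above, and the debugfs location of the "test" file depends on the card.

#include <stdio.h>

/*
 * Hypothetical helper: decode one per-transfer line in the layout
 * produced by mtf_test_show() after this patch, i.e.
 * "count sectors sec.nsec rate iops.hh".
 */
static int parse_result_line(const char *line)
{
        unsigned int count, rate, iops_int, iops_frac;
        int sectors;
        unsigned long sec, nsec;

        if (sscanf(line, "%u %d %lu.%lu %u %u.%u",
                   &count, &sectors, &sec, &nsec,
                   &rate, &iops_int, &iops_frac) != 7)
                return -1;

        printf("%u x %d sectors in %lu.%09lus: %u KiB/s, %u.%02u IOPS\n",
               count, sectors, sec, nsec, rate / 1024, iops_int, iops_frac);
        return 0;
}

int main(void)
{
        /* Invented example line in the format produced by this patch. */
        return parse_result_line("1 8192 0.421000000 9961472 2.37");
}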