Compare commits

9 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 0ceb01480f | |
| | 21b9118757 | |
| | 4e257da1a3 | |
| | 7c1985400a | |
| | b476f7e51b | |
| | 350e7624ec | |
| | bcfa556a36 | |
| | 570c30e820 | |
| | 4ee8f5c74f | |
@@ -171,6 +171,34 @@ require a large amount of refactoring, e.g. with more use of pytest fixtures.
 
 The code-coverage tests are omitted since they cannot run in parallel due to a
 Python limitation.
 
+Parallel C unit tests
+~~~~~~~~~~~~~~~~~~~~~
+
+The ``ut`` command supports distributing tests across multiple sandbox
+instances using the ``-P`` flag. This is useful when running tests directly
+from the command line without pytest.
+
+To run tests in parallel across 4 workers::
+
+    # Terminal 1
+    /tmp/b/sandbox/u-boot -T -c "ut -P4:0 dm"
+
+    # Terminal 2
+    /tmp/b/sandbox/u-boot -T -c "ut -P4:1 dm"
+
+    # Terminal 3
+    /tmp/b/sandbox/u-boot -T -c "ut -P4:2 dm"
+
+    # Terminal 4
+    /tmp/b/sandbox/u-boot -T -c "ut -P4:3 dm"
+
+The format is ``-P<n>:<w>`` where ``n`` is the total number of workers and
+``w`` is this worker's ID (0 to n-1). Tests are distributed by index modulo
+the number of workers, so each worker runs a disjoint subset.
+
+This can be combined with other flags, e.g. ``-EP4:0`` to emit result lines
+while running as worker 0 of 4.
+
 Testing under a debugger
 ~~~~~~~~~~~~~~~~~~~~~~~~
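As a sanity check on the modulo rule described above, here is a small Python sketch (editor illustration, not part of the patch) showing which tests each worker would pick up:

workers = 4
tests = [f'test_{i}' for i in range(10)]   # ten hypothetical tests

for worker_id in range(workers):
    # 'ut -P4:<w>' keeps only the tests whose index is congruent to <w>
    subset = [name for index, name in enumerate(tests)
              if index % workers == worker_id]
    print(f'worker {worker_id}: {subset}')

Worker 0 gets tests 0, 4 and 8, worker 1 gets 1, 5 and 9, and so on; the subsets are disjoint and together cover every test.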
@@ -246,6 +274,13 @@ Command-line options
 sets the directory used to store persistent test data. This is test data that
 may be re-used across test runs, such as file-system images.
 
+-P, --persist
+  prevents cleanup of test-generated files like disk images after the test run
+  completes. This is useful when iterating on C test code, allowing you to
+  re-run the C tests without re-running the Python fixture that creates the
+  test images. Note that this must be individually supported by each test, e.g.
+  with a check against u_boot_config.persist before removing images.
+
 --timing
   shows a histogram of test duration, at the end of the run. The columns are:
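A minimal sketch of a fixture that honours this flag, mirroring the ext4 test change later in this series (assumptions: a pytest fixture receiving the usual u_boot_config object; the image name is invented):

import os
import pytest

@pytest.fixture
def disk_image(u_boot_config):
    """Yield a test image path, keeping the file when --persist was given."""
    path = os.path.join(u_boot_config.persistent_data_dir, 'disk.img')
    if not os.path.exists(path):
        with open(path, 'wb') as fh:
            fh.write(b'\x00' * (1 << 20))   # 1MB of zeroes
    yield path
    # Cleanup (skip if --persist flag is set)
    if not u_boot_config.persist and os.path.exists(path):
        os.remove(path)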
@@ -11,7 +11,7 @@ Synopsis
 
 ::
 
-    ut [-r<runs>] [-f] [-R] [-I<n>:<one_test>] [<suite> | all [<test>]] [<args>...]
+    ut [-Efmr<runs>] [-R] [-I<n>:<one_test>] [-P<n>:<w>] [<suite> | all [<test>]] [<args>...]
     ut [-s] info
 
 Description
@@ -26,17 +26,30 @@ suite
 test
   Specifies a particular test to run, within a suite, or all suites
 
--f
-  Forces running of a manual test.
+-E
+  Emit a result line after each test, in the format
+  `Result: PASS|FAIL|SKIP: <test_name>: <file>`. This is useful for
+  automated parsing of test results.
+
+-f, -m
+  Force running of manual tests. Manual tests have the `_norun` suffix and
+  are normally skipped because they require external setup (e.g., creating
+  disk images from Python/pytest).
 
 -r <n>
-  Specifies the number of types to run each test
+  Specifies the number of times to run each test
 
 -I <n>:<one_test>
   Test to run after <n> other tests have run. This is used to find which test
   causes another test to fail. If the one test fails, testing stops
   immediately.
 
+-P <n>:<w>
+  Run as worker `<w>` of `<n>` parallel workers. Tests are distributed by
+  index modulo number of workers, so each worker runs a disjoint subset of
+  tests. This allows running tests in parallel across multiple sandbox
+  instances.
+
 -R
   Preserve console recording on test failure. Normally when a test fails,
   console recording is disabled so error messages go directly to output.
@@ -77,6 +90,23 @@ To specify a list of suites to run, <suites> can also be a comma-separated list.
 See :ref:`develop/tests_writing:writing c tests` for more information on how to
 write unit tests.
 
+Return Value
+------------
+
+The `ut` command returns 0 (success) if all tests pass, or 1 (failure) if any
+test fails.
+
+Skipped tests do not cause a failure return. Tests may be skipped for several
+reasons:
+
+- Manual tests (with `_norun` suffix) are skipped unless `-f` or `-m` is used
+- Tests requiring features not available on the current platform (e.g.,
+  `UTF_OTHER_FDT` on non-sandbox, console recording disabled)
+- Tests that explicitly request to be skipped by returning `-EAGAIN`
+
+To detect skipped tests programmatically, use the `-E` flag and check for
+`Result: SKIP:` lines in the output.
+
 ut all
 ~~~~~~
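Not part of the patch, but to illustrate that last point, a harness could collect skips from `ut -E` output with a few lines of Python (the test names here are invented):

import re

# Matches lines like "Result: PASS: dm_test_gpio: gpio.c"
RESULT_RE = re.compile(r'^Result: (PASS|FAIL|SKIP): ([^:]+): (.+)$')

def parse_results(output):
    """Return (status, test_name, file) tuples from 'ut -E' console output."""
    results = []
    for line in output.splitlines():
        m = RESULT_RE.match(line.strip())
        if m:
            results.append(m.groups())
    return results

sample = 'Result: PASS: dm_test_gpio: gpio.c\nResult: SKIP: some_norun_test: fs.c'
print([n for s, n, _ in parse_results(sample) if s == 'SKIP'])  # ['some_norun_test']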
@@ -170,11 +170,20 @@ static int sandbox_mmc_probe(struct udevice *dev)
         int ret;
 
         if (plat->fname) {
-                ret = os_map_file(plat->fname, OS_O_RDWR | OS_O_CREAT,
+                const char *fname = plat->fname;
+                char buf[256];
+
+                /*
+                 * Try persistent data directory first, then fall back to the
+                 * filename as given (for absolute paths or current directory)
+                 */
+                if (!os_persistent_file(buf, sizeof(buf), plat->fname))
+                        fname = buf;
+                ret = os_map_file(fname, OS_O_RDWR | OS_O_CREAT,
                                   (void **)&priv->buf, &priv->size);
                 if (ret) {
                         log_err("%s: Unable to map file '%s'\n", dev->name,
-                                plat->fname);
+                                fname);
                         return ret;
                 }
                 priv->csize = priv->size / SIZE_MULTIPLE - 1;
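The same look-up-then-fall-back pattern recurs in the sf, scsi and flash drivers below. In Python terms it amounts to the following sketch (editor illustration; it assumes, per the comment above, that os_persistent_file() succeeds exactly when the file can be found in the persistent-data directory):

import os

def resolve_test_file(persistent_dir, fname):
    """Prefer the copy in the persistent-data directory, else use fname as-is
    (covers absolute paths and files in the current directory)."""
    candidate = os.path.join(persistent_dir, os.path.basename(fname))
    return candidate if os.path.exists(candidate) else fname

print(resolve_test_file('/tmp/ubtest', 'mmc1.img'))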
@@ -126,7 +126,9 @@ static int sandbox_sf_probe(struct udevice *dev)
         struct dm_spi_slave_plat *slave_plat;
         struct udevice *bus = dev->parent;
         const char *spec = NULL;
+        const char *filename;
         struct udevice *emul;
+        char buf[256];
         int ret = 0;
         int cs = -1;
 
@@ -170,10 +172,16 @@ static int sandbox_sf_probe(struct udevice *dev)
         if (sandbox_sf_0xff[0] == 0x00)
                 memset(sandbox_sf_0xff, 0xff, sizeof(sandbox_sf_0xff));
 
-        sbsf->fd = os_open(pdata->filename, 02);
+        /*
+         * Try persistent data directory first, then fall back to the
+         * filename as given (for absolute paths or current directory)
+         */
+        filename = pdata->filename;
+        if (!os_persistent_file(buf, sizeof(buf), pdata->filename))
+                filename = buf;
+        sbsf->fd = os_open(filename, 02);
         if (sbsf->fd < 0) {
-                printf("%s: unable to open file '%s'\n", __func__,
-                       pdata->filename);
+                log_err("Unable to open file '%s'\n", filename);
                 ret = -EIO;
                 goto error;
         }
@@ -104,9 +104,18 @@ static int sandbox_scsi_probe(struct udevice *dev)
         info->block_size = SANDBOX_SCSI_BLOCK_LEN;
 
         if (priv->pathname) {
-                priv->fd = os_open(priv->pathname, OS_O_RDONLY);
+                const char *pathname = priv->pathname;
+                char buf[256];
+
+                /*
+                 * Try persistent data directory first, then fall back to the
+                 * pathname as given (for absolute paths or current directory)
+                 */
+                if (!os_persistent_file(buf, sizeof(buf), priv->pathname))
+                        pathname = buf;
+                priv->fd = os_open(pathname, OS_O_RDONLY);
                 if (priv->fd >= 0) {
-                        ret = os_get_filesize(priv->pathname, &info->file_size);
+                        ret = os_get_filesize(pathname, &info->file_size);
                         if (ret)
                                 return log_msg_ret("sz", ret);
                 }
@@ -339,11 +339,19 @@ static int sandbox_flash_probe(struct udevice *dev)
         struct sandbox_flash_plat *plat = dev_get_plat(dev);
         struct sandbox_flash_priv *priv = dev_get_priv(dev);
         struct scsi_emul_info *info = &priv->eminfo;
+        const char *pathname = plat->pathname;
+        char buf[256];
         int ret;
 
-        priv->fd = os_open(plat->pathname, OS_O_RDWR);
+        /*
+         * Try persistent data directory first, then fall back to the
+         * pathname as given (for absolute paths or current directory)
+         */
+        if (!os_persistent_file(buf, sizeof(buf), plat->pathname))
+                pathname = buf;
+        priv->fd = os_open(pathname, OS_O_RDWR);
         if (priv->fd >= 0) {
-                ret = os_get_filesize(plat->pathname, &info->file_size);
+                ret = os_get_filesize(pathname, &info->file_size);
                 if (ret)
                         return log_msg_ret("sz", ret);
         }
@@ -89,6 +89,8 @@ struct ut_arg {
  * @of_other: Live tree for the other FDT
  * @runs_per_test: Number of times to run each test (typically 1)
  * @force_run: true to run tests marked with the UTF_MANUAL flag
+ * @workers: Number of parallel workers, 0 if not sharding tests
+ * @worker_id: ID of this worker (0 to workers-1)
  * @old_bloblist: stores the old gd->bloblist pointer
  * @soft_fail: continue execution of the test even after it fails
  * @expect_str: Temporary string used to hold expected string value
@@ -97,6 +99,7 @@ struct ut_arg {
  * @arg_count: Number of parsed arguments
  * @arg_error: Set if ut_str/int/bool() detects a type mismatch
  * @keep_record: Preserve console recording when ut_fail() is called
+ * @emit_result: Emit result line after each test completes
  * @priv: Private data for tests to use as needed
  */
 struct unit_test_state {
@@ -120,6 +123,8 @@ struct unit_test_state {
         struct device_node *of_other;
         int runs_per_test;
         bool force_run;
+        int workers;
+        int worker_id;
         void *old_bloblist;
         bool soft_fail;
         char expect_str[1024];
@@ -128,6 +133,7 @@ struct unit_test_state {
         int arg_count;
         bool arg_error;
         bool keep_record;
+        bool emit_result;
         char priv[UT_PRIV_SIZE];
 };
@@ -255,7 +255,9 @@ static int do_ut(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[])
         bool show_suites = false;
         bool force_run = false;
         bool keep_record = false;
+        bool emit_result = false;
         int runs_per_text = 1;
+        int workers = 0, worker_id = 0;
         struct suite *ste;
         char *name;
         int ret;
@@ -267,25 +269,41 @@ static int do_ut(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[])
         while (argc > 0 && *argv[0] == '-') {
                 const char *str = argv[0];
 
-                switch (str[1]) {
-                case 'r':
-                        runs_per_text = dectoul(str + 2, NULL);
-                        break;
-                case 'f':
-                        force_run = true;
-                        break;
-                case 'I':
-                        test_insert = str + 2;
-                        if (!strchr(test_insert, ':'))
-                                return CMD_RET_USAGE;
-                        break;
-                case 'R':
-                        keep_record = true;
-                        break;
-                case 's':
-                        show_suites = true;
-                        break;
+                for (str++; *str; str++) {
+                        switch (*str) {
+                        case 'E':
+                                emit_result = true;
+                                break;
+                        case 'r':
+                                runs_per_text = dectoul(str + 1, NULL);
+                                goto next_arg;
+                        case 'f':
+                        case 'm':
+                                force_run = true;
+                                break;
+                        case 'I':
+                                test_insert = str + 1;
+                                if (!strchr(test_insert, ':'))
+                                        return CMD_RET_USAGE;
+                                goto next_arg;
+                        case 'P': {
+                                const char *colon = strchr(str + 1, ':');
+
+                                if (!colon)
+                                        return CMD_RET_USAGE;
+                                workers = dectoul(str + 1, NULL);
+                                worker_id = dectoul(colon + 1, NULL);
+                                goto next_arg;
+                        }
+                        case 'R':
+                                keep_record = true;
+                                break;
+                        case 's':
+                                show_suites = true;
+                                break;
+                        }
                 }
+next_arg:
                 argv++;
                 argc--;
         }
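A tiny Python model of the -P<n>:<w> parsing above (editor illustration only; the C version relies on dectoul() stopping at the first non-digit, so strchr() finds the colon):

def parse_parallel(arg):
    """Parse 'P<n>:<w>' option text, e.g. 'P4:1' -> (4, 1)."""
    body = arg[1:]                 # strip the 'P'
    if ':' not in body:
        raise ValueError('usage: -P<n>:<w>')
    workers, worker_id = body.split(':', 1)
    return int(workers), int(worker_id)

assert parse_parallel('P4:1') == (4, 1)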
@@ -295,6 +313,9 @@ static int do_ut(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[])
 
         ut_init_state(&uts);
         uts.keep_record = keep_record;
+        uts.emit_result = emit_result;
+        uts.workers = workers;
+        uts.worker_id = worker_id;
         name = argv[0];
         select_name = cmd_arg1(argc, argv);
 
@@ -340,10 +361,13 @@ static int do_ut(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[])
 }
 
 U_BOOT_LONGHELP(ut,
-        "[-rs] [-f] [-R] [-I<n>:<one_test>] <suite> [<test> [<args>...]] - run unit tests\n"
+        "[-Efmrs] [-R] [-I<n>:<one_test>] [-P<n>:<w>] <suite> [<test> [<args>...]]\n"
+        "   - run unit tests\n"
+        "   -E Emit result line after each test\n"
         "   -r<runs> Number of times to run each test\n"
-        "   -f Force 'manual' tests to run as well\n"
+        "   -f/-m Force 'manual' tests to run as well\n"
         "   -I Test to run after <n> other tests have run\n"
+        "   -P<n>:<w> Run as worker <w> of <n> parallel workers\n"
        "   -R Preserve console recording on test failure\n"
         "   -s Show all suites with ut info\n"
         "   <suite> Test suite to run (or comma-separated list)\n"
@@ -27,10 +27,12 @@ static int dm_test_spi_flash(struct unit_test_state *uts)
         uint map_size;
         ulong map_base;
         uint offset;
+        char pathname[256];
         int i;
 
         src = map_sysmem(0x20000, full_size);
-        ut_assertok(os_write_file("spi.bin", src, full_size));
+        ut_assertok(os_persistent_file(pathname, sizeof(pathname), "spi.bin"));
+        ut_assertok(os_write_file(pathname, src, full_size));
         ut_assertok(uclass_first_device_err(UCLASS_SPI_FLASH, &dev));
 
         dst = map_sysmem(0x20000 + full_size, full_size);
@@ -100,6 +100,8 @@ def pytest_addoption(parser):
         help="Assume that U-Boot is ready and don't wait for a prompt")
     parser.addoption('--timing', default=False, action='store_true',
                      help='Show info on test timing')
+    parser.addoption('-P', '--persist', default=False, action='store_true',
+                     help='Persist test artifacts (do not clean up after tests)')
 
 
 def run_build(config, source_dir, build_dir, board_type, log):
@@ -346,6 +348,7 @@ def pytest_configure(config):
     ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb'
     ubconfig.connection_ok = True
     ubconfig.timing = config.getoption('timing')
+    ubconfig.persist = config.getoption('persist')
     ubconfig.role = config.getoption('role')
     ubconfig.allow_exceptions = config.getoption('allow_exceptions')
@@ -485,6 +485,33 @@ class ConsoleBase():
             output.append(self.run_command(cmd))
         return output
 
+    def run_ut(self, suite, test, **kwargs):
+        """Run a manual unit test
+
+        Run a unit test that has the _norun suffix, meaning it requires
+        external setup (like creating a disk image) before it can run.
+
+        Args:
+            suite (str): Test suite name (e.g., 'fs')
+            test (str): Test name without _norun suffix
+                (e.g., 'fs_test_ext4l_probe')
+            **kwargs: Test arguments passed as key=value
+                (e.g., fs_image='/path/to/img')
+
+        Returns:
+            str: Command output
+
+        Raises:
+            AssertionError: If test reports failures
+        """
+        args = ' '.join(f'{k}={v}' for k, v in kwargs.items())
+        cmd = f'ut -f {suite} {test}_norun'
+        if args:
+            cmd += f' {args}'
+        output = self.run_command(cmd)
+        assert 'failures: 0' in output, f'Test {test} failed'
+        return output
+
     def send(self, msg):
         """Send characters without waiting for echo, etc."""
         self.run_command(msg, wait_for_prompt=False, wait_for_echo=False,
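To see what run_ut() actually sends to the console, here is a runnable sketch of just its command construction, mirroring the method above (the file path is made up):

def build_ut_cmd(suite, test, **kwargs):
    """Mirror run_ut()'s command construction, for illustration."""
    args = ' '.join(f'{k}={v}' for k, v in kwargs.items())
    cmd = f'ut -f {suite} {test}_norun'
    return f'{cmd} {args}' if args else cmd

print(build_ut_cmd('fs', 'fs_test_ls', fs_type='ext4', fs_image='/tmp/fs.img'))
# -> ut -f fs fs_test_ls_norun fs_type=ext4 fs_image=/tmp/fs.img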
@@ -126,7 +126,7 @@ booti ${kernel_addr_r} ${ramdisk_addr_r} ${fdt_addr_r}
     utils.run_and_log_no_ubman(log, f'echo here {kernel} {symlink}')
     os.symlink(kernel, symlink)
     fsh.mk_fs()
-    img = DiskHelper(config, mmc_dev, 'mmc', True)
+    img = DiskHelper(config, mmc_dev, 'mmc')
     img.add_fs(fsh, DiskHelper.EXT4)
     img.create()
     fsh.cleanup()
@@ -84,7 +84,7 @@ def setup_extlinux_image(config, log, devnum, basename, vmlinux, initrd, dtbdir,
 
     fsh.mk_fs()
 
-    img = DiskHelper(config, devnum, basename, True)
+    img = DiskHelper(config, devnum, basename)
     img.add_fs(fsh, DiskHelper.VFAT, bootable=True)
 
     ext4 = FsHelper(config, 'ext4', max(1, part2_size - 30), prefix=basename,
@@ -31,7 +31,7 @@ def setup_efi_image(config):
 
     fsh.mk_fs()
 
-    img = DiskHelper(config, devnum, 'flash', True)
+    img = DiskHelper(config, devnum, 'flash')
     img.add_fs(fsh, DiskHelper.VFAT)
     img.create()
     fsh.cleanup()
@@ -261,9 +261,7 @@ def test_fit_print(ubman):
     build_test_fit(ubman, fit)
 
     # Run the C test which will load and verify this FIT
-    ubman.run_command('ut -f bootstd test_fit_print_norun')
-    result = ubman.run_command('echo $?')
-    assert '0' == result
+    ubman.run_ut('bootstd', 'test_fit_print')
 
 
 @pytest.mark.boardspec('sandbox')
@@ -279,9 +277,7 @@ def test_fit_print_no_desc(ubman):
     utils.run_and_log(ubman, ['fdtput', '-d', fit, '/', 'description'])
 
     # Run the C test to check the missing description
-    ubman.run_command('ut -f bootstd test_fit_print_no_desc_norun')
-    result = ubman.run_command('echo $?')
-    assert '0' == result
+    ubman.run_ut('bootstd', 'test_fit_print_no_desc')
 
 @pytest.mark.boardspec('sandbox')
 @pytest.mark.buildconfigspec('fit_print')
@@ -16,39 +16,6 @@ from fstest_defs import SMALL_FILE, BIG_FILE
 from fstest_helpers import assert_fs_integrity
 
 
-def run_c_test(ubman, fs_type, fs_img, test_name, small=None, big=None,
-               md5val=None):
-    """Run a C unit test with proper setup.
-
-    Args:
-        ubman (ConsoleBase): U-Boot console manager
-        fs_type (str): Filesystem type (ext4, fat, fs_generic, exfat)
-        fs_img (str): Path to filesystem image
-        test_name (str): Name of C test function (without _norun suffix)
-        small (str): Filename of small test file (optional)
-        big (str): Filename of big test file (optional)
-        md5val (str): Expected MD5 value for verification (optional)
-
-    Returns:
-        bool: True if test passed, False otherwise
-    """
-    # Build the command with arguments
-    cmd = f'ut -f fs {test_name}_norun fs_type={fs_type} fs_image={fs_img}'
-    if small:
-        cmd += f' small={small}'
-    if big:
-        cmd += f' big={big}'
-    if md5val:
-        cmd += f' md5val={md5val}'
-
-    # Run the C test
-    ubman.run_command(cmd)
-
-    # Check result
-    result = ubman.run_command('echo $?')
-    return result.strip() == '0'
-
-
 @pytest.mark.boardspec('sandbox')
 @pytest.mark.slow
 class TestFsBasic:
@@ -58,94 +25,92 @@ class TestFsBasic:
         """Test Case 1 - ls command, listing root and invalid directories"""
         fs_type, fs_img, _ = fs_obj_basic
         with ubman.log.section('Test Case 1 - ls'):
-            assert run_c_test(ubman, fs_type, fs_img, 'fs_test_ls',
-                              small=SMALL_FILE, big=BIG_FILE)
+            ubman.run_ut('fs', 'fs_test_ls', fs_type=fs_type, fs_image=fs_img,
+                         small=SMALL_FILE, big=BIG_FILE)
 
     def test_fs2(self, ubman, fs_obj_basic):
         """Test Case 2 - size command for a small file"""
         fs_type, fs_img, _ = fs_obj_basic
         with ubman.log.section('Test Case 2 - size (small)'):
-            assert run_c_test(ubman, fs_type, fs_img, 'fs_test_size_small',
-                              small=SMALL_FILE)
+            ubman.run_ut('fs', 'fs_test_size_small', fs_type=fs_type,
+                         fs_image=fs_img, small=SMALL_FILE)
 
     def test_fs3(self, ubman, fs_obj_basic):
         """Test Case 3 - size command for a large file"""
         fs_type, fs_img, _ = fs_obj_basic
         with ubman.log.section('Test Case 3 - size (large)'):
-            assert run_c_test(ubman, fs_type, fs_img, 'fs_test_size_big',
-                              big=BIG_FILE)
+            ubman.run_ut('fs', 'fs_test_size_big', fs_type=fs_type,
+                         fs_image=fs_img, big=BIG_FILE)
 
     def test_fs4(self, ubman, fs_obj_basic):
         """Test Case 4 - load a small file, 1MB"""
         fs_type, fs_img, md5val = fs_obj_basic
         with ubman.log.section('Test Case 4 - load (small)'):
-            assert run_c_test(ubman, fs_type, fs_img, 'fs_test_load_small',
-                              small=SMALL_FILE, md5val=md5val[0])
+            ubman.run_ut('fs', 'fs_test_load_small', fs_type=fs_type,
+                         fs_image=fs_img, small=SMALL_FILE, md5val=md5val[0])
 
     def test_fs5(self, ubman, fs_obj_basic):
         """Test Case 5 - load, reading first 1MB of 3GB file"""
         fs_type, fs_img, md5val = fs_obj_basic
         with ubman.log.section('Test Case 5 - load (first 1MB)'):
-            assert run_c_test(ubman, fs_type, fs_img, 'fs_test_load_big_first',
-                              big=BIG_FILE, md5val=md5val[1])
+            ubman.run_ut('fs', 'fs_test_load_big_first', fs_type=fs_type,
+                         fs_image=fs_img, big=BIG_FILE, md5val=md5val[1])
 
     def test_fs6(self, ubman, fs_obj_basic):
         """Test Case 6 - load, reading last 1MB of 3GB file"""
         fs_type, fs_img, md5val = fs_obj_basic
         with ubman.log.section('Test Case 6 - load (last 1MB)'):
-            assert run_c_test(ubman, fs_type, fs_img, 'fs_test_load_big_last',
-                              big=BIG_FILE, md5val=md5val[2])
+            ubman.run_ut('fs', 'fs_test_load_big_last', fs_type=fs_type,
+                         fs_image=fs_img, big=BIG_FILE, md5val=md5val[2])
 
     def test_fs7(self, ubman, fs_obj_basic):
         """Test Case 7 - load, 1MB from the last 1MB in 2GB"""
         fs_type, fs_img, md5val = fs_obj_basic
         with ubman.log.section('Test Case 7 - load (last 1MB in 2GB)'):
-            assert run_c_test(ubman, fs_type, fs_img,
-                              'fs_test_load_big_2g_last',
-                              big=BIG_FILE, md5val=md5val[3])
+            ubman.run_ut('fs', 'fs_test_load_big_2g_last', fs_type=fs_type,
+                         fs_image=fs_img, big=BIG_FILE, md5val=md5val[3])
 
     def test_fs8(self, ubman, fs_obj_basic):
         """Test Case 8 - load, reading first 1MB in 2GB"""
         fs_type, fs_img, md5val = fs_obj_basic
         with ubman.log.section('Test Case 8 - load (first 1MB in 2GB)'):
-            assert run_c_test(ubman, fs_type, fs_img,
-                              'fs_test_load_big_2g_first',
-                              big=BIG_FILE, md5val=md5val[4])
+            ubman.run_ut('fs', 'fs_test_load_big_2g_first', fs_type=fs_type,
+                         fs_image=fs_img, big=BIG_FILE, md5val=md5val[4])
 
     def test_fs9(self, ubman, fs_obj_basic):
         """Test Case 9 - load, 1MB crossing 2GB boundary"""
         fs_type, fs_img, md5val = fs_obj_basic
        with ubman.log.section('Test Case 9 - load (crossing 2GB boundary)'):
-            assert run_c_test(ubman, fs_type, fs_img,
-                              'fs_test_load_big_2g_cross',
-                              big=BIG_FILE, md5val=md5val[5])
+            ubman.run_ut('fs', 'fs_test_load_big_2g_cross', fs_type=fs_type,
+                         fs_image=fs_img, big=BIG_FILE, md5val=md5val[5])
 
     def test_fs10(self, ubman, fs_obj_basic):
         """Test Case 10 - load, reading beyond file end"""
         fs_type, fs_img, _ = fs_obj_basic
         with ubman.log.section('Test Case 10 - load (beyond file end)'):
-            assert run_c_test(ubman, fs_type, fs_img, 'fs_test_load_beyond',
-                              big=BIG_FILE)
+            ubman.run_ut('fs', 'fs_test_load_beyond', fs_type=fs_type,
+                         fs_image=fs_img, big=BIG_FILE)
 
     def test_fs11(self, ubman, fs_obj_basic):
         """Test Case 11 - write"""
         fs_type, fs_img, md5val = fs_obj_basic
         with ubman.log.section('Test Case 11 - write'):
-            assert run_c_test(ubman, fs_type, fs_img, 'fs_test_write',
-                              small=SMALL_FILE, md5val=md5val[0])
+            ubman.run_ut('fs', 'fs_test_write', fs_type=fs_type,
+                         fs_image=fs_img, small=SMALL_FILE, md5val=md5val[0])
         assert_fs_integrity(fs_type, fs_img)
 
     def test_fs12(self, ubman, fs_obj_basic):
         """Test Case 12 - write to "." directory"""
         fs_type, fs_img, _ = fs_obj_basic
         with ubman.log.section('Test Case 12 - write (".")'):
-            assert run_c_test(ubman, fs_type, fs_img, 'fs_test_write_dot')
+            ubman.run_ut('fs', 'fs_test_write_dot', fs_type=fs_type,
+                         fs_image=fs_img)
         assert_fs_integrity(fs_type, fs_img)
 
     def test_fs13(self, ubman, fs_obj_basic):
         """Test Case 13 - write to a file with '/./<filename>'"""
         fs_type, fs_img, md5val = fs_obj_basic
         with ubman.log.section('Test Case 13 - write ("./<file>")'):
-            assert run_c_test(ubman, fs_type, fs_img, 'fs_test_write_dotpath',
-                              small=SMALL_FILE, md5val=md5val[0])
+            ubman.run_ut('fs', 'fs_test_write_dotpath', fs_type=fs_type,
+                         fs_image=fs_img, small=SMALL_FILE, md5val=md5val[0])
         assert_fs_integrity(fs_type, fs_img)
@@ -65,8 +65,8 @@ class TestExt4l:
 
         yield image_path
 
-        # Cleanup
-        if os.path.exists(image_path):
+        # Cleanup (skip if --persist flag is set)
+        if not u_boot_config.persist and os.path.exists(image_path):
             os.remove(image_path)
 
     def test_probe(self, ubman, ext4_image):
@@ -33,5 +33,4 @@ def test_upl_handoff(ubman):
     assert 'UPL state: active' == output
 
     # Check the FIT offsets look correct
-    output = ubman.run_command('ut upl -f upl_test_info_norun')
-    assert 'failures: 0' in output
+    ubman.run_ut('upl', 'upl_test_info')
@@ -34,21 +34,22 @@ from img.localboot import setup_localboot_image
 def test_ut_dm_init(ubman):
     """Initialize data for ut dm tests."""
 
-    fn = ubman.config.source_dir + '/testflash.bin'
+    # This is used by flash-stick@0 in test.py
+    fn = ubman.config.persistent_data_dir + '/testflash.bin'
     if not os.path.exists(fn):
         data = b'this is a test'
         data += b'\x00' * ((4 * 1024 * 1024) - len(data))
         with open(fn, 'wb') as fh:
             fh.write(data)
 
-    fn = ubman.config.source_dir + '/spi.bin'
+    fn = ubman.config.persistent_data_dir + '/spi.bin'
     if not os.path.exists(fn):
         data = b'\x00' * (2 * 1024 * 1024)
         with open(fn, 'wb') as fh:
             fh.write(data)
 
-    # Create a file with a single partition
-    fn = ubman.config.source_dir + '/scsi.img'
+    # Create a file with a single partition (used by /scsi in test.dts)
+    fn = ubman.config.persistent_data_dir + '/scsi.img'
     if not os.path.exists(fn):
         data = b'\x00' * (2 * 1024 * 1024)
         with open(fn, 'wb') as fh:
@@ -56,11 +57,13 @@ def test_ut_dm_init(ubman):
     utils.run_and_log(
         ubman, f'sfdisk {fn}', stdin=b'type=83')
 
+    # These two are used by test/dm/host.c
     FsHelper(ubman.config, 'ext2', 2, '2MB').mk_fs()
     FsHelper(ubman.config, 'fat32', 1, '1MB').mk_fs()
 
+    # This is used by test/cmd/mbr.c
     mmc_dev = 6
-    fn = os.path.join(ubman.config.source_dir, f'mmc{mmc_dev}.img')
+    fn = os.path.join(ubman.config.persistent_data_dir, f'mmc{mmc_dev}.img')
     data = b'\x00' * (12 * 1024 * 1024)
     with open(fn, 'wb') as fh:
         fh.write(data)
@@ -127,9 +127,7 @@ def test_vbe_extlinux_fit_no_oem(ubman):
     fname = os.path.join(ubman.config.persistent_data_dir, 'vbe0.img')
     ubman.run_command(f'host bind 0 {fname}')
 
-    ubman.run_command('ut -f bootstd vbe_test_abrec_no_oem_norun')
-    result = ubman.run_command('echo $?')
-    assert '0' == result
+    ubman.run_ut('bootstd', 'vbe_test_abrec_no_oem')
 
 @pytest.mark.boardspec('sandbox')
 def test_vbe_extlinux_fit_oem(ubman):
@@ -137,6 +135,4 @@ def test_vbe_extlinux_fit_oem(ubman):
     fname = os.path.join(ubman.config.persistent_data_dir, 'vbe1.img')
     ubman.run_command(f'host bind 0 {fname}')
 
-    ubman.run_command('ut -f bootstd vbe_test_abrec_oem_norun')
-    result = ubman.run_command('echo $?')
-    assert '0' == result
+    ubman.run_ut('bootstd', 'vbe_test_abrec_oem')
@@ -624,6 +624,7 @@ static int ut_run_test(struct unit_test_state *uts, struct unit_test *test,
 {
         const char *fname = strrchr(test->file, '/') + 1;
         const char *note = "";
+        int old_fail_count;
         int ret;
 
         if ((test->flags & UTF_DM) && !uts->of_live)
@@ -639,6 +640,7 @@ static int ut_run_test(struct unit_test_state *uts, struct unit_test *test,
         if (ret)
                 return ret;
 
+        old_fail_count = uts->cur.fail_count;
         uts->arg_error = false;
         ret = test->func(uts);
         if (ret == -EAGAIN)
@@ -650,6 +652,13 @@ static int ut_run_test(struct unit_test_state *uts, struct unit_test *test,
 
         ut_set_state(NULL);
 
+        if (uts->emit_result) {
+                bool passed = uts->cur.fail_count == old_fail_count;
+
+                printf("Result: %s: %s: %s%s\n", passed ? "PASS" : "FAIL",
+                       test_name, fname, note);
+        }
+
         return 0;
 }
@@ -781,6 +790,10 @@ static int ut_run_tests(struct unit_test_state *uts, const char *prefix,
             !test_matches(prefix, test_name, select_name))
                 continue;
 
+        /* Skip tests not assigned to this worker */
+        if (uts->workers && upto % uts->workers != uts->worker_id)
+                continue;
+
         if (test->flags & UTF_MANUAL) {
                 int len;