diff --git a/.github/workflows/trickops.yml b/.github/workflows/trickops.yml
new file mode 100644
index 000000000..8b8a52651
--- /dev/null
+++ b/.github/workflows/trickops.yml
@@ -0,0 +1,79 @@
+name: TrickOps
+# This workflow is triggered on pushes and pull requests to the repository.
+on: [push, pull_request]
+
+defaults:
+  run:
+    shell: bash
+
+jobs:
+  trickops-tests-ubuntu:
+    name: Unit Tests Ubuntu:20.04
+    runs-on: ubuntu-20.04
+    container: ubuntu:20.04
+    steps:
+      - uses: actions/checkout@master
+      - name: install dependencies
+        # Note that perl is for trick-gte, which TrickOps runs; qt and everything after it are for koviz
+        run: |
+          export DEBIAN_FRONTEND=noninteractive && apt-get update && apt-get install -y git python3 python3-venv perl perl-modules-5.30 qtbase5-dev wget unzip g++ make flex bison
+      - name: create virtual environment
+        run: |
+          cd share/trick/trickops/
+          python3 -m venv .venv && source .venv/bin/activate && pip3 install -r requirements.txt
+      - name: get and build koviz
+        run: |
+          cd /tmp/ && wget -q https://github.com/nasa/koviz/archive/refs/heads/master.zip && unzip master.zip
+          cd /tmp/koviz-master/ && qmake && make
+      - name: run unit and doc tests
+        run: |
+          cd share/trick/trickops/tests/
+          source ../.venv/bin/activate
+          export PATH="/tmp/koviz-master/bin:${PATH}"
+          ./run_tests.py
+      - uses: actions/upload-artifact@master
+        if: ${{ always() }}
+        with:
+          name: doctests
+          path: |
+            share/trick/trickops/tests/*_doctest_log.txt
+            /tmp/log.*
+
+  trickops-tests-centos8:
+    name: Unit Tests CentOS:latest
+    runs-on: ubuntu-20.04
+    container: centos:latest
+    steps:
+      - uses: actions/checkout@master
+      - name: install dependencies
+        # Note that perl is for trick-gte, which TrickOps runs; qt and everything after it are for koviz
+        run: |
+          dnf install -y git python3-devel which perl perl-Digest-MD5 qt5-qtbase-devel bison clang flex make gcc gcc-c++ wget
+      - name: create virtual environment
+        run: |
+          cd share/trick/trickops/
+          python3 -m venv .venv && source .venv/bin/activate && pip3 install -r requirements.txt
+      - name: get and build koviz
+        run: |
+          cd /tmp/ && wget -q https://github.com/nasa/koviz/archive/refs/heads/master.zip && unzip master.zip
+          cd /tmp/koviz-master/ && qmake-qt5 && make
+      - name: run unit and doc tests
+        run: |
+          cd share/trick/trickops/tests/
+          source ../.venv/bin/activate
+          export PATH="/tmp/koviz-master/bin:${PATH}"
+          ./run_tests.py
+      - uses: actions/upload-artifact@master
+        if: ${{ always() }}
+        with:
+          name: doctests
+          path: |
+            share/trick/trickops/tests/*_doctest_log.txt
+            /tmp/log.*
+
+# TODO: ExampleWorkflow.py is not included here because it needs a built Trick
+# to function correctly and I don't want to duplicate the Trick build testing
+# here to provide testing of what is essentially an example provided for
+# documentation purposes. If we could leverage artifacts from a previous
+# stage and/or stable containers where Trick is already pre-built, we should
+# consider adding ExampleWorkflow.py to testing in this file. -Jordan 4/2021
diff --git a/docs/documentation/Documentation-Home.md b/docs/documentation/Documentation-Home.md
index 0eb74b390..2b80b6ede 100644
--- a/docs/documentation/Documentation-Home.md
+++ b/docs/documentation/Documentation-Home.md
@@ -66,5 +66,6 @@ The user guide contains information pertinent to Trick users. These pages will h
 01. [Miscellaneous Trick Tools](miscellaneous_trick_tools/Miscellaneous-Trick-Tools)
     01. [Python Variable Server Client](miscellaneous_trick_tools/Python-Variable-Server-Client)
+    02. [TrickOps Sim Testing Framework](miscellaneous_trick_tools/TrickOps.md)
 01. [Software Requirements Specification](software_requirements_specification/SRS)
diff --git a/docs/documentation/miscellaneous_trick_tools/TrickOps.md b/docs/documentation/miscellaneous_trick_tools/TrickOps.md
new file mode 100644
index 000000000..228dcd873
--- /dev/null
+++ b/docs/documentation/miscellaneous_trick_tools/TrickOps.md
@@ -0,0 +1,294 @@
+# Table of Contents
+* [Requirements](#Requirements)
+* [Features](#Features)
+* [The YAML File](#The-YAML-File)
+* [Learn by Example](#learn-trickops-with-an-example-workflow-that-uses-this-trick-repository)
+* [File Comparisons](#compare---file-vs-file-comparisons)
+* [Post-Run Analysis](#analyze---post-run-analysis)
+* [Where does my output go?](#where-does-the-output-of-my-tests-go)
+* [Other Useful Examples](#other-useful-examples)
+* [The TrickOps Design](#regarding-the-design-why-do-i-have-to-write-my-own-script)
+* [Tips & Best Practices](#tips--best-practices)
+
+# TrickOps
+
+TrickOps is shorthand for "Trick Operations". It is a `python3` framework that provides an easy-to-use interface for common testing and workflow actions that Trick simulation developers and users often run repeatedly. Good software developer workflows typically have a script or set of scripts that the developer can run to answer the question "have I broken anything?". The purpose of TrickOps is to provide the logic central to managing these tests while allowing each project to define how and what they wish to test. Don't reinvent the wheel, use TrickOps!
+
+TrickOps is *not* a GUI; it's a set of python modules that you can `import` to build a testing framework for your Trick-based project with just a few lines of python code.
+
+## Requirements
+
+`python` version 3.X and the packages in `requirements.txt` are required to use TrickOps. Virtual environments make this easy; see [the python documentation on virtual environments](https://docs.python.org/3/library/venv.html) for information on how to create a `python3` virtual environment using the `requirements.txt` file included here.
+
+## Features
+
+TrickOps provides the following features:
+ 1. Multiple simultaneous sim builds, runs (with or without valgrind), logged data file comparisons, and post-run analyses
+ 2. Automatic project environment file sourcing
+ 3. Real-time progress bars for sim builds and runs with in-terminal curses interface
+ 4. Exit code management lets users easily define success & failure criteria
+ 5. Failed comparisons can optionally generate koviz error reports
+ 6. Sims, their runs, comparisons, etc. are defined in a single easy-to-read YAML file which TrickOps reads
+
+## The YAML File
+
+The YAML file is a required input to the framework which defines all of the tests your project cares about. It is literally a list of sims, each of which may contain runs, each run may contain comparisons and post-run analyses, and so on. Here's a very simple example YAML file for a project with two sims, each having one run:
+
+```yaml
+SIM_spacecraft:
+  path: sims/SIM_spacecraft
+  runs:
+    RUN_test/input.py:
+      returns: 0
+
+SIM_visiting_vehicle:
+  path: sims/SIM_visiting_vehicle
+  runs:
+    RUN_test/input.py:
+      returns: 0
+```
+Simple and readable, this config file is parsed by `PyYAML` and adheres to all normal YAML syntactical rules.
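+
+Because the config file is plain YAML, you can sanity-check it before handing it to the framework. Here's a minimal sketch that loads a config file with `PyYAML` directly (the file name is hypothetical):
+
+```python
+import yaml  # provided by the PyYAML package listed in requirements.txt
+
+with open('project_config.yml') as f:
+    config = yaml.safe_load(f)  # parses to an ordinary dict keyed by top-level name
+
+# Print each sim entry and its path, mirroring the structure shown above
+for name, entry in config.items():
+    if name.startswith('SIM'):
+        print(name, '->', entry['path'])
+```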
+
+Additionally, the convention `TrickWorkflow` expects for defining sims, runs, comparisons, etc. is described in the `TrickWorkflow` docstrings and also summarized in detail below:
+
+```yaml
+globals:
+  env:                 <-- optional literal string executed before all tests, e.g. env setup
+  parallel_safety:     <-- strict won't allow multiple input files per RUN dir
+
+SIM_abc:               <-- required unique name for sim of interest, must start with SIM
+  path:                <-- required SIM path relative to project top level
+  description:         <-- optional description for this sim
+  labels:              <-- optional list of labels for this sim, can be used to get sims
+    - model_x              by label within the framework, or for any other project-defined
+    - verification          purpose
+  build_command:       <-- optional literal cmd executed for SIM_build, defaults to trick-CP
+  size:                <-- optional estimated size of successful build output file in bytes
+  runs:                <-- optional dict of runs to be executed for this sim, where the
+    RUN_1/input.py --foo:    dict keys are the literal arguments passed to the sim binary
+    RUN_2/input.py:          and the dict values are other run-specific optional dictionaries
+    ...                      described as follows ...
+      returns:         <---- optional exit code of this run upon completion (0-255). Defaults
+                             to 0
+      compare:         <---- optional list of <path> vs. <path> comparison strings to be
+        - a vs. b            compared after this run is complete. This is extensible in that
+        - d vs. e            all non-list values are ignored and assumed to be used to define
+        - ...                an alternate comparison method in derived classes
+      analyze:         <-- optional arbitrary string to execute as job in bash shell from
+                           project top level, for project-specific post-run analysis
+  valgrind:            <-- optional dict describing how to execute runs within valgrind
+    flags:             <-- string of all flags passed to valgrind for all runs
+    runs:              <-- list of literal arguments passed to the sim binary through
+      - RUN_1...           valgrind
+
+non_sim_extension_example:
+  will: be ignored by TrickWorkflow parsing for derived classes to implement as they wish
+```
+
+Almost everything in this file is optional, but there must be at least one top-level key that starts with `SIM` and it must contain a valid `path:` value with respect to the top-level directory of your project. Here, `SIM_abc` represents "any sim" and the name is up to the user, but it *must* begin with `SIM` since `TrickWorkflow` purposefully ignores any top-level key not beginning with `SIM` in order to allow for extensibility of the YAML file for non-sim tests specific to a project.
+
+There is *no limit* to the number of `SIM`s, `runs:`, `compare:` lists, `valgrind` `runs:` lists, etc. This file is intended to contain every sim, every sim's runs, every run's comparisons, and so on that your project cares about. Remember, this file represents the *pool* of tests, not necessarily what *must* be tested every time your scripts which use it run.
+
+## Learn TrickOps with an Example Workflow that uses this Trick Repository
+
+Included in TrickOps is `ExampleWorkflow.py`, which sets up a simple "build all sims, run all runs" testing script in less than 20 lines of code! It clones the Trick repo from https://github.com/nasa/trick.git, writes a YAML file describing the sims and runs to test, then uses TrickOps to build sims and run runs in parallel. To see it in action, simply run the script:
+
+```bash
+cd trick/share/trick/trickops/
+./ExampleWorkflow.py
+```
+When running, you should see output that looks like this:
+
+![ExampleWorkflow In Action](trickops_example.png)
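+
+If you plan to run the script where no interactive terminal is available (in a CI system, for example), you can pass `quiet` as the first argument to suppress the curses-based progress bars; the argument handling at the bottom of the script shown below checks for it:
+
+```bash
+cd trick/share/trick/trickops/
+./ExampleWorkflow.py quiet
+```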
+
+When running, you'll notice that tests occur in two phases. First, sims build in parallel up to three at a time. Then, when all builds complete, sims run in parallel up to three at a time. Progress bars show how far along each build and sim run is at any given time. The terminal window will accept scroll wheel and arrow input to view current builds/runs that are longer than the terminal height.
+
+Looking inside the script, the code at the top creates a YAML file containing a large portion of the sims and runs that ship with Trick and writes it to `/tmp/config.yml`. This config file will be input to the framework. At the bottom of the script is where the magic happens; this is where the TrickOps modules are used:
+
+```python
+from TrickWorkflow import *
+class ExampleWorkflow(TrickWorkflow):
+    def __init__( self, quiet, trick_top_level='/tmp/trick'):
+        # Real projects already have trick somewhere, but for this test, just clone it
+        if not os.path.exists(trick_top_level):
+            os.system('cd %s && git clone https://github.com/nasa/trick' % (os.path.dirname(trick_top_level)))
+        # Base Class initialize, this creates internal management structures
+        TrickWorkflow.__init__(self, project_top_level=trick_top_level, log_dir='/tmp/',
+            trick_dir=trick_top_level, config_file="/tmp/config.yml", cpus=3, quiet=quiet)
+    def run( self):
+        build_jobs = self.get_jobs(kind='build')
+        run_jobs = self.get_jobs(kind='run')
+
+        builds_status = self.execute_jobs(build_jobs, max_concurrent=3, header='Executing all sim builds.')
+        runs_status = self.execute_jobs(run_jobs, max_concurrent=3, header='Executing all sim runs.')
+        self.report()          # Print Verbose report
+        self.status_summary()  # Print a Succinct summary
+        return (builds_status or runs_status or self.config_errors)
+if __name__ == "__main__":
+    sys.exit(ExampleWorkflow(quiet=(True if len(sys.argv) > 1 and 'quiet' in sys.argv[1] else False)).run())
+```
+Let's look at a few key parts of the example script. Here, we create a new class arbitrarily named `ExampleWorkflow`, which inherits from `TrickWorkflow`, a class provided by the `TrickWorkflow.py` module. As part of its class setup, it clones a new `trick` repo from GitHub and places it in `/tmp/`. Since `/tmp/trick` provides many example sims and runs, we eat our own dogfood here and use it to represent our `project_top_level` for example purposes.
+
+```python
+from TrickWorkflow import *
+class ExampleWorkflow(TrickWorkflow):
+    def __init__( self, quiet, trick_top_level='/tmp/trick'):
+        # Real projects already have trick somewhere, but for this test, just clone it
+        if not os.path.exists(trick_top_level):
+            os.system('cd %s && git clone https://github.com/nasa/trick' % (os.path.dirname(trick_top_level)))
+```
+Our new class `ExampleWorkflow` can be initialized however we wish as long as it provides the necessary arguments to its base-class initializer. In this example, `__init__` takes two parameters: `trick_top_level`, which defaults to `/tmp/trick`, and `quiet`, which will be `False` unless `quiet` is found in the command-line args to this script. The magic happens on the very next line, where we call the base-class `TrickWorkflow` initializer, which accepts four required parameters:
+
+```python
+        TrickWorkflow.__init__(self, project_top_level=trick_top_level, log_dir='/tmp/',
+            trick_dir=trick_top_level, config_file="/tmp/config.yml", cpus=3, quiet=quiet)
+```
+The required parameters are described as follows:
+* `project_top_level` is the absolute path to the highest-level directory of your project. The "top level" is up to the user to define, but usually this is the top level of your repository; at minimum it must be a directory from which all sims, runs, and other files used in your testing are recursively reachable.
The "top level" is up to the user to define, but usually this is the top level of your repository and at minimum must be a directory from which all sims, runs, and other files used in your testing are recursively reachable. +* `log_dir` is a path to a user-chosen directory where all logging for all tests will go. This path will be created for you if it doesn't already exist. +* `trick_dir` is an absolute path to the top level directory for the instance of trick used for your project. For projects that use trick as a `git` `submodule`, this is usually `/trick` +* `config_file` is the path to a YAML config file describing the sims, runs, etc. for your project. It's recommended this file be tracked in your SCM tool but that is not required. More information on the syntax expected in this file in the **The YAML File** section below. + +The optional parameters are described as follows: +* `cpus` tells the framework how many CPUs to use on sim builds. This translates directly to `MAKEFLAGS` and is separate from the maximum number of simultaneous sim builds. +* `quiet` tells the framework to suppress progress bars and other verbose output. It's a good idea to use `quiet=True` if your scripts are going to be run in a continuous integration (CI) testing framework such as GitHub Actions, GitLab CI, or Jenkins, because it suppresses all `curses` logic during job execution which itself expects `stdin` to exist. + +When `TrickWorkflow` initializes, it reads the `config_file` and verifies the information given matches the expected convention. If a non-fatal error is encountered, a message detailing the error is printed to `stdout` and the internal timestamped log file under `log_dir`. A fatal error will `raise RuntimeError`. + +Moving on to the next important lines of code in our `ExampleWorkflow.py` script. The `def run(self):` line declares a function whose return code on run is passed back to the calling shell via `sys.exit()`. This is where we use the functions given to us by inherting from `TrickWorkflow`: + + +```python + build_jobs = self.get_jobs(kind='build') + run_jobs = self.get_jobs(kind='run') + builds_status = self.execute_jobs(build_jobs, max_concurrent=3, header='Executing all sim builds.') + runs_status = self.execute_jobs(run_jobs, max_concurrent=3, header='Executing all sim runs.') +``` + +These four lines of code will build all simulations in our config file in parallel up to three at once, followed by running all simulation runs in parallel up to three at once. The `get_jobs()` function is the easiest way to get a set of jobs from the pool of jobs we defined in our config file of a certain `kind`. It will return a list of `Job` instances matching the `kind` given. For example `build_jobs = self.get_jobs(kind='build')` means "give me a list of every job which builds a simulation defined in my config file". These `Job` instances can then be passed as a list to the `execute_jobs()` function which manages their execution and collection of their return codes and output. `execute_jobs` will start the list of `Job`s given to it, running a maximum of `max_concurrent` at a time, and returns 0 (`False)` if all jobs success, 1 (`True`) if any job failed. `kind` can be: `'build'`, `'run'`, `'valgrind'`, or `'analysis'`. + +The last three lines simply print a detailed report of what was executed and manage the exit codes so that `ExampleWorkflow.py`'s return code can be trusted to report pass/fail status to the calling shell in the typical linux style: zero on success, non-zero on failure. 
+
+The last three lines simply print a detailed report of what was executed and manage the exit codes so that `ExampleWorkflow.py`'s return code can be trusted to report pass/fail status to the calling shell in the typical Linux style: zero on success, non-zero on failure.
+
+```python
+        self.report()          # Print Verbose report
+        self.status_summary()  # Print a Succinct summary
+        return (builds_status or runs_status or self.config_errors)
+```
+
+The `ExampleWorkflow.py` script uses sims/runs provided by Trick to exercise *some* of the functionality provided by TrickOps. This script does not have any comparisons, post-run analyses, or valgrind runs defined in the YAML file, so there is no execution of those tests in this example.
+
+## `compare:` - File vs. File Comparisons
+
+In the `TrickWorkflow` base class, a "comparison" is an easy way to compare two logged data files via an `md5sum` compare. Many sim workflows generate logged data when their sims run and want to compare that logged data to a stored-off baseline (a.k.a. regression) version of that file to answer the question: "has my sim response changed?". TrickOps makes it easy to execute these tests by providing a `compare()` function at various levels of the architecture which returns 0 (`False`) on success and 1 (`True`) if the files do not match.
+
+In the YAML file, the `compare:` section under a specific `run:` key defines the comparison the user is interested in. For example, consider this YAML file:
+
+```yaml
+SIM_ball:
+  path: path/to/SIM_ball
+  runs:
+    RUN_foo/input.py:
+    RUN_test/input.py:
+      compare:
+        - path/to/SIM_ball/RUN_test/log_a.csv vs. regression/SIM_ball/log_a.csv
+        - path/to/SIM_ball/RUN_test/log_b.trk vs. regression/SIM_ball/log_b.trk
+```
+In this example, `SIM_ball`'s run `RUN_foo/input.py` doesn't have any comparisons, but `RUN_test/input.py` contains two comparisons, each of which compares data generated by the execution of `RUN_test/input.py` to a stored-off version of the file under the `regression/` directory relative to the top level of the project. The comparisons themselves can be executed in your python script via the `compare()` function in multiple ways. For example:
+
+```python
+# Assuming we're in a python script using TrickWorkflow as a base class and RUN_test/input.py
+# has already been executed locally so that logged data exists on disk...
+
+# OPTION 1: Execute compare() at the top level of TrickWorkflow
+ret = self.compare()  # Execute all comparisons for all runs across all sims
+
+# OPTION 2: Get the sim you care about and execute compare() on only that sim
+s = self.get_sim('SIM_ball')  # Get the sim we care about
+ret = s.compare()  # Execute all comparisons for all runs in 'SIM_ball'
+
+# OPTION 3: Get the run you care about and execute compare() at the run level
+r = self.get_sim('SIM_ball').get_run('RUN_test/input.py')
+ret = r.compare()  # Execute all comparisons for run 'RUN_test/input.py'
+```
+
+In all three of these options, `ret` will be 0 (`False`) if all comparisons succeeded and 1 (`True`) if at least one comparison failed. You may have noticed the `get_jobs()` function does not accept `kind='comparison'`. This is because comparisons are not `Job` instances. Use `compare()` to execute comparisons, and `get_jobs()` with `execute_jobs()` for builds, runs, analyses, etc.
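+
+Since `compare()` returns 0 (`False`) or 1 (`True`) just like `execute_jobs()`, a natural pattern is to fold comparison results into your script's final exit status alongside builds and runs. A minimal sketch, assuming the run jobs shown earlier have already executed:
+
+```python
+        # 0 (False) if all comparisons matched, 1 (True) if any differed
+        comparison_status = self.compare()
+        return (builds_status or runs_status or comparison_status or self.config_errors)
+```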
+
+### Koviz Utility Functions
+
+When a comparison fails, usually the developer's next step is to look at the differences in a plotting tool. TrickOps provides an interface to [koviz](https://github.com/nasa/koviz), which lets you quickly and easily generate error plot PDFs when a set of comparisons fail. This requires that the `koviz` binary be on your user `PATH` and that your `koviz` version is newer than 4/1/2021. Here are a couple examples of how to do this:
+
+```python
+# Assuming we're in a python script using TrickWorkflow as a base class and have
+# executed a compare() which has failed...
+
+# OPTION 1: Get all koviz report jobs for all failed comparisons in your YAML file
+(koviz_jobs, failures) = self.get_koviz_report_jobs()
+# Execute koviz jobs to generate the error plots, up to four at once
+if not failures:
+    self.execute_jobs(koviz_jobs, max_concurrent=4, header='Generating koviz error plots')
+
+# OPTION 2: Get a single koviz report job by providing two directories to have koviz compare
+(krj, failure) = self.get_koviz_report_job('path/to/test_data/', 'path/to/baseline_data/')
+if not failure:
+    self.execute_jobs([krj], header='Generating koviz error plots')
+```
+
+If an error is encountered (for example, `koviz` or a given directory cannot be found), `None` is returned in the first index of the tuple, and the error information is returned in the second index of the tuple for `get_koviz_report_job()`. The `get_koviz_report_jobs()` function just wraps the singular call and returns a tuple of `( list_of_jobs, list_of_any_failures )`. Note that `koviz` accepts entire directories as input, not specific paths to files. Keep this in mind when you organize how regression data is stored and how logged data is generated by your runs.
+
+## `analyze:` - Post-Run Analysis
+
+The optional `analyze:` section of a `run:` is intended to be a catch-all for "post-run analysis". The string given will be transformed into a `Job()` instance that can be retrieved and executed via `execute_jobs()` just like any other test. All analyze jobs are assumed to return 0 on success, non-zero on failure. One example use case for this would be creating a Jupyter notebook that contains an analysis of a particular run.
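+
+For example, a run's `analyze:` entry might invoke a project-specific script against that run's logged data (the script path here is hypothetical):
+
+```yaml
+SIM_ball:
+  path: path/to/SIM_ball
+  runs:
+    RUN_test/input.py:
+      analyze: ./scripts/check_ball_run.py path/to/SIM_ball/RUN_test/
+```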
+
+## Where does the output of my tests go?
+
+All output goes to a single directory `log_dir`, which is a required input to the `TrickWorkflow.__init__()` function. Sim builds, runs, comparisons, koviz reports, etc. are all put in a single directory with unique names. This is purposeful for two reasons:
+
+1. When a failure occurs, the user can always find verbose output in the same place
+1. Easy integration into CI artifact collection mechanisms. Just collect `log_dir` to get all verbose output for successful and failed tests alike.
+
+Logging for the `TrickWorkflow` itself, including a list of all executed tests, will automatically be created in `<log_dir>/log.<timestamp>.txt`.
+
+## Other Useful Examples
+
+```python
+# Assuming you are within the scope of a class inheriting from TrickWorkflow ...
+self.report()                 # Report colorized verbose information regarding the pool of tests
+s = self.status_summary()     # Print a succinct summary regarding the pool of tests
+myvar = self.config['myvar']  # Access custom key 'myvar' in YAML file
+tprint("My important msg")    # Print to the screen and log a message internally
+# Define my own custom job and run it using execute_jobs()
+myjob = Job(name='my job', command='sleep 10', log_file='/tmp/out.txt', expected_exit_status=0)
+self.execute_jobs([myjob])
+```
+
+## Regarding the Design: Why do I have to write my own script?
+
+You may be thinking, "sure it's great that it only takes a few lines of python code to use this framework, but why isn't TrickOps just a generic cmdline interface that I can use? Why isn't it just this?":
+
+```bash
+./trickops --config project_config.yml
+```
+
+This is purposeful -- handling every project-specific constraint is impossible. Here are a few examples of project-specific constraints that make a generic catch-all `./trickops` script very difficult to implement:
+* "I want to fail testing on SIM_a, but SIM_b's build is allowed to fail!"
+  - Solution: Project-specific script defines success with `sys.exit()`
+* "I need to add project-specific key-value pairs in the YAML file!"
+  - Solution: Project-specific script reads `self.config` to get these
+* "I don't want to use `koviz`, I want to generate error plots with matlab!"
+  - Solution: Project-specific script extends `class Comparison`
+* "I have a pre-sim-build mechanism (like `matlab`) that complicates everything!"
+  - Solution: Project-specific script runs `execute_jobs()` on custom `Job`s before normal sim builds are executed
+
+## Tips & Best Practices
+
+* Commit your YAML file to your project. What gets tested will change over time, and you want to track those changes.
+* If your project requires an environment, it's usually a good idea to track a source-able environment file that users can execute in their shell. For example, if `myproject/.bashrc` contains your project environment, you should add `source .bashrc ;` to the `env:` section of `globals` in your YAML config file (see the sketch after this list). This tells `TrickWorkflow` to add `source .bashrc ; ` before every `Job()`'s `command`.
+* Make sure you execute your tests in an order that makes sense logically. The TrickOps framework will not automatically execute a sim build before a sim run, for example; it's on the user to define the order in which tests run and which tests are important to them.
+* Be cognizant of how many CPUs you've passed into `TrickWorkflow.__init__` and how many sims you build at once. Each sim build will use the `cpus` given to `TrickWorkflow.__init__`, so if you are building 3 sims at once, each with 3 CPUs, you're technically requesting 9 CPUs worth of build, so to speak.
+* If `TrickWorkflow` encounters non-fatal errors while validating the content of the given YAML config file, it will set the internal member `self.config_errors` to `True`. If you want your script to return non-zero on any non-fatal error, add this value to your final script `sys.exit()`.
+* Treat the YAML file like your project owns it. You can store project-specific information and retrieve that information in your scripts by accessing the `self.config` dictionary. Anything not recognized by the internal validation of the YAML file is ignored, but that information is still provided to the user. For example, if you wanted to store a list of POCs in your YAML file so that your script could print a helpful message on error, simply add a new entry `project_pocs: email1, email2...` and then access that information via `self.config['project_pocs']` in your script.
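+
+In YAML form, the environment tip above looks like this (a minimal sketch assuming the hypothetical `myproject/.bashrc`):
+
+```yaml
+globals:
+  env: source .bashrc ;
+```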
+
+## More Information
+
+A lot of time was spent adding `python` docstrings to the `TrickWorkflow.py` and `WorkflowCommon.py` modules. This README does not cover all functionality, so please see the in-code documentation for more detailed information on the framework.
diff --git a/docs/documentation/miscellaneous_trick_tools/trickops_example.png b/docs/documentation/miscellaneous_trick_tools/trickops_example.png
new file mode 100644
index 000000000..7644e2b8c
Binary files /dev/null and b/docs/documentation/miscellaneous_trick_tools/trickops_example.png differ
diff --git a/share/trick/trickops/ColorStr.py b/share/trick/trickops/ColorStr.py
new file mode 100644
index 000000000..776b00b49
--- /dev/null
+++ b/share/trick/trickops/ColorStr.py
@@ -0,0 +1,63 @@
+# Simple color string utility class
+class ColorStr:
+
+    def __init__( self ):
+        self.CODE={
+            'ENDC':0,  # RESET COLOR
+            'BOLD':1,
+            'UNDERLINE':4,
+            'BLINK':5,
+            'INVERT':7,
+            'CONCEALD':8,
+            'STRIKE':9,
+            'GREY30':90,
+            'GREY40':2,
+            'GREY65':37,
+            'GREY70':97,
+            'GREY20_BG':40,
+            'GREY33_BG':100,
+            'GREY80_BG':47,
+            'GREY93_BG':107,
+            'DARK_RED':31,
+            'RED':91,
+            'RED_BG':41,
+            'LIGHT_RED_BG':101,
+            'DARK_YELLOW':33,
+            'YELLOW':93,
+            'YELLOW_BG':43,
+            'LIGHT_YELLOW_BG':103,
+            'DARK_BLUE':34,
+            'BLUE':94,
+            'BLUE_BG':44,
+            'LIGHT_BLUE_BG':104,
+            'DARK_MAGENTA':35,
+            'PURPLE':95,
+            'MAGENTA_BG':45,
+            'LIGHT_PURPLE_BG':105,
+            'DARK_CYAN':36,
+            'AQUA':96,
+            'CYAN_BG':46,
+            'LIGHT_AUQA_BG':106,
+            'DARK_GREEN':32,
+            'GREEN':92,
+            'GREEN_BG':42,
+            'LIGHT_GREEN_BG':102,
+            'BLACK':30 \
+        }
+
+    def getCodes( self ):
+        return self.CODE
+
+    def termcode(self, num):
+        return '\033[%sm'%num
+
+    def colorstr(self, astr, color):
+        return self.termcode(self.CODE[color])+astr+self.termcode(self.CODE['ENDC'])
+
+    def showCodes( self ):
+        astr='yippy skippy'
+        codes = self.getCodes()
+        for key in sorted(codes.keys()):
+            print('%s: %s' % (key, self.colorstr( astr, key )))
+
+    def cprint( self, astr, color ):
+        print(self.colorstr( astr, color ))
diff --git a/share/trick/trickops/ExampleWorkflow.py b/share/trick/trickops/ExampleWorkflow.py
new file mode 100755
index 000000000..118d7c28d
--- /dev/null
+++ b/share/trick/trickops/ExampleWorkflow.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+import os, sys
+
+# Create a valid yml config file describing which sims to consider
+yml_content=(
+"""
+SIM_parse_s_define:
+  path: test/SIM_parse_s_define
+SIM_python_namespace:
+  path: test/SIM_python_namespace
+  runs:
+    RUN_test/unit_test.py:
+SIM_rti:
+  path: test/SIM_rti
+  runs:
+    RUN_test/unit_test.py:
+SIM_segments:
+  path: test/SIM_segments
+  runs:
+    RUN_test/input.py:
+SIM_stls:
+  path: test/SIM_stls
+  runs:
+    RUN_test/input.py:
+    RUN_test/unit_test.py:
+SIM_swig_template_scoping:
+  path: test/SIM_swig_template_scoping
+SIM_target_specific_variables:
+  path: test/SIM_target_specific_variables
+SIM_test_abstract:
+  path: test/SIM_test_abstract
+  runs:
+    RUN_test/input.py:
+SIM_test_dp:
+  path: test/SIM_test_dp
+  runs:
+    RUN_test/unit_test.py:
+    RUN_test/input.py:
+SIM_test_dr:
+  path: test/SIM_test_dr
+  runs:
+    RUN_test/unit_test.py:
+SIM_test_inherit:
+  path: test/SIM_test_inherit
+  runs:
+    RUN_test/input.py:
+SIM_test_io:
+  path: test/SIM_test_io
+  runs:
+    RUN_test/unit_test.py:
+SIM_test_ip:
+  path: test/SIM_test_ip
+  runs:
+    RUN_test/unit_test.py:
+SIM_test_sched:
+  path: test/SIM_test_sched
+  runs:
+    RUN_test/input.py:
+    RUN_test/unit_test.py:
+SIM_test_templates:
+  path: test/SIM_test_templates
+  runs:
+    RUN_test/unit_test.py:
+SIM_threads:
+  path: test/SIM_threads
+  runs:
+    RUN_test/sched.py:
+    RUN_test/amf.py:
+    RUN_test/async.py:
+    RUN_test/unit_test.py:
+SIM_threads_simple:
+  path: test/SIM_threads_simple
+  runs:
+    RUN_test/input.py:
+    RUN_test/sched.py:
+    RUN_test/async.py:
+SIM_trickcomm:
+  path: test/SIM_trickcomm
+ runs: + RUN_test/input.py: +SIM_ball_L2: + path: trick_sims/Ball/SIM_ball_L2 +SIM_ball_L3: + path: trick_sims/Ball/SIM_ball_L3 +SIM_amoeba: + path: trick_sims/Cannon/SIM_amoeba +SIM_cannon_aero: + path: trick_sims/Cannon/SIM_cannon_aero +SIM_cannon_analytic: + path: trick_sims/Cannon/SIM_cannon_analytic +SIM_cannon_eulercromer: + path: trick_sims/Cannon/SIM_cannon_eulercromer +SIM_cannon_jet: + path: trick_sims/Cannon/SIM_cannon_jet +SIM_cannon_numeric: + path: trick_sims/Cannon/SIM_cannon_numeric +SIM_monte: + path: trick_sims/Cannon/SIM_monte +SIM_Ball++_L1: + path: trick_sims/SIM_Ball++_L1/ +SIM_contact: + path: trick_sims/SIM_contact +SIM_lander: + path: trick_sims/SIM_lander +SIM_msd: + path: trick_sims/SIM_msd +SIM_parachute: + path: trick_sims/SIM_parachute +SIM_rocket: + path: trick_sims/SIM_rocket +SIM_sat2d: + path: trick_sims/SIM_sat2d +SIM_satellite: + path: trick_sims/SIM_satellite +SIM_sun: + path: trick_sims/SIM_sun +SIM_wheelbot: + path: trick_sims/SIM_wheelbot +""") +f = open("/tmp/config.yml", "w") +f.write(yml_content) +f.close() + +from TrickWorkflow import * +class ExampleWorkflow(TrickWorkflow): + def __init__( self, quiet, trick_top_level='/tmp/trick'): + # Real projects already have trick somewhere, but for this test, just clone it + if not os.path.exists(trick_top_level): + os.system('cd %s && git clone https://github.com/nasa/trick' % (os.path.dirname(trick_top_level))) + # Base Class initialize, this creates internal management structures + TrickWorkflow.__init__(self, project_top_level=trick_top_level, log_dir='/tmp/', + trick_dir=trick_top_level, config_file="/tmp/config.yml", cpus=3, quiet=quiet) + def run( self): + build_jobs = self.get_jobs(kind='build') + run_jobs = self.get_jobs(kind='run') + + builds_status = self.execute_jobs(build_jobs, max_concurrent=3, header='Executing all sim builds.') + runs_status = self.execute_jobs(run_jobs, max_concurrent=3, header='Executing all sim runs.') + self.report() # Print Verbose report + self.status_summary() # Print a Succinct summary + return (builds_status or runs_status or self.config_errors) +if __name__ == "__main__": + sys.exit(ExampleWorkflow(quiet=(True if len(sys.argv) > 1 and 'quiet' in sys.argv[1] else False)).run()) diff --git a/share/trick/trickops/README.md b/share/trick/trickops/README.md new file mode 120000 index 000000000..859531364 --- /dev/null +++ b/share/trick/trickops/README.md @@ -0,0 +1 @@ +../../../docs/documentation/miscellaneous_trick_tools/TrickOps.md \ No newline at end of file diff --git a/share/trick/trickops/TrickWorkflow.py b/share/trick/trickops/TrickWorkflow.py new file mode 100644 index 000000000..30bd87a8f --- /dev/null +++ b/share/trick/trickops/TrickWorkflow.py @@ -0,0 +1,1467 @@ +""" +Collection of Trick-based utility classes. 
These classes inherit from generic
+functionality provided by WorkflowCommon and provide a consistent framework for
+testing multiple sims/runs/analyses/comparisons.
+
+Requires: python3 and requirements.txt containing:
+    PyYAML   # For reading input yml files
+    psutil   # For child process acquisition
+"""
+import os, sys, threading, socket, abc, time, re, copy, subprocess, hashlib, inspect
+import yaml # Provided by PyYAML
+
+from WorkflowCommon import *
+import pprint
+# Import Trick natively supported python variable server utilities
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(
+    os.path.abspath(inspect.getsourcefile(lambda:0))), '../pymods')))
+from trick import variable_server
+
+# This global is the result of hours of frustration and debugging. This is only used by doctest
+# but appears to be the only solution to the problem of __file__ not being an absolute path in
+# some cases for some python versions and how that interacts with this class's os.chdir() when its
+# base class initializes. If you attempt to define this_trick locally in the doctest block,
+# which was my original attempt, you will find that the value of this_trick is different inside
+# the __init__ doctest evaluation compared to any other member function. I believe this is only
+# the case when using python version < 3.9, according to the information found here:
+# https://note.nkmk.me/en/python-script-file-path/
+# I do not like adding globals to "production code" just to facilitate a testing mechanism, but
+# I don't know of any cleaner way to do this. AND ANOTHER THING! doctest examples are not
+# given for any function that prints, because we make use of color printing extensively in this
+# module via the tprint function, and doctest is not well suited for handling color output.
+#  - Sincerely, a quite ornery Dan Jordan 4/2021
+this_trick = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../..'))
+
+class TrickWorkflow(WorkflowCommon):
+    """
+    A Trick Workflow requires a path to your project top level, a path to your
+    trick directory, and a path to a valid YAML file as input. The yaml file defines
+    the sim(s), run(s), valgrind run(s), comparison(s), etc. that are part of the
+    project workflow of interest. A project using Trick should define their own
+    Workflow class which inherits from this base class, where all
+    project-specific (but not Trick-specific) content lives.
+
+    Once TrickWorkflow.__init__() is called, an internal list of Sim() container
+    instances is created from the content of the yml file; these can then be
+    leveraged as your project sees fit. Some examples:
+
+        # Get lists of jobs created from the content in the YAML file
+        builds = self.get_jobs(kind='build')
+        runs = self.get_jobs(kind='run')
+        analysis = self.get_jobs(kind='analysis')
+        valgrind = self.get_jobs(kind='valgrind')
+
+        # Execute the build jobs, with a max of 3 building simultaneously
+        # ret will be 0 if all jobs succeed, 1 if any fail
+        ret = self.execute_jobs(builds, max_concurrent=3)
+
+        # Job statuses are stored internally and can be queried after they've already
+        # executed, for example, after execute_jobs() finishes:
+        for b in builds:
+            print("Build job %s completed with status %d " % (b.name, b.get_status() is Job.Status.SUCCESS))
+            print("and ran command %s " % (b._command))
+
+        # All jobs store their status internally regardless of whether they have
+        # been executed already or not. Jobs that have not yet run have a status
+        # of Job.Status.NOT_STARTED. See WorkflowCommon.py for all statuses.
+        # Here's an example where we print all states for all jobs:
+        for j in (builds + runs + analysis + valgrind):
+            print("Job with command '%s' has status %d " % (j._command, j.get_status()))
+
+        # Verbose reporting of container classes is built-in, for example:
+        self.report()  # Report all sims, runs, comparisons, etc. recursively
+        # Get a sim's report by its name from the YAML file
+        self.get_sim('mysim').report()
+        # Note that runs, comparisons, and analysis management classes and/or jobs also
+        # support the report() method
+    """
+    def __init__(self, project_top_level, log_dir, trick_dir, config_file, cpus=3, quiet=False):
+        """
+        Initialize this instance.
+
+        >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml"))
+
+        Parameters
+        ----------
+        project_top_level : str
+            Path to the top level of this project a.k.a root directory
+        log_dir : str
+            Path to directory where all test log output will be written
+        trick_dir : str
+            Path to root trick directory for this project
+        config_file : str
+            Path to YAML file describing this Trick Workflow
+        cpus : int
+            Maximum number of CPUs to use when running parallel jobs
+        quiet : bool
+            Flag for keeping verbose output to a minimum. Suppresses all progress bars
+            when true, which is useful for running in a CI system where stdin isn't
+            available
+        """
+        super().__init__(project_top_level=project_top_level, log_dir=log_dir, quiet=quiet)
+        # If not found in the config file, these defaults are used
+        self.defaults = {'cpus': 3, 'name': None, 'description': None,
+            'build_command': 'trick-CP', 'size': 2200000, 'env': None}
+        self.config_errors = False
+        self.compare_errors = False     # True if comparison errors were found
+        self.sims = []                  # list of Sim() instances, filled out from config file
+        self.config_file = config_file  # path to yml config
+        self.cpus = cpus                # Number of CPUs this workflow should use when running
+        # 'loose' or 'strict' controls if multiple input files per RUN dir are allowed in YAML config file
+        self.parallel_safety = 'loose'
+        self.config = self._read_config(self.config_file)  # Contains resulting Dict parsed by yaml.safe_load
+        self.trick_dir = trick_dir
+        self.trick_host_cpu = self.get_trick_host_cpu()
+        self._validate_config()
+
+    def create_test_suite(self):
+        """
+        Create all Job()s from the internal self.sims structure
+
+        Jobs are created "on-get". This function just gets everything TrickWorkflow
+        knows about with respect to Jobs but doesn't execute anything.
+
+        >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml"))
+        >>> tw.create_test_suite()
+
+        """
+        self.get_jobs(kind='build')
+        self.get_jobs(kind='run')
+        self.get_jobs(kind='analysis')
+        self.get_jobs(kind='valgrind')
+
+    def get_trick_host_cpu(self):
+        """
+        Get a string of the TRICK_HOST_CPU by running trick-gte
+
+        >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml"))
+        >>> tw.get_trick_host_cpu() is not None
+        True
+
+        Returns
+        -------
+        str or None
+            TRICK_HOST_CPU or None if it cannot be determined
+        """
+        gte_cmd = [os.path.join(self.trick_dir, "bin/trick-gte" ), "TRICK_HOST_CPU"]
+        result = run_subprocess(gte_cmd, m_stdout=subprocess.PIPE, m_stderr=subprocess.PIPE)
+        self.trick_host_cpu = result.stdout.strip()
+        if self.trick_host_cpu == None or self.trick_host_cpu == '':
+            tprint("ERROR: Unable to determine TRICK_HOST_CPU, you may encounter problems "
+                "if you continue... ", 'DARK_RED')
+            self.trick_host_cpu = None
+        return self.trick_host_cpu
+
+    def get_sim(self, identifier):
+        """
+        Get a Sim() instance by unique identifier. This identifier must
+        be either the name (top level key) in self.config_file or the path
+        parameter
+
+        >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml"))
+        >>> sim = tw.get_sim('SIM_ball_L1')
+
+        Parameters
+        ----------
+        identifier : str
+            name or path to sim from top level of repo, as written in the YAML file
+
+        Returns
+        -------
+        Sim() or None
+            Instance of Sim() management class matching the path/name given or None
+            if one cannot be found
+
+        Raises
+        ------
+        TypeError
+            If identifier is not a str
+        """
+        if type(identifier) != str:
+            raise TypeError('get_sim() only accepts a Sim name or Sim path from project top level')
+        for sim in self.sims:
+            if sim.sim_dir == identifier or sim.name == identifier:
+                return sim
+        return None
+
+    def get_sims(self, labels):
+        """
+        Get a list of Sim() instances by label or labels listed in self.config_file
+
+        >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml"))
+        >>> sims = tw.get_sims(['unit_test'])
+
+        Parameters
+        ----------
+        labels : str or list
+            label or labels that each sim must have to be returned by this function
+
+        Returns
+        -------
+        list
+            List of Sim() instances matching the label(s) given or [] if none can be
+            found
+
+        Raises
+        ------
+        TypeError
+            If labels is not a str or list
+        """
+        sims_found = []
+        ls = []
+        if type(labels) == str:
+            ls = [labels]
+        elif type(labels) == list:
+            ls = [str(l) for l in labels]
+        else:
+            raise TypeError('get_sims() only accepts a label string or list of label strings')
+        for sim in self.sims:
+            if all(l in sim.labels for l in ls):
+                sims_found.append(sim)
+        return sims_found
+
+    def _read_config(self, config_file):
+        """
+        Read the yaml file into a dict and return it
+
+        Parameters
+        ----------
+        config_file : str
+            path to YAML config file to be read
+
+        Returns
+        -------
+        dict or None
+            dictionary representation of YAML content as parsed by yaml.safe_load()
+            or None if parsing failed
+        """
+        try:
+            with open(config_file) as file:
+                y = yaml.safe_load(file)
+                return y
+        except Exception as e:
+            tprint("Unable to parse config file: %s\nError: %s" % (config_file,e), 'DARK_RED')
+            return None
+
+    def _validate_config(self):
+        """
+        Sanity check what we've read from the yml config file and create internal Sim()
+        objects which populate the self.sims [] list. Makes sure some values
+        exist and paths are valid locations where applicable. The self.config dict
+        may be modified to add missing entries as part of this process. If errors
+        in the config file are detected, self.config_errors is set to True
+
+        The yaml format expected by this framework is described as follows:
+
+        globals:
+          env:                 <-- optional literal string executed before all tests, e.g. env setup
+          parallel_safety:     <-- strict won't allow multiple input files per RUN dir
+
+        SIM_abc:               <-- required unique name for sim of interest, must start with SIM
+          path:                <-- required SIM path relative to project top level
+          description:         <-- optional description for this sim
+          labels:              <-- optional list of labels for this sim, can be used to get sims
+            - model_x              by label within the framework, or for any other project-defined
+            - verification          purpose
+          build_command:       <-- optional literal cmd executed for SIM_build, defaults to trick-CP
+          size:                <-- optional estimated size of successful build output file in bytes
+          runs:                <-- optional dict of runs to be executed for this sim, where the
+            RUN_1/input.py --foo:    dict keys are the literal arguments passed to the sim binary
+            RUN_2/input.py:          and the dict values are other run-specific optional dictionaries
+            ...                      described as follows ...
+              returns:         <---- optional exit code of this run upon completion (0-255). Defaults
+                                     to 0
+              compare:         <---- optional list of <path> vs. <path> comparison strings to be
+                - a vs. b            compared after this run is complete. This is extensible in that
+                - d vs. e            all non-list values are ignored and assumed to be used to define
+                - ...                an alternate comparison method in a class extending this one
+              analyze:         <-- optional arbitrary string to execute as job in bash shell from
+                                   project top level, for project-specific post-run analysis
+          valgrind:            <-- optional dict describing how to execute runs within valgrind
+            flags:             <-- string of all flags passed to valgrind for all runs
+            runs:              <-- list of literal arguments passed to the sim binary through
+                                   valgrind
+        non_sim_extension_example:
+          will: be ignored by TrickWorkflow parsing for derived classes to implement as they wish
+
+        Any top-level key not matching the SIM naming pattern is ignored purposefully, to allow
+        users of this framework to extend the same YAML file for other non-trick tests
+
+        Raises
+        ------
+        RuntimeError
+            If self.config has insufficient content
+        """
+        # All error messages should trigger self.config_errors to be True
+        def cprint(msg, color):
+            self.config_errors = True
+            tprint(msg, color)
+        c = copy.deepcopy(self.config)  # Make a copy for extra safety
+        if not c:  # If entire config is empty
+            msg = "ERROR: Config file %s is empty. Cannot continue." % (self.config_file)
+            cprint(msg, 'DARK_RED')
+            self._cleanup()
+            raise RuntimeError(msg)
+        # Check global parameters
+        if 'globals' not in c or not c['globals']: # If globals section is missing or None
+            self.env = ''
+            self.parallel_safety = 'loose'
+        else:
+            if 'env' not in c['globals'] or not c['globals']['env']: # If section is missing or None
+                self.env = ''
+            else:
+                self.env = c['globals']['env']
+            # If parallel_safety section is missing or None
+            if 'parallel_safety' not in c['globals'] or not c['globals']['parallel_safety']:
+                self.parallel_safety = 'loose'
+            elif c['globals']['parallel_safety'] != 'loose' and c['globals']['parallel_safety'] != 'strict':
+                cprint( "ERROR: parallel_safety value of %s in config file %s is unsupported. Defaulting to"
+                    " 'loose' and continuing..." % (c['globals']['parallel_safety'], self.config_file),
+                    'DARK_RED')
+                self.parallel_safety = 'loose'
+            else:
+                self.parallel_safety = c['globals']['parallel_safety']
+        c.pop('globals', None) # Remove to iterate on the rest of the non-global content
+        all_sim_paths = [] # Keep a list of all paths for uniqueness check
+        # Check sub-parameters of SIM entries
+        for s in c:
+            if not c[s]: # If the structure is completely empty, skip it
+                cprint("ERROR: %s is empty! Continuing but skipping this entire entry from %s."
+                    % (s, self.config_file), 'DARK_RED')
+                self.config.pop(s)
+                continue
+            # If the structure doesn't start with SIM, ignore it and move on
+            if not s.lower().startswith('sim'):
+                continue
+            if type(c[s]) is not dict:
+                cprint("ERROR: SIM entry %s is not a dict! SIM definitions must start with SIM, end"
+                    " with :, and contain the path: key-value pair. Continuing but"
+                    " ignoring this entry from %s."
+                    % (s, self.config_file), 'DARK_RED')
+                self.config.pop(s)
+                continue
+            # If optional entries are missing or None, set defaults
+            if 'description' not in c[s] or not c[s]['description']:
+                self.config[s]['description'] = self.defaults['description']
+            if 'build_command' not in c[s] or not c[s]['build_command']:
+                self.config[s]['build_command'] = self.defaults['build_command']
+            if 'size' not in c[s] or not c[s]['size']:
+                self.config[s]['size'] = self.defaults['size']
+            # SIM dir path check
+            if ('path' not in c[s] or not c[s]['path'] or not
+                os.path.exists(os.path.join(self.project_top_level, c[s]['path'])) ):
+                cprint("ERROR: %s's 'path' not found. Continuing but skipping this entire entry from %s."
+                    % (s, self.config_file), 'DARK_RED')
+                self.config.pop(s)
+                continue
+            # SIM dir uniqueness check
+            if c[s]['path'] in all_sim_paths:
+                cprint("ERROR: %s's 'path' is not unique in %s. Continuing but skipping this sim."
+                    % (s, self.config_file), 'DARK_RED')
+                self.config.pop(s)
+                continue
+            if 'labels' not in c[s] or not c[s]['labels']:
+                self.config[s]['labels'] = []
+            # Create internal object to be populated with runs, valgrind runs, etc
+            thisSim = TrickWorkflow.Sim(name=s, sim_dir=self.config[s]['path'],
+                description=self.config[s]['description'], labels=self.config[s]['labels'],
+                prebuild_cmd=self.env, build_cmd=self.config[s]['build_command'],
+                cpus=self.cpus, size=self.config[s]['size'], log_dir=self.log_dir)
+            all_sim_paths.append(c[s]['path'])
+            # RUN sanity checks
+            if 'runs' in c[s]: # If it's there...
+                if not c[s]['runs']: # but None, remove it
+                    self.config[s].pop('runs')
+                elif type( c[s]['runs']) is not dict: # If it's the wrong type
+                    cprint("ERROR: Run %s is not a dict! Make sure the 'RUN...:' ends with a : in config file %s."
+                        " Continuing but skipping this run: section..."
+                        % (c[s]['runs'], self.config_file), 'DARK_RED')
+                    self.config[s].pop('runs')
+                    continue
+                else: # If it's there and a valid dict, check paths
+                    all_run_paths = [] # Keep a list of all paths for uniqueness check
+                    for r in c[s]['runs']:
+                        just_RUN = r.split(' ')[0]
+                        just_RUN_dir = r.split('/')[0]
+                        if not os.path.exists(os.path.join(self.project_top_level, c[s]['path'], just_RUN)):
+                            cprint("ERROR: %s's 'run' path %s not found. Continuing but skipping this run "
+                                "from %s." % (s, just_RUN, self.config_file), 'DARK_RED')
+                            self.config[s]['runs'].pop(r)
+                            continue
+                        if just_RUN_dir in all_run_paths and self.parallel_safety == 'strict':
+                            cprint("ERROR: %s's run directory %s is not unique in %s. With global setting "
+                                "parallel_safety: strict, you cannot have the same RUN directory listed "
+                                "more than once per sim. Continuing but skipping this run." %
+                                (s, r, self.config_file), 'DARK_RED')
+                            self.config[s]['runs'].pop(r)
+                            continue
+                        # if the value of <run>: has nothing specified under it, fill out defaults
+                        if not c[s]['runs'][r]:
+                            self.config[s]['runs'][r] = {}
+                            self.config[s]['runs'][r]['returns'] = 0
+                            self.config[s]['runs'][r]['compare'] = None
+                            self.config[s]['runs'][r]['analyze'] = None
+                            # Have to update the dict we're iterating b/c we check more content a dozen lines down
+                            c[s]['runs'][r] = dict(self.config[s]['runs'][r])
+                        elif 'returns' not in c[s]['runs'][r]:
+                            self.config[s]['runs'][r]['returns'] = 0
+                        elif (type(c[s]['runs'][r]['returns']) != int or
+                              c[s]['runs'][r]['returns'] < 0 or c[s]['runs'][r]['returns'] > 255):
+                            cprint("ERROR: %s's run '%s' has invalid 'returns' value (must be 0-255). "
+                                "Continuing but assuming this run is expected to return 0 in %s." % (s, r, self.config_file),
+                                'DARK_RED')
+                            self.config[s]['runs'][r]['returns'] = 0 # Default to zero
+                        # Create internal object to be added to thisSim
+                        thisRun = TrickWorkflow.Run(sim_dir=self.config[s]['path'], input=r,
+                            binary= 'S_main_' + self.trick_host_cpu + '.exe', prerun_cmd=self.env,
+                            returns=self.config[s]['runs'][r]['returns'],
+                            valgrind_flags=None, log_dir=self.log_dir)
+                        # Handle the 'compare' option; if not given, default to None
+                        if 'compare' not in c[s]['runs'][r]:
+                            self.config[s]['runs'][r]['compare'] = None
+                        elif type(c[s]['runs'][r]['compare']) != list:
+                            pass # Deliberately leave open for workflows to extend how comparisons are defined
+                        else: # If it's a list, make sure it fits the 'path vs. path' format
+                            for cmp in c[s]['runs'][r]['compare']:
+                                if ' vs. ' not in cmp:
+                                    cprint("ERROR: %s's run %s comparison '%s' does not match expected pattern. Must be of "
+                                        "form: 'path/to/log1 vs. path/to/log2'. Continuing but ignoring this comparison in %s."
+                                        % (s, r, cmp, self.config_file), 'DARK_RED')
+                                    self.config[s]['runs'][r]['compare'].remove(cmp)
+                                    continue
+                                lhs, rhs = [ p.strip() for p in cmp.split(' vs.') ]
+                                thisRun.add_comparison(test_data=lhs, baseline_data=rhs)
+                        # Handle the 'analyze' option; if not given, default to None
+                        if 'analyze' not in c[s]['runs'][r]:
+                            self.config[s]['runs'][r]['analyze'] = None
+                        elif type(c[s]['runs'][r]['analyze']) != str:
+                            pass # Deliberately leave open for workflows to extend how analyze is defined
+                        else:
+                            thisRun.add_analysis(cmd=self.config[s]['runs'][r]['analyze'])
+                        all_run_paths.append(just_RUN_dir)
+                        thisSim.add_run(thisRun)
+            # SIM's valgrind RUN path checks
+            if 'valgrind' in c[s]: # If it's there...
+ if not c[s]['valgrind']: # but None, remove it + self.config[s].pop('valgrind') + elif type( c[s]['valgrind']) is not dict: # If it's the wrong type + cprint("ERROR: Valgrind entry %s is not a dict! Make sure 'valgrind:' ends with a : in " + "config file %s. Continuing but skipping this valgrind: section..." + % (c[s]['valgrind'], self.config_file), 'DARK_RED') + self.config[s].pop('valgrind') + else: # If it's there and a valid dict + if self.this_os == 'darwin': + cprint("ERROR: Valgrind entry for %s is not valid for platform: %s in " + "config file %s. Continuing but skipping this valgrind: section..." + % (s, self.this_os, self.config_file), 'DARK_RED') + self.config[s].pop('valgrind') + else: + if 'flags' not in c[s]['valgrind']: + self.config[s]['valgrind']['flags'] = '' + if 'runs' in c[s]['valgrind']: # If it's there... + if not c[s]['valgrind']['runs']: # but None, remove entire valgrind section + cprint("ERROR: %s's valgrind section has no 'run' paths. Continuing but skipping " + "this valgrind section from %s." % (s, self.config_file), 'DARK_RED') + self.config[s].pop('valgrind') + else: + for r in c[s]['valgrind']['runs']: # If it's there and a valid list, check paths + just_RUN = r.split(' ')[0] + if not os.path.exists(os.path.join(self.project_top_level, c[s]['path'], just_RUN)): + cprint("ERROR: %s's valgrind 'run' path %s not found. Continuing but skipping " + "this run from %s." % (s, just_RUN, self.config_file), 'DARK_RED') + self.config[s]['valgrind']['runs'].remove(r) + else: + # Create internal object to be added to thisSim + vRun = TrickWorkflow.Run(sim_dir=self.config[s]['path'], input=r, + binary= 'S_main_' + self.trick_host_cpu + '.exe', + prerun_cmd=self.env, valgrind_flags=self.config[s]['valgrind']['flags'], + log_dir=self.log_dir) + thisSim.add_run(vRun) + # Done building up thisSim, store it off for later + self.sims.append(thisSim) + if len(self.sims) < 1: # At minimum, one valid SIM structure must exist + msg = ("ERROR: After validating config file, there is insufficient information to continue." + " Check the syntax in config file %s and try again." + % (self.config_file) ) + cprint(msg, 'DARK_RED') + self._cleanup() + raise RuntimeError(msg) + self._validate_config_custom(copy.deepcopy(self.config)) + + def _validate_config_custom(self, config): + """ + Customization of config file validation. Intended to be extended in derived class. + If changes are needed to the config file, derived classes should edit self.config, not config. 
+ + Parameters + ---------- + config : dict + deep copy of self.config for reading + """ + pass + + def report(self, indent=''): + """ + Recursively report all internal information + >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml")) + """ + [sim.report() for sim in self.sims] + + def status_summary(self): + """ + Print a summary of all jobs executed, and return 'SUCCESS' if all were successful, 'FAILURE' + if any job was not successful + """ + all_builds = self.get_jobs(kind='build') + all_runs = self.get_jobs(kind='run') + all_analysis = self.get_jobs(kind='analysis') + all_comparisons = self.get_comparisons() + all_valgrind = self.get_jobs(kind='valgrind') + + executed_builds = [ build for build in all_builds if build.get_status() != Job.Status.NOT_STARTED ] + executed_runs = [ run for run in all_runs if run.get_status() != Job.Status.NOT_STARTED ] + executed_analysis = [ a for a in all_analysis if a.get_status() != Job.Status.NOT_STARTED ] + executed_comparisons = [ c for c in all_comparisons if c.status != Job.Status.NOT_STARTED ] + executed_valgrind = [ v for v in all_valgrind if v.get_status() != Job.Status.NOT_STARTED ] + + ok_builds = [ build for build in executed_builds if build.get_status() == Job.Status.SUCCESS ] + ok_runs = [ run for run in executed_runs if run.get_status() == Job.Status.SUCCESS ] + ok_analysis = [ a for a in executed_analysis if a.get_status() == Job.Status.SUCCESS ] + ok_comparisons = [ c for c in executed_comparisons if c.status == Job.Status.SUCCESS ] + ok_valgrind = [ v for v in executed_valgrind if v.get_status() == Job.Status.SUCCESS ] + + tprint( "SUMMARY:" ) + if executed_builds: + tprint( " {0} out of {1} builds succeeded".format(len(ok_builds),len(executed_builds))) + if executed_runs: + tprint( " {0} out of {1} runs succeeded".format(len(ok_runs),len(executed_runs))) + if executed_analysis: + tprint( " {0} out of {1} analyses succeeded".format(len(ok_analysis),len(executed_analysis))) + if executed_comparisons: + tprint( " {0} out of {1} comparisons succeeded".format(len(ok_comparisons),len(executed_comparisons))) + if executed_valgrind: + tprint( " {0} out of {1} valgrind runs succeeded".format(len(ok_valgrind),len(executed_valgrind))) + + if (executed_builds == ok_builds and executed_runs == ok_runs and executed_analysis == ok_analysis + and executed_comparisons == ok_comparisons and executed_valgrind == ok_valgrind) : + return 'SUCCESS' + else: + return 'FAILURE' + + def compare(self): + """ + Execute sim.compare() on all sims in self.sims + + >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml")) + >>> tw.compare() + True + + Returns + ------- + bool + True if any comparison failed. False if all were successful. 
+ """ + return any([sim.compare() for sim in self.sims]) + + def get_jobs(self, kind): + """ + Return a list of Jobs() from the self.sims structure of the kind given + + >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml")) + >>> ret = tw.get_jobs(kind='build') + >>> len(ret) + 56 + >>> ret = tw.get_jobs(kind='run') + >>> ret = tw.get_jobs(kind='valgrind') + >>> ret = tw.get_jobs(kind='analysis') + + Parameters + ---------- + kind : str + Kind of jobs to return from internal self.sims structure: 'build', 'run', 'valgrind', or 'analysis' + + Returns + ------- + list + List of jobs of given kind + """ + jobs = [] + if kind == 'build' or kind == 'builds': + jobs = [ sim.get_build_job() for sim in self.sims ] + elif kind == 'run' or kind == 'runs': + for sim in self.sims: + jobs += sim.get_run_jobs(kind='normal') + elif kind == 'valgrind' or kind == 'valgrinds': + for sim in self.sims: + jobs += sim.get_run_jobs(kind='valgrind') + elif kind == 'analysis' or kind == 'analyses' or kind == 'analyze': + for sim in self.sims: + jobs += sim.get_analysis_jobs() + else: + raise TypeError('get_jobs() only accepts kinds: build, run, valgrind, analysis') + return (jobs) + + def get_comparisons(self): + """ + Return a list of all Comparison() instances from the self.sims structure + + >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml")) + >>> ret = tw.get_comparisons() + >>> len(ret) + 1 + + Returns + ------- + list + List of Comparison objects + """ + return ([ c for sim in self.sims for run in sim.runs for c in run.comparisons ]) + + def get_unique_comparison_dirs(self): + """ + Loop over all comparisons for all sim runs and return a tuple of unique + directory comparisons. This is a utility function since many plotting + tools accept two run directories, not paths to individual log files. 
+ Note that if a directory doesn't exist, that element of the tuple is None;
+ see Comparison.get_dirnames()
+
+ >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml"))
+ >>> ret = tw.get_unique_comparison_dirs()
+ >>> len(ret)
+ 1
+
+ Returns
+ -------
+ list
+ List of (test_dir, baseline_dir) tuples where either element
+ is None if that directory does not exist
+ """
+ all_cmp_dirnames = [] # list of tuples ( test_dir, baseline_dir )
+ for sim in self.sims:
+ for run in sim.runs:
+ for cmp in run.comparisons:
+ all_cmp_dirnames.append(cmp.get_dirnames())
+ # Reduce full list to unique list
+ return(list(set(all_cmp_dirnames)))
+
+ def get_koviz_report_job(self, test_dir, baseline_dir, pres=None):
+ """
+ Given two RUN directories, generate an error/difference report koviz Job() instance
+ which when executed will generate a PDF in self.log_dir
+
+ >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml"))
+ >>> ret = tw.get_koviz_report_job('share/trick/trickops/tests/baselinedata/','share/trick/trickops/tests/testdata/')
+ >>> type(ret[0]) is Job
+ True
+ >>> ret[1] is None
+ True
+
+ Parameters
+ ----------
+ test_dir : str
+ path to directory with test logged data
+ baseline_dir : str
+ path to directory with baseline logged data
+ pres : str or None
+ passthrough option to koviz's -pres (presentation) option
+
+ Returns
+ -------
+ Tuple
+ (Job() instance for the run directories given or None if error encountered,
+ error details or None if successful)
+ """
+ if os.system(self.env + ' which koviz > /dev/null 2>&1') != 0:
+ msg = "ERROR: koviz is not found in PATH. Returning None in get_koviz_report_job()"
+ tprint(msg, 'DARK_RED')
+ return None, msg
+ dirs = [test_dir, baseline_dir]
+ for dir in dirs:
+ if not os.path.exists(dir):
+ msg = "ERROR: %s not found. Returning None in get_koviz_report_job()" % (dir)
+ tprint(msg, 'DARK_RED')
+ return None, msg
+ cmd = (self.env + " koviz -platform offscreen -a")
+ if pres:
+ cmd+= (" -pres %s " % pres )
+ cmd+= (" -pdf %s %s %s" % (os.path.join(self.log_dir,
+ (unixify_string(test_dir)+'_vs_' + unixify_string(baseline_dir)) + '.pdf'),
+ test_dir, baseline_dir) )
+ name='koviz report %s vs. %s' % (test_dir, baseline_dir)
+ return(Job(name=name, command=cmd, log_file=os.path.join(self.log_dir,"."+unixify_string(name)),
+ expected_exit_status=0), None)
+
+ def get_koviz_report_jobs(self):
+ '''
+ Loop over all runs for all sims and generate a koviz pdf report job for each unique
+ run directory comparison found.
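+ The returned jobs are ordinary Job() instances; a sketch of typical
+ use with the inherited execute_jobs():
+ jobs, errors = tw.get_koviz_report_jobs()
+ tw.execute_jobs(jobs, max_concurrent=4, header='koviz reports')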
+
+ >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml"))
+ >>> ret = tw.get_koviz_report_jobs()
+ >>> type(ret[0]) is list
+ True
+ >>> type(ret[0][0]) is Job
+ True
+ >>> len(ret[1])
+ 0
+
+ Returns
+ -------
+ Tuple
+ (List of koviz report Job() instances, List of error strings)
+ '''
+ koviz_jobs = []
+ koviz_errors = []
+ all_cmp_dirnames = self.get_unique_comparison_dirs() # List of (test_dir, baseline_dir) tuples
+ # Generate koviz reports for the given test_dir, baseline_dir pairs
+ for (test_dir, baseline_dir) in all_cmp_dirnames:
+ if test_dir and baseline_dir: # Will be None if dir is empty
+ job, error = self.get_koviz_report_job(test_dir, baseline_dir, pres='error')
+ if job:
+ koviz_jobs.append(job)
+ if error:
+ koviz_errors.append(error)
+ return koviz_jobs, koviz_errors
+
+ class Sim(object):
+ """
+ Management class for sim content read from yml config file. Each highest-level
+ key in the dict read will become a single instance of this management class
+ stored in the TrickWorkflow.sims list.
+ """
+ def __init__(self, name, sim_dir, description=None, labels=[], prebuild_cmd='',
+ build_cmd='trick-CP', cpus=3, size=2200000, log_dir='/tmp'):
+ """
+ Initialize this instance.
+
+ >>> s = TrickWorkflow.Sim(name='alloc', sim_dir=os.path.join(this_trick, 'test/SIM_alloc_test'))
+ >>> s.name
+ 'alloc'
+
+ Parameters
+ ----------
+ name : str
+ Arbitrary name of this simulation
+ sim_dir : str
+ Path to this sim's directory with respect to the project top level
+ description : str
+ Arbitrary description of this sim
+ labels : list
+ Optional list of user-specified labels associated with this sim
+ prebuild_cmd : str
+ Optional string to execute immediately before sim build, e.g. env sourcing
+ build_cmd : str
+ Literal string for build command, defaults to 'trick-CP'
+ cpus : int
+ Optional number of CPUs to give via MAKEFLAGS for sim build
+ size : int
+ Optional estimated size of successful build output file, for progress bars
+ log_dir : str
+ Directory in which log files will be written
+ """
+ self.name = name # Name of sim
+ self.sim_dir = sim_dir # Path to sim directory wrt to top level of project
+ self.description = description # Description of sim
+ self.labels = labels # Optional list of user-specified labels associated w/ this sim
+ self.prebuild_cmd = prebuild_cmd # Optional string to execute in shell immediately before building
+ self.build_cmd = build_cmd # Build command for sim
+ self.cpus = cpus # Number of CPUs to use in build
+ self.size = size # Estimated size of successful build output in bytes
+ self.log_dir = log_dir # Directory in which log files will be written
+ self.build_job = None # Contains Build Job instance
+ self.runs = [] # List of normal Run instances
+ self.valgrind_runs = [] # List of valgrind Run instances
+ self.printer = ColorStr() # Color printer utility
+
+ def get_build_job( self):
+ """
+ Create the FileSizeJob(Job) instance if not already created and return it for this Sim
+
+ >>> s = TrickWorkflow.Sim(name='alloc', sim_dir=os.path.join(this_trick, 'test/SIM_alloc_test'))
+ >>> s.get_build_job() #doctest: +ELLIPSIS
+ <WorkflowCommon.FileSizeJob object at ...
+
+ Returns
+ -------
+ FileSizeJob()
+ Instance of FileSizeJob() for building this sim
+ """
+ if not self.build_job:
+ name = 'Build ' + self.sim_dir
+ self.build_job = FileSizeJob(name=name,
+ command=("%s cd %s && export MAKEFLAGS=-j%d && %s" %
+ (self.prebuild_cmd, self.sim_dir, self.cpus, self.build_cmd)),
+ log_file=os.path.join(self.log_dir, unixify_string(self.sim_dir) + '_build.txt'),
+ size=self.size)
+ return (self.build_job)
+
+ def get_run_jobs( self, kind='normal'):
+ """
+ Collect all SingleRun() job instances for this sim's runs of the kind given
+
+ >>> s = TrickWorkflow.Sim(name='alloc', sim_dir=os.path.join(this_trick, 'test/SIM_alloc_test'))
+ >>> s.get_run_jobs() # A sim has no runs by default
+ []
+
+ Parameters
+ ----------
+ kind : str
+ 'normal' for normal runs, 'valgrind' for valgrind runs
+
+ Returns
+ -------
+ list
+ List of SingleRun() job instances
+ """
+ if (kind == 'valgrind'):
+ return ([r.get_run_job() for r in self.valgrind_runs])
+ else:
+ return ([r.get_run_job() for r in self.runs])
+
+ def 
get_analysis_jobs( self): + """ + Collect all Job() instances for all analysis across all sim runs + + >>> s = TrickWorkflow.Sim(name='alloc', sim_dir=os.path.join(this_trick, 'test/SIM_alloc_test')) + >>> s.get_analysis_jobs() # A sim has no analysis jobs by default + [] + + Returns + ------- + list() + List of Job() instances for all analyses + """ + return ([r.analysis for r in self.runs if r.analysis]) + + def get_run(self, input): + """ + Get a Run() instance by unique full input name. This is the full + string intended to be passed to the binary. + + >>> s = TrickWorkflow.Sim(name='alloc', sim_dir=os.path.join(this_trick, 'test/SIM_alloc_test')) + >>> s.get_run('RUN_none/input.py') # This sim has no runs + + Parameters + ---------- + input : str + unique full input name + + Returns + ------- + Run() or None + Instance of Run() management class matching the input given or None + if one cannot be found + + Raises + ------ + TypeError + If input is not a str + """ + if type(input) != str: + raise TypeError('get_run() only accepts the unique key representing the entire input to' + ' the sim binary. Ex: "RUN_test/input.py --flags-too"') + for run in self.runs: + if run.input == input: + return run + return None + + def get_runs(self): + """ + Get all Run() instances associated with this sim + + >>> s = TrickWorkflow.Sim(name='alloc', sim_dir=os.path.join(this_trick, 'test/SIM_alloc_test')) + >>> s.get_runs() # This sim has no runs + [] + + """ + return self.runs + + def add_run( self, run): + """ + Append a new Run() instance to the internal run lists. Appends to valgrind + list if run.valgrind_flags is not None, appends to normal run list otherwise + + >>> s = TrickWorkflow.Sim(name='alloc', sim_dir=os.path.join(this_trick, 'test/SIM_alloc_test')) + >>> r = TrickWorkflow.Run(sim_dir=os.path.join(this_trick, 'test/SIM_alloc_test'), input='RUN_test/input.py', binary='S_main_Linux_x86_64.exe') + >>> s.add_run(r) + + Parameters + ------- + run : Run() + Instance to add + """ + if (run.valgrind_flags): + self.valgrind_runs.append(run) + else: + self.runs.append(run) + + def pop_run( self, input): + """ + Remove a run by its unique self.input value + + >>> tw = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', trick_dir=this_trick, config_file=os.path.join(this_trick,"share/trick/trickops/tests/trick_sims.yml")) + >>> s = tw.get_sim('SIM_alloc_test') + >>> r = s.pop_run('RUN_test/input.py') + >>> r.input + 'RUN_test/input.py' + >>> len(s.runs) + 0 + + Returns + ------- + Run() + Instance in this sim's runs list matching self.input + """ + for i, run in enumerate(self.runs): + if run.input == input: + return self.runs.pop(i) + + def compare( self): + """ + Run compare() on all runs for this sim + + >>> s = TrickWorkflow.Sim(name='alloc', sim_dir=os.path.join(this_trick, 'test/SIM_alloc_test')) + >>> s.compare() # This sim has no comparisons, which means all comparisons succeed + False + + Returns + ------- + bool + False if all comparisons succeed, True if any failed + """ + return any([r.compare() for r in self.runs]) + + def report(self, indent=''): + """ + Report this sims information verbosely, ignoring members that are None + + Parameters + ------- + indent : str + prepend the report with this custom string + + """ + if self.name: + tprint(indent + "Name: " + self.name) + else: + tprint(indent + self.sim_dir) + if self.description: + tprint(indent + "Description: " + str(self.description)) + if self.labels: + tprint(indent + "Labels: " + str(', '.join(self.labels))) + 
tprint(indent + " Build: %-20s %-60s " %(self.build_job._translate_status() + if self.build_job else printer.colorstr('NOT RUN', 'DARK_YELLOW'), self.sim_dir)) + if self.runs: + tprint(indent + " Runs:") + for run in self.runs: + run.report(indent=indent+' ') + if self.valgrind_runs: + tprint(indent + " Valgrind Runs:") + for run in self.valgrind_runs: + run.report(indent=indent+' ') + + class Run(object): + """ + Management class for run content read from yml config file. Each key in the + runs: sub-dict will become a single instance of this management class + """ + def __init__(self, sim_dir, input, binary, prerun_cmd = '', returns=0, valgrind_flags=None, + log_dir='/tmp/'): + """ + Initialize this instance. + + >>> r = TrickWorkflow.Run(sim_dir=os.path.join(this_trick, 'test/SIM_alloc_test'), input='RUN_test/input.py', binary='S_main_Linux_x86_64.exe') + >>> r.input + 'RUN_test/input.py' + + Parameters + ---------- + sim_dir : str + path to sim directory from top level of repository for this run + input : str + literal argument to sim binary representing how this run is initiated + including command-line arguments + ex: RUN_test/input.py, or RUN_test/input.py --my-arg + binary : str + Name of executable, usually S_min.. bu platform specific + prerun_cmd : str + Optional string to execute immediately before sim run, e.g. env sourcing + valgrind_flags : str + If not None, use these flag and valgrind for this run + log_dir : str + Directory in which log files will be written + """ + self.sim_dir = sim_dir # Path to sim directory wrt to top level of project for this run + self.prerun_cmd = prerun_cmd # Optional string to execute in shell immediately before running (env) + self.input = input # Full RUN.../input.py --any-flags --as-well, relative to sim_dir + self.returns = returns # Expected exit code on success for this run + self.valgrind_flags = valgrind_flags # If not None, this run is to be run in valgrind w/ these flags + self.log_dir = log_dir # Dir where all logged output will go + self.just_input = self.input.split(' ')[0] # Strip flags if any + # Derive Just the "RUN_something" part of run_dir_path + self.just_run_dir = os.path.dirname(self.just_input) + # Derive Path to run directory wrt to top level of project + self.run_dir_path = os.path.join(self.sim_dir, self.just_run_dir) + # Populated later + self.binary = binary # Name of binary + self.run_job = None # SingleRun Job instance for this run + self.comparisons = [] # List of comparison objects associated with this run + self.analysis = None # Job instance of after-run-completes custom analysis + + def add_comparison(self, test_data, baseline_data): + """ + Given two data directories, add a new Comparison() instance to self.comparisons list + + >>> r = TrickWorkflow.Run(sim_dir='test/SIM_alloc_test', input='RUN_test/input.py', binary='S_main_Linux_x86_64.exe') + >>> r.add_comparison('share/trick/trickops/tests/baselinedata/log_a.csv','share/trick/trickops/tests/testdata/log_a.csv') + + Parameters + ---------- + test_dir : str + path to file containing test logged data + baseline_dir : str + path to file containing baseline logged data + """ + comparison = TrickWorkflow.Comparison( test_data, baseline_data ); + self.comparisons.append( comparison ) + + def add_analysis(self, cmd): + """ + Given an analysis command, add it as a post-run analysis for this run + + >>> r = TrickWorkflow.Run(sim_dir='test/SIM_alloc_test', input='RUN_test/input.py', binary='S_main_Linux_x86_64.exe') + >>> r.add_analysis('echo analysis goes here') + + 
Parameters + ---------- + cmd : str + literal string representing command to execute post-run + """ + if self.analysis: + tprint("WARNING: Overwriting analysis definition for %s's %s" % (self.sim_dir, + self.input), 'DARK_YELLOW') + logfile = (os.path.join(self.log_dir, unixify_string(self.sim_dir)) + +'_'+ unixify_string(self.input) + '_analysis.txt') + self.analysis = Job(name=textwrap.shorten(cmd, width=90), command=self.prerun_cmd + " " +cmd, + log_file=logfile, expected_exit_status=0) + + def compare( self): + """ + Execute all internal comparisons for this run + + >>> r = TrickWorkflow.Run(sim_dir='test/SIM_alloc_test', input='RUN_test/input.py', binary='S_main_Linux_x86_64.exe') + >>> r.compare() # No comparisons means success a.k.a 0 + False + + Returns + ------- + bool + False if all comparisons succeed, True if any failed + """ + return any([c.compare() != Job.Status.SUCCESS for c in self.comparisons]) + + def report(self, indent=''): + """ + Report this run's information verbosely, ignoring members that are None + + Parameters + ------- + indent : str + prepend the report with this custom string + + """ + tprint(indent + " %-20s %s" % (self.run_job._translate_status() + if self.run_job else printer.colorstr('NOT RUN', 'DARK_YELLOW'), + self.input)) + if self.comparisons: + tprint(indent + " Run Comparisons:") + for comparison in self.comparisons: + comparison.report(indent=indent+' ') + if self.analysis: + tprint(indent + " Run Analysis:") + tprint(indent + " " + self.analysis.report()) + + def get_run_job(self): + """ + Create if necessary and Return the SingleRun() job instance + + >>> r = TrickWorkflow.Run(sim_dir='test/SIM_alloc_test', input='RUN_test/input.py', binary='S_main_Linux_x86_64.exe') + >>> j = r.get_run_job() + + Returns + ------- + SingleRun() + SingleRun() Job instance for this run + """ + if not self.run_job: + name = 'Run ' + + cmd = "%s cd %s && " % (self.prerun_cmd, self.sim_dir) + sim_name = os.path.basename(os.path.normpath(self.sim_dir)) + logfile = os.path.join(self.log_dir, unixify_string(self.sim_dir)) + + if self.valgrind_flags: + cmd += ( "valgrind %s --log-file=%s " % (self.valgrind_flags, + (os.path.join(self.log_dir, sim_name) +'_valgrind_' + + unixify_string(self.input) + '.valgrind') )) + logfile += '_valgrind' + name += 'Valgrind ' + logfile += "_" + unixify_string(self.input) + '.txt' + cmd += (" ./%s %s" % (self.binary, self.input)) + name += self.sim_dir + ' ' + self.input + + self.run_job = SingleRun(name=name, command=(cmd), + expected_exit_status=self.returns, log_file=logfile) + return (self.run_job) + + class Comparison(object): + """ + Management class for a logged data comparison + Params: + test_data: path to file that represents test data (data generated by a run) + baseline_data: path to file that represents baseline data for a run + """ + def __init__(self, test_data, baseline_data): + """ + Initialize this instance. 
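+ Both paths are interpreted relative to the project top level; the
+ files are compared by hash rather than parsed, so any logged data
+ format can be compared.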
+
+ >>> c = TrickWorkflow.Comparison('share/trick/trickops/tests/baselinedata/log_a.csv','share/trick/trickops/tests/testdata/log_a.csv')
+
+ Parameters
+ ----------
+ test_data : str
+ Path to a single test logged data file
+ baseline_data : str
+ Path to a single baseline logged data file
+ """
+ self.test_data = test_data # Test data file with respect to project top level
+ self.baseline_data = baseline_data # Baseline data file with respect to project top level
+ self.status = Job.Status.NOT_STARTED # Status of comparison
+ self.error = None # Error details if found
+ self.missing = [] # List of strings with details of missing files if any
+
+ def compare(self):
+ """
+ Execute an md5 hash comparison of self.test_data vs. self.baseline_data.
+
+ >>> c = TrickWorkflow.Comparison('share/trick/trickops/tests/baselinedata/log_a.csv','share/trick/trickops/tests/testdata/log_a.csv')
+ >>> c.compare() == Job.Status.FAILED
+ True
+
+ Returns
+ -------
+ Job.Status ENUM
+ status of the comparison: Job.Status.SUCCESS on success, Job.Status.FAILED
+ on failure
+ """
+ for hs in [self.test_data, self.baseline_data]:
+ if not os.path.exists(hs):
+ self.missing.append(hs)
+ self.status = Job.Status.FAILED
+ if self.missing:
+ return self.status
+ if (hashlib.md5(open(self.test_data,'rb').read()).hexdigest() !=
+ hashlib.md5(open(self.baseline_data,'rb').read()).hexdigest()):
+ self.status = Job.Status.FAILED
+ else:
+ self.status = Job.Status.SUCCESS
+ return self.status
+
+ def get_status(self):
+ """
+ Return the status member ENUM
+
+ >>> c = TrickWorkflow.Comparison('share/trick/trickops/tests/baselinedata/log_a.csv','share/trick/trickops/tests/testdata/log_a.csv')
+ >>> c.get_status() == Job.Status.NOT_STARTED
+ True
+
+ Returns
+ -------
+ Job.Status ENUM
+ status of the comparison: Job.Status.SUCCESS on success, Job.Status.FAILED
+ on failure, Job.Status.NOT_STARTED if compare() hasn't been run
+ """
+ return self.status
+
+ def _translate_status(self):
+ """
+ Utility function that takes in a status and colors it, for easier reporting
+ """
+ text, color = {
+ Job.Status.NOT_STARTED: ('NOT RUN', 'DARK_YELLOW'),
+ Job.Status.SUCCESS: ('OK', 'DARK_GREEN'),
+ Job.Status.FAILED: ('FAIL', 'DARK_RED') }[self.get_status()]
+ return printer.colorstr(text, color)
+
+ def get_dirnames(self):
+ """
+ Get a tuple of this comparison's test directory and baseline directory,
+ not including the actual filename. If either directory does not exist,
+ return None for that element of the tuple
+
+ >>> os.chdir(this_trick)
+ >>> c = TrickWorkflow.Comparison('share/trick/trickops/tests/baselinedata/log_a.csv','share/trick/trickops/tests/testdata/log_a.csv')
+ >>> c.get_dirnames()
+ ('share/trick/trickops/tests/baselinedata', 'share/trick/trickops/tests/testdata')
+
+ Returns
+ -------
+ tuple
+ (<test_dirname>, <baseline_dirname>)
+ """
+ test_dirname = os.path.dirname(self.test_data)
+ baseline_dirname = os.path.dirname(self.baseline_data)
+ if not os.path.exists(test_dirname):
+ test_dirname = None
+ if not os.path.exists(baseline_dirname):
+ baseline_dirname = None
+ return (test_dirname, baseline_dirname)
+
+ def report(self, indent=''):
+ """
+ Report this comparison's information verbosely
+
+ Parameters
+ ----------
+ indent : str
+ prepend the report with this custom string
+
+ """
+ string = indent + "%-22s %s" % (self._translate_status(), self.test_data)
+ if self.test_data in self.missing:
+ string += printer.colorstr(" (missing)", 'DARK_RED')
+ string += " vs. 
%s" % (self.baseline_data) + if self.baseline_data in self.missing: + string += printer.colorstr(" (missing)", 'DARK_RED') + tprint(string) + +class SimulationJob(Job): + """ + A Job which is a Trick simulation. + """ + + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def _create_variables(self): + """ + Create and return a list of Variables to periodically sample + via the sim's variable server. + + Returns + ------- + [variable_server.Variable] + A list of variables to periodically sample. + """ + pass + + @abc.abstractmethod + def _connected_string(self): + """ + Get a string displaying status information. This method will + be only called after this instance has successfull connected + to the sim. + + Returns + ------- + str + A string containing status information. + """ + pass + + @abc.abstractmethod + def _connected_bar(self): + """ + Get a progress bar representing the current progress. This + method will be only called after this instance has successfull + connected to the sim. + + Returns + ------- + str + A progress bar representing the current progress. + """ + pass + + def start(self): + """ + Start this Simulation job. Attempts a connection to the sim variable server + in another thread after calling the base class Job() start() method + """ + super(SimulationJob, self).start() + self._connected = False + + # Finding a sim via PID can take several seconds. + # Do it on another thread so this method can return immediately. + # Sims that complete especially quickly may terminate before we + # can connect, so use a timeout and quit trying if the subprocess + # has finished. + def connect(): + import psutil + while self.get_status() is self.Status.RUNNING: + # The base-class start() call will have already populated self._process.pid, + # but that pid may be incompatible with the upcoming find_simulation call + # if the command spawns child processes (e.g. self._command = + # 'cd somewhere && ./S_main..' will result in a child of self._process.pid + # actually running S_main. So here we search self._process.pid's process tree + # for the first process whose name matches the typical S_main naming pattern: + # '.*S_main.. 
and give find_simulation() that PID, defaulting to
+ # self._process.pid if not found
+ sim_pid = self._process.pid
+ try:
+ children = psutil.Process(self._process.pid).children(recursive=True)
+ outerbreak = False
+ for child in children:
+ for i in child.cmdline():
+ if re.search('.*S_main', i):
+ sim_pid = child.pid
+ outerbreak = True
+ break
+ if outerbreak:
+ break
+ except Exception:
+ sim_pid = self._process.pid
+ # Now connect to the sim_pid
+ try:
+ self._variable_server = variable_server.find_simulation(
+ pid=sim_pid, timeout=5)
+ self._variable_server.add_variables(
+ *self._create_variables())
+ self._variable_server.set_period(0.1)
+ self._connected = True
+ return
+ except socket.timeout:
+ pass
+
+ thread = threading.Thread(target=connect,
+ name='Looking for ' + self.name)
+ thread.daemon = True
+ thread.start()
+
+ def get_status_string_line_count(self):
+ return super(SimulationJob, self).get_status_string_line_count() + 1
+
+ def _not_started_string(self):
+ return super(SimulationJob, self)._not_started_string() + '\n'
+
+ def _running_string(self):
+ elapsed_time = super(SimulationJob, self)._running_string()
+
+ if self._connected:
+ return (elapsed_time + self._connected_string() + '\n'
+ + self._connected_bar())
+
+ return (elapsed_time + '\n'
+ + create_progress_bar(0, 'Connecting'))
+
+ def _success_string(self):
+ text = super(SimulationJob, self)._success_string()
+ if self._connected:
+ text += self._connected_string()
+ return text + '\n' + self._success_progress_bar
+
+ def _failed_string(self):
+ text = super(SimulationJob, self)._failed_string()
+ if self._connected:
+ text += self._connected_string()
+ return text + '\n' + self._failed_progress_bar
+
+ def die(self):
+ try:
+ self._variable_server.close()
+ except:
+ pass
+ super(SimulationJob, self).die()
+
+ def __del__(self):
+ try:
+ self._variable_server.close()
+ except:
+ pass
+
+class SingleRun(SimulationJob):
+ """
+ A regular (not Monte Carlo) sim.
+ """
+
+ def _create_variables(self):
+ self._tics = variable_server.Variable(
+ 'trick_sys.sched.time_tics', type_=float)
+ self._tics_per_sec = variable_server.Variable(
+ 'trick_sys.sched.time_tic_value', type_=float)
+ self._terminate_time = variable_server.Variable(
+ 'trick_sys.sched.terminate_time', type_=float)
+ return self._tics, self._tics_per_sec, self._terminate_time
+
+ def _connected_string(self):
+ return ' {0} {1}'.format(
+ self._sim_time(), self._average_speed())
+
+ def _connected_bar(self):
+ progress = self._tics.value / self._terminate_time.value
+ return create_progress_bar(
+ progress, '{0:.1f}%'.format(100 * progress))
+
+ def _sim_time(self):
+ """
+ Get a string displaying the sim time.
+
+ Returns
+ -------
+ str
+ A string for displaying sim time.
+ """
+ return 'Sim Time: {0:7.1f} sec'.format(
+ self._tics.value / self._tics_per_sec.value)
+
+ def _average_speed(self):
+ """
+ Get a string displaying the average speedup.
+
+ Returns
+ -------
+ str
+ A string for displaying the ratio of sim time to real time.
+ """
+ elapsed_time = (
+ (self._stop_time if self._stop_time else time.time())
+ - self._start_time)
+ return 'Average Speed: {0:4.1f} X'.format(
+ self._tics.value / self._tics_per_sec.value / elapsed_time)
+
+class MonteCarlo(SimulationJob):
+ """
+ A Monte Carlo simulation.
+
+ TODO: This is not currently tested or supported by the TrickWorkflow
+ management layer. However, this class has been used in a project in
+ the 2017-2020 timeframe and should still be functional. 
Reason for
+ not currently supporting it is mostly because the biggest users of
+ Trick monte-carlo have already moved away from its internal master/
+ slave architecture so there isn't much of a need.
+ """
+
+ def _create_variables(self):
+ self._total_runs = variable_server.Variable(
+ 'trick_mc.mc.actual_num_runs', type_=int)
+ self._finished_runs = variable_server.Variable(
+ 'trick_mc.mc.num_results', type_=int)
+ self._num_slaves = variable_server.Variable(
+ 'trick_mc.mc.num_slaves', type_=int)
+ return self._total_runs, self._finished_runs, self._num_slaves
+
+ def _connected_string(self):
+ return ' {0} {1}'.format(
+ self._slave_count(), self._run_status())
+
+ def _connected_bar(self):
+ if self._total_runs.value > 0:
+ progress = (float(self._finished_runs.value)
+ / self._total_runs.value)
+ else:
+ progress = 0.0
+ return create_progress_bar(
+ progress, '{0:.0f}%'.format(100 * progress))
+
+ def _slave_count(self):
+ return 'Slaves: {0:5d}'.format(self._num_slaves.value)
+
+ def _run_status(self):
+ return 'Completed Runs: {0:>14}'.format('{0}/{1}'.format(
+ self._finished_runs.value, self._total_runs.value))
diff --git a/share/trick/trickops/WorkflowCommon.py b/share/trick/trickops/WorkflowCommon.py
new file mode 100644
index 000000000..c49bbfd7b
--- /dev/null
+++ b/share/trick/trickops/WorkflowCommon.py
@@ -0,0 +1,811 @@
+"""
+Collection of common utility functions useful to software projects needing
+scripting for testing. The WorkflowCommon class is intended to be inherited
+from and customized on a per-project basis. Use execute_jobs() to manage
+subprocesses to get progress bars and curses display logic for free!
+"""
+
+import argparse
+import curses, textwrap
+import pdb, sys, datetime, time, socket, stat
+import subprocess, signal, logging
+import os, re, collections
+import multiprocessing, platform
+from contextlib import contextmanager
+from ColorStr import ColorStr
+from pathlib import Path
+
+# Create a global color printer
+printer = ColorStr()
+
+# UTILITY FUNCTIONS
+def run_subprocess(command, m_shell=False, m_cwd=None, m_stdout=subprocess.PIPE, m_stderr=subprocess.PIPE):
+ """
+ Utility method for running a subprocess and returning stdout, stderr, and return code
+
+ >>> result = run_subprocess(command='echo hi', m_shell=True, m_stdout=subprocess.PIPE)
+ >>> result.code
+ 0
+ >>> result.stdout #doctest: +ELLIPSIS
+ 'hi...
+
+ Parameters
+ ----------
+ command : str
+ command to be run in subprocess instance
+ m_shell : bool
+ passthrough to subprocess shell parameter
+ m_cwd : str or None
+ passthrough to subprocess cwd parameter
+ m_stdout : file or int
+ passthrough to subprocess stdout parameter
+ m_stderr : file or int
+ passthrough to subprocess stderr parameter
+
+ Returns
+ -------
+ namedtuple
+ Collection of process's final exit code, stdout, and stderr
+ """
+ result = collections.namedtuple("Process", ["code", "stdout", "stderr"])
+ p = subprocess.Popen(command, shell=m_shell, cwd=m_cwd, stdout=m_stdout, stderr=m_stderr)
+ result.stdout, result.stderr = p.communicate()
+ if (result.stdout is not None):
+ result.stdout = result.stdout.decode(errors='ignore')
+ if (result.stderr is not None):
+ result.stderr = result.stderr.decode(errors='ignore')
+ result.code = p.returncode
+ return result
+
+def validate_output_file(filename):
+ """
+ Utility method for validating that a file intended to contain some TBD content
+ can be created; the subdirectories along its path are created if they
+ don't already exist.
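+ Useful for guaranteeing a log file path is writable before a job
+ starts; WorkflowCommon uses it below for its own log file.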
+
+ >>> filename = validate_output_file('/tmp/aoeifmeganrsdfalk/b/c/d/foo.txt')
+ >>> filename
+ '/tmp/aoeifmeganrsdfalk/b/c/d/foo.txt'
+
+ Parameters
+ ----------
+ filename : str
+ Path to file or directory to validate
+
+ Returns
+ -------
+ str
+ Full path to given file or dir
+ """
+ filename = os.path.abspath(filename)
+ os.makedirs(os.path.dirname(filename), exist_ok=True)
+ Path(filename).touch(exist_ok=True)
+ return (filename)
+
+def tprint(line, color='ENDC', verbosity='INFO'):
+ """
+ Utility method for writing text to both the log and stdout, with optional
+ color for stdout
+
+ Parameters
+ ----------
+ line : str
+ Line of string to both print and log
+ color : str
+ Color/style of text. Options are listed in ColorStr() class.
+ verbosity : str
+ Level at which to log the line: 'INFO', 'DEBUG', 'ERROR', or 'WARNING'
+ """
+ colorLine = printer.colorstr(line, color)
+ sys.stdout.write(colorLine+"\n")
+ if verbosity == 'INFO':
+ logging.info(line)
+ elif verbosity == 'DEBUG':
+ logging.debug(line)
+ elif verbosity == 'ERROR':
+ logging.error(line)
+ elif verbosity == 'WARNING':
+ logging.warning(line)
+ else:
+ pass
+
+def create_progress_bar(fraction, text):
+ """
+ Utility method that creates a text-based progress bar
+
+ >>> bar = create_progress_bar(fraction=0.5, text='hello')
+ >>> bar
+ '[=================================== hello                                    ]'
+
+ Parameters
+ ----------
+ fraction : float
+ Fraction of 1.0 representing how far progress bar is
+ text : str
+ Text string to embed inside progress bar
+
+ Returns
+ -------
+ str
+ A text-based progress bar string of length 80
+ """
+ text = ' ' + text + ' '
+ length = len(text)
+ bar = list('[{0:<78}]'.format('=' * min(78, int(round(fraction * 78)))))
+ index = int((len(bar) - length) / 2)
+ bar[index : index + length] = text
+ return ''.join(bar)
+
+def sanitize_cpus(num_cpus, num_tasks, fallback_cpus):
+ """
+ Bounds num_cpus such that it is:
+ - no less than 1
+ - no greater than the lesser of:
+ - num_tasks
+ - the number of logical CPUs (or fallback_cpus if that could
+ not be determined)
+
+ Parameters
+ ----------
+ num_cpus : int
+ the value to be sanitized
+ num_tasks : int
+ the number of runs
+ fallback_cpus : int
+ the upper bound to use when the number of logical CPUs cannot
+ be determined
+
+ Returns
+ -------
+ (int, str)
+ a tuple of:
+ - the sanitized value
+ - a description of the boundary violated (may be None)
+ """
+ if num_cpus < 1:
+ return (1, 'minimum allowable value')
+ else:
+ try:
+ maximum = multiprocessing.cpu_count()
+ source = 'number of logical CPUs'
+ except:
+ maximum = fallback_cpus
+ source = 'maximum allowed value'
+
+ if num_tasks < maximum:
+ maximum = num_tasks
+ source = 'number of runs'
+
+ if num_cpus > maximum:
+ return (maximum, source)
+
+ return (num_cpus, None)
+
+def unixify_string(string):
+ """
+ Sanitizes a string making it nice for unix by processing special characters.
+
+ Removes: ( ) ! ? ,
+ Replaces with underscores: ' / and spaces
+
+ Parameters
+ ----------
+ string : str
+ the string to sanitize
+
+ Returns
+ -------
+ str
+ the sanitized string
+ """
+ return re.sub("['/]", '_', re.sub('[()!?,]', '', string)).replace(' ', '_')
+
+# CLASSES
+class Job(object):
+ """
+ Manages a given command intended to be run as a subprocess and provides methods
+ for getting status information. More specific types of Jobs should inherit from
+ this base class.
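+
+ A minimal usage sketch (the command and file names are illustrative):
+
+ job = Job(name='list files', command='ls /tmp', log_file='/tmp/ls_log.txt')
+ job.start()
+ while job.get_status() is Job.Status.RUNNING:
+ time.sleep(0.1)
+ print(job.report())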
+ """ + enums = ['NOT_STARTED', 'RUNNING', 'SUCCESS', 'FAILED'] + Status = collections.namedtuple('Status', enums)(*(range(len(enums)))) + + _success_progress_bar = create_progress_bar(1, 'Success') + _failed_progress_bar = create_progress_bar(1, 'Failed') + + def _translate_status(self): + """ + Utility function that accepts a Job.Status, colors and formats it with exit + codes if FAIL. + """ + text, color = { + Job.Status.NOT_STARTED: ('NOT RUN', 'DARK_YELLOW'), + Job.Status.SUCCESS: ('OK', 'DARK_GREEN'), + Job.Status.FAILED: ('FAIL:' + str(self._expected_exit_status) + '/' + + str(self._exit_status), 'DARK_RED') }[self.get_status()] + return printer.colorstr(text, color) + + def __init__(self, name, command, log_file, expected_exit_status=0): + """ + Initialize this instance. + + Parameters + ---------- + name : str + The name of this job. + command : str + The command to execute when start() is called. + log_file : str + The file to which to write log information. + """ + self.name = name + self._command = command + self.log_file = log_file + self._log_file = None + self._process = None + self._start_time = None + self._stop_time = None + self._exit_status = None + self._expected_exit_status = expected_exit_status + + def start(self): + """ + Start this job. + """ + logging.debug('Executing command: ' + self._command) + self._start_time = time.time() + self._log_file = open(self.log_file, 'w') + self._process = subprocess.Popen( + self._command, stdout=self._log_file, stderr=self._log_file, + stdin=open(os.devnull, 'r'), shell=True, preexec_fn=os.setsid, + close_fds=True) + + def get_status(self): + """ + Get the current Job status. + + Returns + ------- + Status + Status.NOT_STARTED + This job has yet to be started. + Status.RUNNING + This job is running. + Status.SUCCESS + This job completed with an exit status of zero. + Status.FAILED + This job completed with a non-zero exit status. + """ + if self._process is None: + return self.Status.NOT_STARTED + + self._exit_status = self._process.poll() + if self._exit_status is None: + return self.Status.RUNNING + + if self._stop_time is None: + self._stop_time = time.time() + + return self.Status.SUCCESS if self._exit_status is self._expected_exit_status else self.Status.FAILED + + def get_expected_exit_status(self): + return self._expected_exit_status + + def get_exit_status(self): + return self._exit_status + + def get_status_string_line_count(self): + """ + Get the constant number of lines in the status string. + + Subclasses should override this if they add lines to the status string. + + Returns + ------- + The number of lines in the status string. + """ + return 1 + + def get_status_string(self): + """ + Get a string containing status information. + + ------- + str + Details of this job's progress. + """ + status = self.get_status() + if status is self.Status.NOT_STARTED: + return self._not_started_string() + if status is self.Status.RUNNING: + return self._running_string() + if status is self.Status.SUCCESS: + return self._success_string() + return self._failed_string() + + def _not_started_string(self): + """ + Get a string to display before this Job has been started. + + Returns + ------- + str + A string to be displayed when in the NOT_STARTED state. + """ + return 'Not started yet' + + def _running_string(self): + """ + Get a string to display while this Job is running. + + Returns + ------- + str + A string to be displayed when in the RUNNING state. 
+ """ + return 'Elapsed Time: {0:7.1f} sec'.format( + time.time() - self._start_time) + + def _success_string(self): + """ + Get a string to display when this Job has finished successfully. + + Returns + ------- + str + A string to be displayed when in the SUCCESS state. + """ + return self._done_string() + + def _failed_string(self): + """ + Get a string to display when this Job has failed. + + Returns + ------- + str + A string to be displayed when in the FAILED state. + """ + return self._done_string() + + def _done_string(self): + """ + This class uses the same string for SUCCESS and FAILED, but + subclasses may differentiate. + + Returns + ------- + str + A string to be displayed when this Job is done. + """ + return 'Total Time: {0:7.1f} sec'.format( + self._stop_time - self._start_time) + + + def report( self, indent='', width=100): + """ + Return a colored report string for this job. + + Returns + ------- + str + A olored report string + """ + if self.get_status() == Job.Status.NOT_STARTED: + prepend = printer.colorstr('NOT RUN ', 'DARK_YELLOW') + elif self.get_expected_exit_status() == self.get_exit_status(): + prepend = printer.colorstr('OK ', 'DARK_GREEN') + else: + prepend = printer.colorstr('FAIL:%-10s ' % + (str(self.get_exit_status())+'/'+str(self.get_expected_exit_status())) , 'DARK_RED') + reportStr = indent + prepend + (" %-s" % ( textwrap.shorten(self.name, width=width-10))) + return reportStr + + + def die(self): + """ + Immediately kill this Job and all processes in its process group + by sending them the SIGKILL signal. + """ + try: + os.killpg(os.getpgid(self._process.pid), signal.SIGKILL) + self._process.wait() + except: + pass + +class FileSizeJob(Job): + """ + A build job is a subclass of Job() which estimates its progress via file size. + """ + def __init__(self, name, command, log_file, size): + """ + Initialize this instance. + + Parameters + ---------- + name : str + The name of this job. + command : str + The command to execute when start() is called. + log_file : str + The file to which to write log information. + size : int + The expected number size in bytes of the log_file when the + build is complete. Used to estimate progress. + """ + super(FileSizeJob, self).__init__(name, command, log_file) + self.size = size + + def get_status_string_line_count(self): + return super(FileSizeJob, self).get_status_string_line_count() + 1 + + def _not_started_string(self): + return super(FileSizeJob, self)._not_started_string() + '\n' + + def _running_string(self): + progress = os.path.getsize(self._log_file.name) / float(self.size) + return (super(FileSizeJob, self)._running_string() + '\n' + + create_progress_bar(progress, '{0:.1f}%'.format(100 * progress))) + + def _success_string(self): + return (super(FileSizeJob, self)._success_string() + '\n' + + self._success_progress_bar) + + def _failed_string(self): + return (super(FileSizeJob, self)._failed_string() + '\n' + + self._failed_progress_bar) + + +class WorkflowCommon: + """ + Base class for a typical software workflow + Capabilies this base class provides: + * Storage of global environment associated with this workflow + * Execute particular tests (Jobs) in parallel and collect their exit codes + * Provide log directory where output for jobs can be written + * Get installed packages, host, and other OS information + """ + def __init__( self, project_top_level, log_dir='/tmp/', log_level=logging.DEBUG, env='', quiet=False ): + """ + Initialize this instance. 
+
+ Parameters
+ ----------
+ project_top_level : str
+ Path to the top level of this project a.k.a. root directory
+ log_dir : str
+ Path to directory where all test log output will be written
+ log_level : logging level
+ Logging level to use for logging.basicConfig
+ env : str
+ Literal string representing what should be run before all tests to provide
+ the project specific environment. Typically "source .bashrc;" or similar
+ quiet : bool
+ Flag for keeping verbose output to a minimum. Suppresses all progress bars
+ when true, which is useful for running in a CI system where stdin isn't
+ available
+ """
+ self.project_top_level = project_top_level
+ self.log_dir = log_dir # Where all logged output will go
+ # self.log is where all logging for python script layer goes
+ self.log = validate_output_file( os.path.join(self.log_dir,'log.' +
+ unixify_string(str(datetime.datetime.now()).replace(':','-')) + '.txt'))
+ self.env = env # Project environment literal string, e.g. "source bashrc"
+ self.creation_time = time.time() # When this instance was created
+ self.host_name = socket.gethostname()
+ self.platform = sys.platform
+ self.this_os = None # Intended to be retrieved on demand
+ # Suppress all normal printing, excluding errors/warnings of course
+ self.quiet = bool(quiet)
+ logging.basicConfig(filename=self.log, level=log_level)
+ os.chdir(self.project_top_level) # Automatically chdir to top of project
+
+ def _cleanup(self):
+ """
+ Cleanup action to be used if exception is raised
+
+ Removes the logging file.
+ """
+ os.remove(self.log)
+
+ def get_installed_packages(self, verbose=False):
+ """
+ Utility function that returns a list of installed packages queried from the host
+ or None if packages cannot be determined
+
+ Parameters
+ ----------
+ verbose : bool
+ Flag for printing verbose output if packages can't be found
+
+ Returns
+ -------
+ list or None
+ A list of installed packages, or None if they cannot be determined
+ """
+ self.this_os = self.get_operating_system()
+ if self.this_os == 'debian':
+ list_cmd = "dpkg-query -W -f='${binary:Package}\n'"
+ elif self.this_os == 'redhat':
+ list_cmd = "rpm -qa "
+ else:
+ return None
+
+ result = run_subprocess(command=list_cmd, m_shell=True,
+ m_stdout=subprocess.PIPE, m_stderr=subprocess.PIPE)
+ if result.code != 0:
+ if verbose:
+ tprint("ERROR: Trouble finding platform packages, error:\n" + result.stderr, 'DARK_RED')
+ return None
+ return(result.stdout.strip().split("\n"))
+
+ def get_operating_system(self):
+ """
+ Get a lowercase string representing the operating system of this machine
+
+ Returns
+ -------
+ str
+ Returns one of these strings: debian, redhat, windows, darwin, unknown
+ """
+ # Windows is easy
+ if platform.system().lower() == 'windows':
+ return 'windows'
+ # Darwin (Mac OSX) is easy
+ if platform.system().lower() == 'darwin':
+ return 'darwin'
+ # Linux is hard
+ # NOTE: sys.platform will not distinguish between flavors of linux and platform.linux_distribution()
+ # doesn't exist on all linux distributions! Also we can't rely on 'import distro' because it's
+ # only pip-installable at the moment. 
-Jordan 2/2021
+ # Determine platform the old fashioned way, by seeing what package manager exists
+ rpm_exist = not run_subprocess(command='which rpm', m_shell=True,
+ m_stdout=subprocess.PIPE, m_stderr=subprocess.PIPE).code
+ apt_exist = not run_subprocess(command='which apt', m_shell=True,
+ m_stdout=subprocess.PIPE, m_stderr=subprocess.PIPE).code
+ if rpm_exist:
+ return 'redhat'
+ elif apt_exist:
+ return 'debian'
+ else:
+ return 'unknown'
+
+ def execute_jobs(self, jobs, max_concurrent=None, header=None):
+ """
+ Run jobs, blocking until all have returned.
+
+ Parameters
+ ----------
+ jobs : iterable of Job
+ The jobs to run.
+ max_concurrent : int
+ The maximum number of jobs to execute simultaneously.
+ header : str
+ Header text.
+
+ Returns
+ -------
+ bool
+ True if any job failed or was not run.
+ False if all jobs completed successfully.
+ """
+ if not os.environ.get('TERM') and not self.quiet:
+ tprint(
+ 'The TERM environment variable must be set when the command\n'
+ 'line option --quiet is not used. This is usually set by one\n'
+ "of the shell's configuration files (.profile, .cshrc, etc).\n"
+ 'However, if this was executed via a non-interactive,\n'
+ "non-login shell (for instance: ssh <machine> <command>), it\n"
+ 'may not be automatically set.', 'DARK_RED')
+ return True
+
+ num_jobs = len(jobs)
+ if max_concurrent is None or max_concurrent < 1:
+ max_concurrent = num_jobs
+
+ if header:
+ header += '\n'
+ else:
+ header = ''
+
+ header += (
+ 'Executing {0} total jobs, running up to {1} simultaneously.\n'
+ .format(num_jobs, max_concurrent) +
+ 'Press CTRL+C to terminate early.\n')
+
+ logging.info(header)
+
+ # Define the meat of this function in an inner function.
+ # This inner function will be called via curses.wrapper if
+ # status output is enabled. Otherwise, it will be called
+ # directly. See below.
+ def execute(stdscr=None):
+
+ # stdscr is passed via curses.wrapper
+ if stdscr:
+ # Turn off the cursor. Not all terminals may support
+ # this.
+ try:
+ curses.curs_set(False)
+ except curses.error:
+ pass
+
+ # Configure colors. Not all terminals may support
+ # this.
+ try:
+ curses.start_color()
+ curses.use_default_colors()
+ curses.init_pair(1, curses.COLOR_RED, -1)
+ curses.init_pair(2, curses.COLOR_GREEN, -1)
+ use_colors = True
+ except curses.error:
+ use_colors = False
+
+ # Cause getch to be non-blocking. The arrow keys and
+ # mouse wheel are used to scroll the pad. We don't
+ # want to hang if the user doesn't type anything.
+ stdscr.timeout(0)
+
+ # Nothing will be displayed without an initial call
+ # to refresh.
+ stdscr.refresh()
+
+ # Create a pad for the header. It must have enough
+ # lines to contain all the content we intend to
+ # write. Text longer than the width wraps, consuming
+ # extra lines, so pick a really big width that isn't
+ # likely to cause wrapping. We also need a final
+ # additional line for the cursor to end on.
+ header_pad = curses.newpad(header.count('\n') + 1, 1000)
+ header_pad.addstr(header)
+
+ # Create a pad for the status.
+ # The total line count is:
+ # all job status strings
+ # + a line for each job name
+ # + a blank line after each status string
+ # + a final line for the cursor to end on
+ status_pad = curses.newpad(
+ sum(job.get_status_string_line_count() for job in jobs)
+ + 2 * len(jobs) + 1,
+ 1000)
+
+ # The top visible status pad line.
+ # Used for scrolling.
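+ # (clamped below so the view never scrolls past the end of status_pad)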
+ top_line = 0 + header_height = header_pad.getmaxyx()[0] + status_height = status_pad.getmaxyx()[0] + + while any(job.get_status() in + [job.Status.NOT_STARTED, job.Status.RUNNING] + for job in jobs): + + # Start waiting jobs if cpus are available + waitingJobs = [job for job in jobs + if job.get_status() is job.Status.NOT_STARTED] + + if waitingJobs: + available_cpus = max_concurrent - sum(1 for job in jobs + if job.get_status() is job.Status.RUNNING) + + for i in range(min(len(waitingJobs), available_cpus)): + waitingJobs[i].start() + + # display the status if enabled + if stdscr: + status_pad.erase() + for i, job in enumerate(jobs): + # print the name + status_pad.addstr('Job {0:{width}d}/{1}: '.format( + i + 1, num_jobs, width=len(str(num_jobs)))) + status_pad.addstr(job.name + '\n', curses.A_BOLD) + + # print the status string + if use_colors: + # color the status string + status = job.get_status() + if status is job.Status.FAILED: + color = curses.color_pair(1) + elif status is job.Status.SUCCESS: + color = curses.color_pair(2) + else: + color = curses.color_pair(0) + status_pad.addstr( + job.get_status_string() + '\n\n', color) + else: + status_pad.addstr( + job.get_status_string() + '\n\n') + + # handle scrolling + while True: + key = stdscr.getch() + if key == -1: + # no input + break + if key == curses.KEY_UP: + top_line -= 1 + elif key == curses.KEY_DOWN: + top_line += 1 + + # prevent scrolling beyond the bounds of status_pad + screen_height, screen_width = stdscr.getmaxyx() + top_line = max( + 0, + min(top_line, + status_height - 2 - (screen_height - header_height))) + + # Resizing the terminal can cause the actual + # screen width or height to become smaller than + # what we already got from getmaxyx, resulting + # in a curses.error in these calls. Note that + # even calling getmaxyx again right here isn't + # fool-proof. Resizing is asynchronous (curses + # responds to it via a signal handler), so the + # size can always change between when we get it + # and when we use it. Best to just use what we + # have and ignore errors. + try: + header_pad.noutrefresh( + 0, 0, 0, 0, screen_height - 1, screen_width - 1) + status_pad.noutrefresh( + top_line, 0, header_height, 0, + screen_height - 1, screen_width - 1) + except curses.error: + pass + curses.doupdate() + + # take a nap + time.sleep(0.1) + # When done clear everything, without this subsequent calls + # to execute_jobs can show previous status bars if the number + # of jobs is less on the subsequent executions + if not self.quiet: + stdscr.clear() + + try: + if not self.quiet: + # wrapper takes care of initializing the terminal and + # restores it to a useable state regardless of how + # execute exits (even via exception) + curses.wrapper(execute) + else: + # not using curses, just call execute + execute() + + except BaseException as exception: + logging.exception('') + tprint( + 'An exception occurred. See the log for details.\n\n' + ' ' + repr(exception) + "\n\n" + 'Terminating all jobs. Please wait for cleanup to finish. 
' 'CTRL+C may leave orphaned processes.', 'DARK_RED',
+ 'ERROR')
+
+ # kill all the jobs
+ for job in jobs:
+ job.die()
+ tprint('All jobs terminated.\n', 'DARK_RED')
+
+ # print summary
+ summary = 'Job Summary\n'
+ for i, job in enumerate(jobs):
+ summary += 'Job {0:{width}d}/{1}: {2}\n{3}\n'.format(
+ i + 1, num_jobs, job.name, job.get_status_string(),
+ width=len(str(num_jobs)))
+
+ logging.info(summary)
+
+ for job in jobs:
+ text, color = {
+ job.Status.NOT_STARTED: ('was not run', 'GREY40'),
+ job.Status.SUCCESS: ('succeeded', 'DARK_GREEN'),
+ job.Status.FAILED: ('failed', 'DARK_RED')
+ }[job.get_status()]
+
+ text = job.name + ' ' + text
+
+ # Print the summary status even if self.quiet is True
+ tprint(text, color)
+
+ return any(job.get_status() is not job.Status.SUCCESS for job in jobs)
diff --git a/share/trick/trickops/requirements.txt b/share/trick/trickops/requirements.txt
new file mode 100644
index 000000000..6a4f9f2d3
--- /dev/null
+++ b/share/trick/trickops/requirements.txt
@@ -0,0 +1,2 @@
+PyYAML
+psutil
diff --git a/share/trick/trickops/tests/.gitignore b/share/trick/trickops/tests/.gitignore
new file mode 100644
index 000000000..e82b6da7a
--- /dev/null
+++ b/share/trick/trickops/tests/.gitignore
@@ -0,0 +1 @@
+*_doctest_log.txt
diff --git a/share/trick/trickops/tests/__init__.py b/share/trick/trickops/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/share/trick/trickops/tests/baselinedata/log_a.csv b/share/trick/trickops/tests/baselinedata/log_a.csv
new file mode 100644
index 000000000..b48d10658
--- /dev/null
+++ b/share/trick/trickops/tests/baselinedata/log_a.csv
@@ -0,0 +1,7 @@
+sys.exec.out.time {s}, myvar {--}
+ 0, 0.1
+ 1, 0.2
+ 2, 0.3
+ 3, 0.4
+ 4, 0.5
+
diff --git a/share/trick/trickops/tests/empty.yml b/share/trick/trickops/tests/empty.yml
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/share/trick/trickops/tests/empty.yml
@@ -0,0 +1 @@
+
diff --git a/share/trick/trickops/tests/errors.yml b/share/trick/trickops/tests/errors.yml
new file mode 100644
index 000000000..e61b50def
--- /dev/null
+++ b/share/trick/trickops/tests/errors.yml
@@ -0,0 +1,33 @@
+# YAML file with many errors for unit testing
+# Global configuration parameters
+
+globals:
+ parallel_safety: unsupported_value
+
+extension_example:
+ should: be ignored by this framework
+
+# This sim exists, but has duplicate run entries which is an error
+SIM_ball_L1:
+ path: trick_sims/Ball/SIM_ball_L1
+ size: 6000
+ runs:
+ RUN_test/input.py:
+ RUN_test/input.py:
+
+# This sim exists, but its RUN does not
+SIM_alloc_test:
+ path: test/SIM_alloc_test
+ runs:
+ RUN_buddy/input.py:
+
+# This one has a duplicate non-unique path, which is an error
+SIM_L1_ball:
+ path: trick_sims/Ball/SIM_ball_L1
+
+# This sim doesn't exist
+SIM_foobar:
+ path: test/SIM_foobar
+ runs:
+ RUN_hi/input.py:
+
diff --git a/share/trick/trickops/tests/run_tests.py b/share/trick/trickops/tests/run_tests.py
new file mode 100755
index 000000000..02b94b2dd
--- /dev/null
+++ b/share/trick/trickops/tests/run_tests.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+import argparse
+import unittest
+import os
+import sys
+import logging
+
+from io import StringIO
+
+import os, sys, pdb
+
+this_dir = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(this_dir, '../'))
+from WorkflowCommon import *
+
+def run_tests(args):
+
+ loader = unittest.TestLoader()
+
+ suites = loader.discover(start_dir=this_dir, pattern='test.py', top_level_dir=this_dir)
+ overall_suite = 
unittest.TestSuite() + for test_suite in suites: + if test_suite._tests: + overall_suite.addTests(test_suite) + + logger = logging.getLogger() + logger.handlers = [] + logger.disabled = True + + # Run all unit tests + print("Executing all python unit test suites...") + runner = unittest.TextTestRunner(verbosity=args.verbosity, buffer=True) + ut_results = runner.run(overall_suite) + + # Run all doc tests by eating our own dogfood + doctest_files = ['TrickWorkflow.py', 'WorkflowCommon.py'] + wc = WorkflowCommon(this_dir, quiet=True) + jobs = [] + log_prepend = '_doctest_log.txt' + for file in doctest_files: + job = ( Job(name='Running doctest on ' + file, command='python -m doctest -v ../%s' % file, + log_file=file+log_prepend)) + jobs.append(job) + wc.execute_jobs([job]) + + print("doctest verbose output can be found in *%s" % (log_prepend)) + return (len(ut_results.failures) > 0 or len(ut_results.errors) > 0 or + any(job.get_status() is not job.Status.SUCCESS for job in jobs)) + + +# Main execution block +if __name__ == '__main__': + # Create our Argument Parser + parser = argparse.ArgumentParser( + description="Execute all trickops python unit and doc tests. Must be run from this directory" + " inside a python3 environment with dependencies described by trickops/requirements.txt. " + " If koviz is not on your path, koviz-related unit tests will fail.") + parser.add_argument("-v", "--verbosity", type=int, + help="Reporting level when running from the console.", choices=[1, 2, 3, 4, 5], default=1) + + # Parse the arguments + args = parser.parse_args() + + # Execute our tests + sys.exit(run_tests(args)) diff --git a/share/trick/trickops/tests/test.py b/share/trick/trickops/tests/test.py new file mode 100644 index 000000000..f0fc3baa9 --- /dev/null +++ b/share/trick/trickops/tests/test.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 + +"""Unit test script to test local modules.""" + +import os, sys, pdb +import unittest + +import ut_WorkflowCommon +import ut_TrickWorkflow + +# Define load_tests function for dynamic loading using Nose2 +def load_tests(*args): + passed_args = locals() + suite = unittest.TestSuite() + suite.addTests(ut_TrickWorkflow.suite()) + suite.addTests(ut_WorkflowCommon.suite()) + return suite + +# Local module level execution only +if __name__ == '__main__': + suites = unittest.TestSuite() + suites.addTests(ut_TrickWorkflow.suite()) + suites.addTests(ut_WorkflowCommon.suite()) + + unittest.TextTestRunner(verbosity=2).run(suites) diff --git a/share/trick/trickops/tests/testconfig.py b/share/trick/trickops/tests/testconfig.py new file mode 100644 index 000000000..851b64e8e --- /dev/null +++ b/share/trick/trickops/tests/testconfig.py @@ -0,0 +1,8 @@ +# This file contains any globals all tests use, so that it can be changed +# in a single place if they ever need to be updated +import os, sys, pdb +# Global location of this trick instance, for all tests to use +module_rel_path = 'share/trick/trickops' +this_trick = os.path.abspath(os.path.join(os.getcwd(), '../../../..')) +sys.path.append(os.path.join(this_trick, module_rel_path )) +tests_dir = os.path.join(this_trick, module_rel_path, 'tests') diff --git a/share/trick/trickops/tests/testdata/log_a.csv b/share/trick/trickops/tests/testdata/log_a.csv new file mode 100644 index 000000000..17461dbb6 --- /dev/null +++ b/share/trick/trickops/tests/testdata/log_a.csv @@ -0,0 +1,6 @@ +sys.exec.out.time {s}, myvar {--} + 0, 0.0 + 1, 0.1 + 2, 0.2 + 3, 0.3 + 4, 0.4 diff --git a/share/trick/trickops/tests/trick_sims.yml 
b/share/trick/trickops/tests/trick_sims.yml new file mode 100644 index 000000000..a2ccb36df --- /dev/null +++ b/share/trick/trickops/tests/trick_sims.yml @@ -0,0 +1,223 @@ +# This file lists trick sims from this repository to be used for unit testing +# the trickops module. The size, labels, compare, and valgrind entries are +# meaningful only inside this unit testing framework and are added here for +# internal tests +SIM_ball_L1: + path: trick_sims/Ball/SIM_ball_L1 + size: 6000 + runs: + RUN_test/input.py: + analyze: echo hi + compare: + - share/trick/trickops/tests/testdata/log_a.csv vs. share/trick/trickops/tests/baselinedata/log_a.csv + valgrind: + flags: -v + runs: + - RUN_test/input.py +SIM_alloc_test: + path: test/SIM_alloc_test + runs: + RUN_test/input.py: +SIM_default_member_initializer: + path: test/SIM_default_member_initializer +SIM_demo_inputfile: + path: test/SIM_demo_inputfile + labels: + - unit_test + runs: + RUN_test/unit_test.py: + RUN_test/input.py: +SIM_demo_sdefine: + path: test/SIM_demo_sdefine + labels: + - demo + - unit_test + runs: + RUN_test/input.py: + RUN_test/unit_test.py: +SIM_dynamic_sim_object: + path: test/SIM_dynamic_sim_object + runs: + RUN_test/input.py: +SIM_events: + path: test/SIM_events + labels: + - events + - unit_test + runs: + RUN_test/input.py: + RUN_test/unit_test.py: +SIM_exclusion_mechanisms: + path: test/SIM_exclusion_mechanisms + runs: + RUN_test/input.py: +SIM_isystem: + path: test/SIM_isystem +SIM_leaks: + path: test/SIM_leaks + runs: + RUN_test/input.py: +SIM_measurement_units: + path: test/SIM_measurement_units + runs: + RUN_test/input.py: +SIM_parse_s_define: + path: test/SIM_parse_s_define +SIM_python_namespace: + path: test/SIM_python_namespace + labels: + - python_namespace + - unit_test + runs: + RUN_test/unit_test.py: +SIM_rti: + path: test/SIM_rti + runs: + RUN_test/unit_test.py: + labels: + - unit_test +SIM_segments: + path: test/SIM_segments + runs: + RUN_test/input.py: +SIM_stls: + path: test/SIM_stls + labels: + - unit_test + runs: + RUN_test/input.py: + RUN_test/unit_test.py: + +SIM_swig_template_scoping: + path: test/SIM_swig_template_scoping +SIM_target_specific_variables: + path: test/SIM_target_specific_variables +SIM_test_abstract: + path: test/SIM_test_abstract + runs: + RUN_test/input.py: +SIM_test_dp: + path: test/SIM_test_dp + labels: + - unit_test + runs: + RUN_test/unit_test.py: + RUN_test/input.py: +SIM_test_dr: + path: test/SIM_test_dr + labels: + - unit_test + runs: + RUN_test/unit_test.py: +SIM_test_inherit: + path: test/SIM_test_inherit + runs: + RUN_test/input.py: +SIM_test_io: + path: test/SIM_test_io + labels: + - unit_test + runs: + RUN_test/unit_test.py: +SIM_test_ip: + path: test/SIM_test_ip + labels: + - unit_test + runs: + RUN_test/unit_test.py: +SIM_test_ip2: + path: test/SIM_test_ip2 + runs: + RUN_test/input.py: +SIM_test_sched: + path: test/SIM_test_sched + labels: + - unit_test + runs: + RUN_test/input.py: + RUN_test/unit_test.py: +SIM_test_templates: + path: test/SIM_test_templates + labels: + - unit_test + runs: + RUN_test/unit_test.py: +SIM_test_varserv: + path: test/SIM_test_varserv + labels: + - unit_test + runs: + RUN_test/realtime.py: + RUN_test/unit_test.py: +SIM_threads: + path: test/SIM_threads + labels: + - unit_test + runs: + RUN_test/sched.py: + RUN_test/amf.py: + RUN_test/async.py: + RUN_test/unit_test.py: +SIM_threads_simple: + path: test/SIM_threads_simple + runs: + RUN_test/input.py: + RUN_test/sched.py: + RUN_test/async.py: +SIM_trickcomm: + path: test/SIM_trickcomm + runs: + RUN_test/input.py:
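+# The entries below define only a 'path' with no 'runs'; in this test +# configuration they should contribute build jobs but no run jobs (see the +# build/run job counts asserted in ut_TrickWorkflow.py).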
+SIM_ball_L2: + path: trick_sims/Ball/SIM_ball_L2 +SIM_ball_L3: + path: trick_sims/Ball/SIM_ball_L3 +SIM_amoeba: + path: trick_sims/Cannon/SIM_amoeba +SIM_cannon_aero: + path: trick_sims/Cannon/SIM_cannon_aero +SIM_cannon_analytic: + path: trick_sims/Cannon/SIM_cannon_analytic +SIM_cannon_eulercromer: + path: trick_sims/Cannon/SIM_cannon_eulercromer +SIM_cannon_jet: + path: trick_sims/Cannon/SIM_cannon_jet +SIM_cannon_numeric: + path: trick_sims/Cannon/SIM_cannon_numeric +SIM_monte: + path: trick_sims/Cannon/SIM_monte +SIM_ode_ball: + path: trick_sims/ODE/SIM_ode_ball +SIM_ode_buggy: + path: trick_sims/ODE/SIM_ode_buggy +SIM_ros_publisher: + path: trick_sims/ROS/SIM_ros_publisher +SIM_ros_subscriber: + path: trick_sims/ROS/SIM_ros_subscriber +SIM_Ball++_L1: + path: trick_sims/SIM_Ball++_L1/ +SIM_contact: + path: trick_sims/SIM_contact +SIM_lander: + path: trick_sims/SIM_lander +SIM_msd: + path: trick_sims/SIM_msd +SIM_parachute: + path: trick_sims/SIM_parachute +SIM_rocket: + path: trick_sims/SIM_rocket +SIM_sat2d: + path: trick_sims/SIM_sat2d +SIM_satellite: + path: trick_sims/SIM_satellite +SIM_sun: + path: trick_sims/SIM_sun +SIM_wheelbot: + path: trick_sims/SIM_wheelbot +SIM_ball_L1_er7_utils: + path: trick_source/er7_utils/sims/SIM_ball_L1 +SIM_grav: + path: trick_source/er7_utils/sims/SIM_grav + +non_sim_extension_example: + will: be ignored by TrickWorkflow parsing; derived classes may handle it as they wish diff --git a/share/trick/trickops/tests/ut_TrickWorkflow.py b/share/trick/trickops/tests/ut_TrickWorkflow.py new file mode 100644 index 000000000..844e0f6c5 --- /dev/null +++ b/share/trick/trickops/tests/ut_TrickWorkflow.py @@ -0,0 +1,234 @@ +import os, sys +import unittest +from testconfig import this_trick, tests_dir +from TrickWorkflow import * + +def suite(): + """Create a test suite from the TrickWorkflowTestCase unit test class and return it""" + return unittest.TestLoader().loadTestsFromTestCase(TrickWorkflowTestCase) + +class TrickWorkflowTestCase(unittest.TestCase): + + def setUp(self): + # Nominal scenario: no errors when parsing the trick_sims.yml config file + self.instance = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', + trick_dir=this_trick, config_file=os.path.join(tests_dir, "trick_sims.yml"), + quiet=True) + + def tearDown(self): + self.instance._cleanup() # Remove the log file this instance creates + del self.instance + self.instance = None + + def setUpWithEmptyConfig(self): + self.instance = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', + trick_dir=this_trick, config_file=os.path.join(tests_dir, "empty.yml"), + quiet=True) + + def setUpWithErrorConfig(self): + self.tearDown() # Clean up the instance we get by default + self.instance = TrickWorkflow(project_top_level=this_trick, log_dir='/tmp/', + trick_dir=this_trick, config_file=os.path.join(tests_dir, "errors.yml"), + quiet=True) + + def test_init_nominal(self): + self.assertEqual(self.instance.cpus, 3) + self.assertEqual(self.instance.parallel_safety, 'loose') + self.assertEqual(self.instance.config_errors, False) + self.instance.report() + build_jobs = self.instance.get_jobs('build') + self.assertEqual(len(build_jobs), 56) + self.assertEqual(len(self.instance.sims), 56) + run_jobs = self.instance.get_jobs('run') + self.assertEqual(len(run_jobs), 38) + + def test_init_empty_so_raises(self): + with self.assertRaises(RuntimeError): + self.setUpWithEmptyConfig() + + def test_init_errors_but_no_raise(self): + self.setUpWithErrorConfig()
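+ # errors.yml deliberately contains bad entries (duplicate runs, a + # duplicate sim path, a nonexistent sim); only the cleanly parsed sims + # should survive, which the counts below verify.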
+ self.assertTrue(self.instance.config_errors) + self.assertEqual(self.instance.parallel_safety, 'loose') + self.assertEqual(len(self.instance.sims), 2) + self.assertTrue(self.instance.config['extension_example']) + self.instance.report() + + def test_get_sim_nominal(self): + sim = self.instance.get_sim(identifier='SIM_ball_L1') + self.assertTrue(type(sim) == TrickWorkflow.Sim) + sim = self.instance.get_sim(identifier='trick_sims/Ball/SIM_ball_L1') + self.assertTrue(type(sim) == TrickWorkflow.Sim) + + def test_get_sim_not_found(self): + sim = self.instance.get_sim(identifier='SIM_doesnt_exist') + self.assertTrue(sim is None) + + def test_get_sim_raises(self): + with self.assertRaises(TypeError): + sim = self.instance.get_sim(identifier={}) + + def test_get_sims_nominal(self): + sims = self.instance.get_sims(labels='unit_test') + self.assertEqual(len(sims), 14) + sims = self.instance.get_sims(labels=['unit_test']) + self.assertEqual(len(sims), 14) + sims = self.instance.get_sims(labels=['unit_test', 'demo']) + self.assertEqual(len(sims), 1) + sims = self.instance.get_sims(labels=['unit_test', 'events']) + self.assertEqual(len(sims), 1) + sims = self.instance.get_sims(labels=['events']) + self.assertEqual(len(sims), 1) + + def test_get_sims_not_found(self): + sims = self.instance.get_sims(labels=['unit_test', 'noexistlabel']) + self.assertTrue(len(sims) == 0) + + def test_get_sims_raises(self): + with self.assertRaises(TypeError): + sims = self.instance.get_sims(identifier={}) + + def test_get_run_nominal(self): + sim = self.instance.get_sim(identifier='SIM_ball_L1') + run = sim.get_run('RUN_test/input.py') + self.assertTrue(run is not None) + + def test_get_unique_comparison_dirs(self): + ucd = self.instance.get_unique_comparison_dirs() + self.assertTrue(ucd[0] is not None) + self.assertTrue(ucd[0][0] is not None) + self.assertTrue(ucd[0][1] is not None) + + def test_get_koviz_report_jobs_nominal(self): + krj = self.instance.get_koviz_report_jobs() + self.assertTrue(isinstance(krj[0][0], Job)) + self.assertTrue(not krj[1]) + + def test_get_koviz_report_job_missing_dir(self): + krj = self.instance.get_koviz_report_job('share/trick/trickops/tests/testdata_noexist', + 'share/trick/trickops/tests/baselinedata') + self.assertTrue(krj[0] is None) + self.assertTrue('ERROR' in krj[1]) + + def test_status_summary_nominal(self): + summary = self.instance.status_summary() + # Nothing was run, so nothing could fail + self.assertEqual(summary, 'SUCCESS') + + def test_get_and_pop_run(self): + sim = self.instance.get_sim('SIM_ball_L1') + run = sim.get_run('RUN_test/input.py') + self.assertEqual(run.input, 'RUN_test/input.py') + run = sim.pop_run('RUN_test/input.py') + self.assertEqual(run.input, 'RUN_test/input.py') + self.assertEqual(len(sim.get_runs()), 0) + + def test_check_run_jobs(self): + sim = self.instance.get_sim('SIM_ball_L1') + normal_run_jobs = sim.get_run_jobs() + self.assertTrue('valgrind' not in normal_run_jobs[0]._command) + valgrind_run_jobs = sim.get_run_jobs(kind='valgrind') + self.assertTrue('valgrind' in valgrind_run_jobs[0]._command) + + def test_compare(self): + sim = self.instance.get_sim('SIM_ball_L1') + # Sim level comparison (test_data.csv vs. baseline_data.csv) will fail + self.assertEqual(sim.compare(), 1) + # Run level comparison (test_data.csv vs. 
baseline_data.csv) will fail + run = sim.get_run('RUN_test/input.py') + self.assertEqual(run.compare(), 1) + # Top level call to all comparisons will fail + self.assertEqual(self.instance.compare(), 1) + self.assertEqual(run.comparisons[0]._translate_status(), '\x1b[31mFAIL\x1b[0m') + + def test_get_jobs_nominal(self): + # Test all the permissive permutations + builds = self.instance.get_jobs('build') + self.assertEqual(len(builds), 56) + builds = self.instance.get_jobs('builds') + self.assertEqual(len(builds), 56) + runs = self.instance.get_jobs('run') + self.assertEqual(len(runs), 38) + runs = self.instance.get_jobs('runs') + self.assertEqual(len(runs), 38) + vg = self.instance.get_jobs('valgrind') + self.assertEqual(len(vg), 1) + vg = self.instance.get_jobs('valgrinds') + self.assertEqual(len(vg), 1) + a = self.instance.get_jobs('analysis') + self.assertEqual(len(a), 1) + a = self.instance.get_jobs('analyses') + self.assertEqual(len(a), 1) + a = self.instance.get_jobs('analyze') + self.assertEqual(len(a), 1) + + def test_get_jobs_raises(self): + with self.assertRaises(TypeError): + jobs = self.instance.get_jobs(kind='bucees') + + def test_get_comparisons_nominal(self): + c = self.instance.get_comparisons() + self.assertEqual(len(c), 1) + self.assertEqual(c[0]._translate_status(), '\x1b[33mNOT RUN\x1b[0m') + + def test_add_comparison(self): + sim = self.instance.get_sim('SIM_alloc_test') + run = sim.get_run('RUN_test/input.py') + run.add_comparison('share/trick/trickops/tests/baselinedata/log_a.csv', + 'share/trick/trickops/tests/testdata/log_a.csv') + self.assertTrue(len(run.comparisons) == 1) + + def test_add_analysis_nominal(self): + sim = self.instance.get_sim('SIM_alloc_test') + run = sim.get_run('RUN_test/input.py') + run.add_analysis('echo analysis goes here') + self.assertTrue( 'echo analysis goes here' in run.analysis._command) + + def test_add_analysis_warning(self): + sim = self.instance.get_sim('SIM_ball_L1') + run = sim.get_run('RUN_test/input.py') + run.add_analysis('echo overwriting analysis') + self.assertTrue( 'echo overwriting analysis' in run.analysis._command) + + def test_run_init(self): + r = TrickWorkflow.Run(sim_dir='test/SIM_alloc_test', input='RUN_test/input.py --someflag', + binary='S_main_Linux_x86_64.exe') + self.assertEqual(r.sim_dir, 'test/SIM_alloc_test') + self.assertEqual(r.prerun_cmd, '') + self.assertTrue(r.input == 'RUN_test/input.py --someflag') + self.assertEqual(r.returns, 0) + self.assertTrue(r.valgrind_flags is None) + self.assertEqual(r.log_dir, '/tmp/') + self.assertEqual(r.just_input,'RUN_test/input.py') + self.assertEqual(r.just_run_dir, 'RUN_test') + self.assertEqual(r.run_dir_path, 'test/SIM_alloc_test/RUN_test') + self.assertEqual(r.binary, 'S_main_Linux_x86_64.exe') + self.assertTrue(r.run_job is None) + self.assertEqual(len(r.comparisons), 0) + self.assertTrue(r.analysis is None) + + def test_comparison_init(self): + test_data = 'share/trick/trickops/tests/testdata/log_a.csv' + baseline_data ='share/trick/trickops/tests/baselinedata/log_a.csv' + c = TrickWorkflow.Comparison(test_data,baseline_data) + self.assertTrue(c.baseline_data == baseline_data) + self.assertTrue(c.test_data == test_data) + self.assertTrue(c.error is None) + + def test_run_compare_pass(self): + r = TrickWorkflow.Run(sim_dir='test/SIM_alloc_test', input='RUN_test/input.py --someflag', + binary='S_main_Linux_x86_64.exe') + # Use same data to get a pass + test_data = 'share/trick/trickops/tests/baselinedata/log_a.csv' + baseline_data = 
'share/trick/trickops/tests/baselinedata/log_a.csv' + r.add_comparison(test_data, baseline_data) + self.assertEqual(r.compare(), 0) + + def test_run_compare_fail(self): + r = TrickWorkflow.Run(sim_dir='test/SIM_alloc_test', input='RUN_test/input.py --someflag', + binary='S_main_Linux_x86_64.exe') + # Use different data to get a failure + test_data = 'share/trick/trickops/tests/testdata/log_a.csv' + baseline_data = 'share/trick/trickops/tests/baselinedata/log_a.csv' + r.add_comparison(test_data, baseline_data) + self.assertEqual(r.compare(), 1) diff --git a/share/trick/trickops/tests/ut_WorkflowCommon.py b/share/trick/trickops/tests/ut_WorkflowCommon.py new file mode 100644 index 000000000..ca0c9bf95 --- /dev/null +++ b/share/trick/trickops/tests/ut_WorkflowCommon.py @@ -0,0 +1,99 @@ +import os, sys +import unittest, time +from testconfig import this_trick, tests_dir +from WorkflowCommon import * + +def suite(): + """Create a test suite from the WorkflowCommonTestCase unit test class and return it""" + return unittest.TestLoader().loadTestsFromTestCase(WorkflowCommonTestCase) + +class WorkflowCommonTestCase(unittest.TestCase): + + def setUp(self): + # Nominal instance, using this Trick as the top level project + self.instance = WorkflowCommon(project_top_level=this_trick, log_dir='/tmp/', quiet=True) + # Nominal job that just echoes 'hi'; creating it here does not execute it + self.job_nominal = Job(name='testname', command='echo hi', + log_file='/tmp/WorkflowCommonTestCase_hi.txt') + # Job whose command exits nonzero, for exercising the failure path + self.job_that_fails = Job(name='testfail', command='false', + log_file='/tmp/WorkflowCommonTestCase_fail.txt') + + def tearDown(self): + os.remove(self.instance.log) + del self.instance + self.instance = None + + def test_run_subprocess(self): + result = run_subprocess(command='echo hi', m_shell=subprocess.PIPE, m_stdout=subprocess.PIPE) + self.assertEqual(result.code, 0) + self.assertTrue('hi' in result.stdout) + + def test_validate_output_file(self): + dir = '/tmp/aoeifmeganrsdfalk/' + rest = 'b/c/d/foo.txt' + file = os.path.join(dir, rest) + filename = validate_output_file(file) + self.assertEqual(filename, file) + self.assertTrue(os.path.exists(file)) + # Clean up everything we just created + import shutil + shutil.rmtree(dir) + + def test_create_progress_bar(self): + bar = create_progress_bar(fraction=0.25, text='hello') + self.assertTrue('hello' in bar) + self.assertTrue('====================' in bar) + self.assertTrue('=====================' not in bar) + self.assertTrue(len(bar) == 80) + + def test_sanitize_cpus(self): + cpus = sanitize_cpus(num_cpus=3, num_tasks=4, fallback_cpus=2) + # This check is loose because the function's result depends on the machine it runs on + self.assertTrue(cpus[0] == 3 or cpus[0] == 2) + + def test_unixify_string(self): + string = unixify_string("hi/ there ()! bud?") + self.assertEqual(string, "hi__there__bud") + + def test_init_nominal(self): + self.assertEqual(self.instance.project_top_level, this_trick) + self.assertEqual(self.instance.log_dir, '/tmp/') + self.assertEqual(self.instance.env, '') + self.assertTrue(self.instance.creation_time < time.time()) + self.assertTrue(self.instance.host_name != '') + self.assertTrue(self.instance.this_os is None) + self.assertTrue(self.instance.quiet) + self.assertTrue(os.getcwd() == this_trick) + self.assertTrue(self.job_nominal.name == 'testname') + + def test_get_operating_system_nominal(self): + ''' + This test will respond differently per platform. 
Nominal checks + here should pass for all "supported platforms" + ''' + self.assertTrue(self.instance.get_operating_system() != 'unknown') + + def test_get_installed_packages_nominal(self): + ''' + This test will respond differently per platform. Nominal checks + here should pass for all "supported platforms" + ''' + pkgs = self.instance.get_installed_packages() + self.assertTrue(len(pkgs) > 0) + + def test_job_nominal(self): + self.instance.execute_jobs([self.job_nominal], max_concurrent=1) + self.assertTrue(self.job_nominal.get_status() == Job.Status.SUCCESS) + with open('/tmp/WorkflowCommonTestCase_hi.txt', 'r') as f: + self.assertTrue(f.readlines()[0].strip() == 'hi') + + def test_job_nominal_quiet(self): + self.instance.quiet = True + self.instance.execute_jobs([self.job_nominal], max_concurrent=1) + self.assertTrue(self.job_nominal.get_status() == Job.Status.SUCCESS) + with open('/tmp/WorkflowCommonTestCase_hi.txt', 'r') as f: + self.assertTrue(f.readlines()[0].strip() == 'hi')
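+ # Sketch of a failure-path test using the job_that_fails fixture from setUp. + # Illustrative addition: it assumes execute_jobs() marks a job whose command + # exits nonzero as Job.Status.FAILED, mirroring the status handling in + # WorkflowCommon's job summary logic. + def test_job_that_fails(self): + self.instance.execute_jobs([self.job_that_fails], max_concurrent=1) + self.assertTrue(self.job_that_fails.get_status() == Job.Status.FAILED)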