diff --git a/.bumpversion.cfg b/.bumpversion.cfg new file mode 100644 index 0000000000000000000000000000000000000000..641061899f9774fc1160d11b5f7d703112a2e9d1 --- /dev/null +++ b/.bumpversion.cfg @@ -0,0 +1,18 @@ +[bumpversion] +current_version = 0.2.2 +commit = True +tag = True + +[bumpversion:file:VERSION] + +[bumpversion:file:.github/workflows/deploy_python_package.yml] +search = PACKAGE_VERSION: {current_version} +replace = PACKAGE_VERSION: {new_version} + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:laborchestrator/__init__.py] +search = __version__ = "{current_version}" +replace = __version__ = "{new_version}" diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000000000000000000000000000000000..2ffc016ca7d8d2768211600fa6f831ccc70dc820 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,31 @@ +# http://editorconfig.org + +root = true + +[*] +indent_style = space +indent_size = 4 +trim_trailing_whitespace = true +insert_final_newline = true +charset = utf-8 +end_of_line = lf +max_line_length = 120 + +[*.py] +indent_size = 4 + +[*.toml] +indent_size = 4 + +[*.bat] +indent_style = tab +end_of_line = crlf + +[LICENSE] +insert_final_newline = false + +[*.md] +trim_trailing_whitespace = false + +[Makefile] +indent_style = tab diff --git a/.env.dev b/.env.dev new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000000000000000000000000000000000000..68a25f1cac0791570b66f970131a5b1cb03ddf14 --- /dev/null +++ b/.flake8 @@ -0,0 +1,7 @@ +[flake8] +ignore = E203, W503 +select = B,B9,C,E,F,W +max-complexity = 10 +max-line-length = 120 +per-file-ignores = + test/*:S101 diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..5566ceeefb63a30c543982ab5af865f0cf585b4d --- /dev/null +++ b/.gitattributes @@ -0,0 +1,38 @@ +# this file is used by git large file support (git-lfs) to move +# large / binary files to a special location / database +*.jar filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.ZIP filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tar.bz2 filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.png filter=lfs diff=lfs merge=lfs -text +*.PNG filter=lfs diff=lfs merge=lfs -text +*.jpg filter=lfs diff=lfs merge=lfs -text +*.JPG filter=lfs diff=lfs merge=lfs -text +*.jpeg filter=lfs diff=lfs merge=lfs -text +*.JPEG filter=lfs diff=lfs merge=lfs -text +*.pdf filter=lfs diff=lfs merge=lfs -text +*.PDF filter=lfs diff=lfs merge=lfs -text +*.mp3 filter=lfs diff=lfs merge=lfs -text +*.MP3 filter=lfs diff=lfs merge=lfs -text +*.wav filter=lfs diff=lfs merge=lfs -text +*.WAV filter=lfs diff=lfs merge=lfs -text +*.mp4 filter=lfs diff=lfs merge=lfs -text +*.mov filter=lfs diff=lfs merge=lfs -text +*.ods filter=lfs diff=lfs merge=lfs -text +*.xls filter=lfs diff=lfs merge=lfs -text +*.xlsx filter=lfs diff=lfs merge=lfs -text +*.XLSX filter=lfs diff=lfs merge=lfs -text +*.odt filter=lfs diff=lfs merge=lfs -text +*.doc filter=lfs diff=lfs merge=lfs -text +*.DOC filter=lfs diff=lfs merge=lfs -text +*.docx filter=lfs diff=lfs merge=lfs -text +*.DOCX filter=lfs diff=lfs merge=lfs -text +*.sqlite filter=lfs diff=lfs merge=lfs -text +*.SQLITE filter=lfs diff=lfs merge=lfs -text +*.sqlite3 filter=lfs diff=lfs merge=lfs -text +*.SQLITE3 
filter=lfs diff=lfs merge=lfs -text + +# converting LF+CR to LF +* text=auto \ No newline at end of file diff --git a/.github/workflows/deploy_python_package.yml b/.github/workflows/deploy_python_package.yml new file mode 100644 index 0000000000000000000000000000000000000000..5175c4490d49fd209b4e3e2a618dee145cdf18af --- /dev/null +++ b/.github/workflows/deploy_python_package.yml @@ -0,0 +1,58 @@ +# s. https://github.com/pallets/flask +name: Lab Orchestrator Python Package Push + +on: + push: + # Publish `master` as Docker `latest` image. + branches: + - main + - develop + #- seed + + # Publish `v1.2.3` tags as releases. + tags: + - v* + + # Run tests for any PRs. + pull_request: + +env: + PACKAGE_VERSION: 0.2.2 + PACKAGE_NAME: laborchestrator-$PACKAGE_VERSION + +jobs: + deploy: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: '3.x' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install build + - uses: actions/checkout@v3 + - name: Build package + run: | + cd SiLA + echo "Building SiLA python package ...." + ls -Al + python -m build + # Generate hashes used for provenance. + - name: generate hash + id: hash + run: | + echo "Generating hash ...." + cd SiLA + ls -Al + echo "dist: " + ls -Al dist + cd dist && echo "hash=$(sha256sum * | base64 -w0)" >> $GITHUB_OUTPUT + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce + with: + path: ./SiLA/dist + name: ${{ env.PACKAGE_NAME }} diff --git a/.github/workflows/pages_static_html.yml b/.github/workflows/pages_static_html.yml new file mode 100644 index 0000000000000000000000000000000000000000..b8a27e46ef6b2a2907a5d635c33fa99b8f01dde7 --- /dev/null +++ b/.github/workflows/pages_static_html.yml @@ -0,0 +1,49 @@ +# Simple workflow for deploying static content to GitHub Pages +name: Deploy static content to Pages + +on: + # Runs on pushes targeting the default branch + push: + branches: ["main"] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. +# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. +concurrency: + group: "pages" + cancel-in-progress: false + +jobs: + # Single deploy job since we're just deploying + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Setup Pages + uses: actions/configure-pages@v3 + - name: Build Pages + run: | + cd docs + echo "Building documentation ...." 
+ make html + ls -Al + # - name: Upload artifact + # uses: actions/upload-pages-artifact@v1 + # with: + # # Upload entire _built + # path: 'docs/_built' + # - name: Deploy to GitHub Pages + # id: deployment + # uses: actions/deploy-pages@v1 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..6861fb649fa1bdcac845b58af7f55c773cb176c6 --- /dev/null +++ b/.gitignore @@ -0,0 +1,116 @@ +# this file is used by git to ignore certain files or directories +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +# MANIFEST +*.spec + +# (virtual) environments +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# dotenv +.env + +# Cython debug symbols +cython_debug/ + +# SQLite database +*.sqlite +*.sqlite3 + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + +# PyCharm /IntelliJ Idea family of suites +.idea/ +*.iml + +# IDE settings - Visual Stuido code +.vscode/ + +# PyCharm +.idea + +# Mac stuff +.DS_Store + +# Generated files +bin/ +.build_* + +# Certificate files +*.pem +*.cer +*.crt +*.key diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..232dc2944c135a42270bbd4d4357b84047bfdb8b --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,161 @@ +# Config file for GitLab CI pipeline + +image: python:3.12 + +# Change pip's cache directory to be inside the project directory since we can +# only cache local items. +variables: + PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip" + SOURCE_DIR : "$CI_PROJECT_DIR/laborchestrator" + DOCS_DIR : "docs" + DOCS_SOURCE_DIR : "docs/source" + DOCS_BUILD_DIR : "docs/_build" + +stages: + - build + - compliance + - test + - build_and_publish_docs + - publish_pypi + +# Configuration -------------------------------------------------------------------- + +# Pip's cache doesn't store the python packages +# https://pip.pypa.io/en/stable/reference/pip_install/#caching +# +# If you want to also cache the installed packages, you have to install +# them in a virtualenv and cache it as well. 
+cache: + paths: + - .cache/pip + - venv/ + +before_script: + - python -V # Print out python version for debugging + - pip install virtualenv + - virtualenv venv + - source venv/bin/activate + +.parallel_python_jobs: + parallel: + matrix: + - python_version: ["3.9", "3.10"] + +.run_on_all_branches: + only: + refs: + - develop + - merge_requests + - triggers + - master + - main + - tags + - /^v[0-9]+.[0-9]+.*/ + +.run_on_master: + only: + refs: + - triggers + - master + - main + +.run_on_release_tag: + only: + refs: + - tags + - /^v[0-9]+.[0-9]+.*/ + +.run_on_manual: + only: + refs: + - manual + +# Jobs: Build ----------------------------------------------------------------- + +build-package: + stage: build + script: + - pip install twine + - pip install setuptools + - python setup.py bdist_wheel + - TWINE_PASSWORD=${CI_JOB_TOKEN} TWINE_USERNAME=gitlab-ci-token python -m twine upload --repository-url ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi dist/* + only: + - tags + +# build: +# stage: build +# script: +# - PYTHON_VERSION=${python_version} PYPI_URL=${pip_repository_index} make pull +# - PYTHON_VERSION=${python_version} PYPI_URL=${pip_repository_index} make build +# - PYTHON_VERSION=${python_version} PYPI_URL=${pip_repository_index} make push +# extends: +# - .docker_login +# - .run_on_all_branches +# - .parallel_python_jobs + +# Jobs: Compliance ------------------------------------------------------------ + +#compliance: +# stage: compliance +# script: +# - poetry run invoke format --check +# - poetry run invoke lint +# - poetry run invoke security-bandit +# extends: +# - .run_on_all_branches +# - .parallel_python_jobs +# - .use_generated_docker_image + +# Jobs: Test ------------------------------------------------------------------ + +# ============================= TEST ======================== +test: + script: + #- pip install tox flake8 + #- tox -e py39,flake8 + - pip install . + # for the tests we need pythonLab and pythonlabscheduler + - pip install pythonlab --index-url https://gitlab.com/api/v4/projects/17484832/packages/pypi/simple + - pip install pythonlabscheduler --index-url https://gitlab.com/api/v4/projects/25166382/packages/pypi/simple + - pip install pytest + - pytest + extends: + - .run_on_all_branches + - .parallel_python_jobs + +# Jobs: Docs ----------------------------------------------------------------------- + +pages: + stage: build_and_publish_docs + script: + - pip install sphinx myst_parser python_docs_theme + - pip install . 
+    #- cd docs ; make html
+    - sphinx-apidoc -e -P -o $DOCS_SOURCE_DIR $SOURCE_DIR
+    - sphinx-build -b html $DOCS_DIR $DOCS_BUILD_DIR
+    - mv $DOCS_BUILD_DIR public
+  artifacts:
+    paths:
+      - public
+  extends:
+    - .run_on_master
+
+# Jobs: Publish ---------------------------------------------------------------
+
+
+release_pypi:
+  variables:
+    python_version: "3.9"
+  stage: publish_pypi
+  script:
+    - |
+      poetry run invoke release-twine \
+        --tag_name="${tag_name}" \
+        --pypi_user="${pypi_user}" \
+        --pypi_pass="${pypi_pass}" \
+        --pypi_publish_repository="${pypi_publish_repository}" \
+        --pip_repository_index="${pip_repository_index}"
+  extends:
+    #- .run_on_release_tag
+    - .run_on_manual
+    #- .use_generated_docker_image
diff --git a/AUTHORS.md b/AUTHORS.md
new file mode 100644
index 0000000000000000000000000000000000000000..ea60cb84463c2114dbd1d55c3f66cd4b444d2133
--- /dev/null
+++ b/AUTHORS.md
@@ -0,0 +1,18 @@
+
+# Acknowledgements and Credits
+
+The Lab Orchestrator project thanks the following people:
+
+
+Contributors
+------------
+
+* Stefan Maak - Stefan made the first implementation
+
+* Mickey Kim - thanks for the fantastic cookiecutter template!
+
+
+Development Lead
+----------------
+
+* mark doerr
\ No newline at end of file
diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
new file mode 100644
index 0000000000000000000000000000000000000000..bc7e68e8df6dce6b4621be3d11e6e395506276c7
--- /dev/null
+++ b/DEVELOPMENT.md
@@ -0,0 +1,130 @@
+Development
+===========
+
+Get Started!
+------------
+
+Ready to contribute? Here's how to set up `laborchestrator` for local development.
+
+1. Clone the `laborchestrator` repo from
+   GitLab:
+
+    ``` {.shell}
+    $ git clone https://gitlab.com/opensourcelab/laborchestrator.git
+    ```
+
+2. Start your virtualenv and install dependencies:
+
+
+        # create a virtual environment and activate it, then run to install development dependencies:
+
+        pip install -e .[dev]
+
+        # run unittests
+
+        invoke test   # use the invoke environment to manage development
+
+
+3. Create a branch for local development:
+
+    ``` {.shell}
+    $ git checkout -b feature/IssueNumber_name-of-your-bugfix-or-feature
+
+    # please do not use '#' in branch names!
+    ```
+
+Now you can make your changes locally.
+
+1. When you're done making changes, check that your changes pass the
+   tests, including testing other Python versions, with tox:
+
+    ``` {.shell}
+    $ tox
+    ```
+
+2. Commit your changes and push your branch to GitLab:
+
+    ``` {.shell}
+    $ git add .
+    $ git commit -m "fix: Your detailed description of your changes."
+    $ git push origin feature/IssueNumber_name-of-your-bugfix-or-feature
+    ```
+
+3. Submit a merge request through GitLab.
+
+Merge Request Guidelines
+------------------------
+
+Before you submit a merge request, check that it meets these guidelines:
+
+1. The merge request should only include changes relating to one
+   ticket.
+2. The merge request should include tests to cover any added changes
+   and check that all existing and new tests pass.
+3. If the merge request adds functionality, the docs should be updated.
+   Put your new functionality into a function with a docstring, and add
+   the feature to the list in README.rst.
+4. The team should be informed of any impactful changes.
+
+Documentation
+-------------
+
+The Sphinx documentation system is used;
+
+Markdown is supported via the myst-parser extension.
+
+To build the documentation, run
+
+> $ invoke docs
+
+Tips
+----
+
+1.
To run a subset of tests: + + ``` {.shell} + $ pytest tests.test_laborchestrator + ``` + +Deploying to Gitlab/Github/PyPI Package Registry +--------------------------------------------------- + +For every release: + +1. Update HISTORY.md + +2. Update version number (can also be patch or major): + + pre-commit hooks can be either installed with the provided script + or with the [pre-commit package](https://pre-commit.com) + + + ``` {.shell} + bumpversion --verbose patch + ``` + +3. Run the static analysis and tests: + + ``` {.shell} + tox + ``` + +4. Commit the changes: + + ``` {.shell} + git add HISTORY.md + git commit -m "Changelog for upcoming release <#.#.#>" + ``` + +5. Push the commit: + + ``` {.shell} + git push + ``` + +6. Add the release tag (version) on GitLab: +https://gitlab.com/opensourcelab/laborchestrator/-/tags + +The GitLab CI pipeline will then deploy to PyPI if tests pass. diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..2dbe09c114aa121983faeb6c16a4008987c759e5 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,54 @@ +ARG PYTHON_BASE + +FROM python:${PYTHON_BASE} as base +ARG PYPI_URL + +RUN mkdir /laborchestrator +WORKDIR /laborchestrator + +# copy build files +COPY pyproject.toml poetry.lock README.rst setup.cfg setup.py VERSION /laborchestrator/ +COPY laborchestrator /laborchestrator/laborchestrator + +ENV PYTHONDONTWRITEBYTECODE=1 \ + PYTHONUNBUFFERED=1 \ + POETRY_NO_INTERACTION=1 \ + POETRY_VIRTUALENVS_CREATE=0 \ + PIP_INDEX_URL=$PYPI_URL + +#RUN sed -i 's|http://|https://artifactory.aws.gel.ac/artifactory/apt_|g' /etc/apt/sources.list + +# libcurl4-gnutls-dev is necessary for Pysam. See PCA-179 +RUN apt-get update -qq && apt-get install -qqy -f \ + build-essential \ + libbz2-dev \ + libffi-dev \ + liblzma-dev \ + libpq-dev \ + libsasl2-dev \ + libyaml-dev \ + libcurl4-gnutls-dev \ + nano \ + zlib1g-dev \ + && pip install -Iv --prefer-binary --index-url $PYPI_URL --upgrade \ + pip \ + setuptools \ + poetry==1.2.1 \ + poetry-plugin-export + +# Use poetry to resolve dependencies, output to requirements.txt and requirements_dev.txt, and pip to install +RUN poetry export --without dev --without-hashes -f requirements.txt -o requirements.txt \ + && poetry export --only dev --without-hashes -f requirements.txt -o requirements_dev.txt \ + && python -m pip install --prefer-binary --index-url $PYPI_URL -r requirements.txt \ + && python -m pip install --prefer-binary --index-url $PYPI_URL -e . + +FROM base as test +ARG PYPI_URL + +WORKDIR /laborchestrator +COPY tests /laborchestrator/tests + +# required to make sure pytest runs the right coverage checks +ENV PYTHONPATH . + +RUN python -m pip install --prefer-binary --index-url $PYPI_URL -r requirements_dev.txt .[tests] diff --git a/HISTORY.md b/HISTORY.md new file mode 100644 index 0000000000000000000000000000000000000000..7195687ab0630eb11356dbd1b04f41caeb40cf9a --- /dev/null +++ b/HISTORY.md @@ -0,0 +1,50 @@ + +# History of laborchestrator + + +* 0.0.1 (2023-09-28) + + + +feature/clean_up +---------------- + +Features: + + - 81325a0 Appl. new cookiecutter structure + - 71e2706 High-level intro to orchestration and scheduling, ChatGPT generated + - bd88bf3 Safer offset for schedules + +Bug fixes: + + - 4e73d16 Fixed bug with incomplete removal of nodes after runtime decisions + - a53a0a6 Fixed error with incorrect handling of if_nodes in wfg_mag#nager + - a257301 Fixed min_waiting times. removed printouts + - fe8a8af Fixed the permanent-rescheduling-bug. 
+ - b84ca43 Killed an evil bug setting origin devices wrong + - a18bc05 Minor changes + - 7748011 Removed some unused imports + +Other changes: + + - ff4f6b5 Adapted tests. all run successfully + - 69d43cc Added main method to pythonlab reader to test reading processes + - 52859ab Added test data files + - ed1015a Cleaned up hidden files + - 4ef6fa1 Clean_up finished + - f7065df Enabled use of other process data locations + - 015c071 Feat:minor changes + - 1a32f58 Finding processes in a directory is now a Processreader interface method + - 23be134 Fixed bug with flexible process collection module + - 77e495a Fix:merged wfg-manager fix + - 3230172 Got marians assay process ready + - 0910537 Minor changes + - ac97ecf Minor changes + - e19cdc1 Minor changes + - 96118b4 Moved dev_tools to lara_processes repo + - 3a498dd Moved utility.py + - e3663ca Simulation test is working again + - 98a097d Some purging of lara-specific stuff + + ...more commits omitted + diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..6275dd9508f1f1857606b43c4398d9bf4b79e88d --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,4 @@ +# add files to be included in package here +include VERSION +include *.txt +include laborchestrator/sila_server/generated/*/*.xml diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..acac32467ad5cf0cba9284ce9fca67b8c335e86a --- /dev/null +++ b/Makefile @@ -0,0 +1,89 @@ +CODE_PATHS := laborchestrator + +GITLAB_USERNAME ?= +GITLAB_TOKEN ?= + +PYTHON_VERSION ?= 3.9## default python build to 3.9 + +# Originally CI_PROJECT_NAME came from the CI, but it is safer if this comes directly from pyproject.toml +CI_PROJECT_NAME := $(shell grep -i name pyproject.toml | head -1 | awk -F'"' '{print $$2}' | tr "[A-Z]" "[a-z]" | tr "_" "-") +CI_COMMIT_REF_NAME ?= $(shell git symbolic-ref --short -q HEAD) +CI_COMMIT_REF_NAME := $(subst /,-,$(CI_COMMIT_REF_NAME)) +CI_COMMIT_BEFORE_SHA := $(shell git rev-parse HEAD^1) +CI_COMMIT_SHA ?= $(shell git rev-parse HEAD) +CI_COMMIT_SHORT_SHA ?= $(shell git rev-parse --short HEAD) +CURRENT_LOCATION ?= $(shell pwd) +CI_ENVIRONMENT_NAME ?= +CI_DOCKER_BUILD_ARGS ?= + +PYPI_URL ?= https://artifactory.aws.gel.ac/artifactory/api/pypi/pypi/simple + +CI_REGISTRY ?= registry.gitlab.com +CI_REGISTRY_IMAGE ?= ${CI_REGISTRY}/https://gitlab.com/opensourcelab/laborchestrator +DOCKER_IMAGE_NAMESPACE := ${CI_REGISTRY_IMAGE}/${CI_PROJECT_NAME} +DOCKER_IMAGE_TAG ?= ${DOCKER_IMAGE_NAMESPACE}:py${PYTHON_VERSION}-${CI_COMMIT_REF_NAME} + +.PHONY: build test + +help: ## Prints this help/overview message + @awk 'BEGIN {FS = ":.*?## "} /^[a-z0-9_-]+:.*?## / {printf "\033[36m%-17s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +_CMD_DOCKER_BUILD := \ + docker build \ + ${CI_DOCKER_BUILD_ARGS} \ + +## construct the docker image with the git branch inc. into the tag +.build_${PYTHON_VERSION}: Dockerfile laborchestrator clean__py + ${_CMD_DOCKER_BUILD} \ + --platform linux/amd64 \ + --build-arg PYTHON_BASE=${PYTHON_VERSION} \ + --build-arg PYPI_URL=${PYPI_URL} \ + --cache-from ${DOCKER_IMAGE_TAG} \ + --tag ${DOCKER_IMAGE_TAG} \ + --target test \ + . + touch $@ + +build: .build_${PYTHON_VERSION} ## build local python3.9 image. Define PYTHON_VERSION to select other python version. + +push: build + docker push ${DOCKER_IMAGE_TAG} + +pull: + docker pull ${DOCKER_IMAGE_TAG} || echo "No pre-made image available" + +test: ## run pytest on mounted-in tests (change tests, no rebuild!) 
+ mkdir -p $(shell pwd)/bin && \ + docker run \ + --platform linux/amd64 \ + --volume $(shell pwd)/bin/:/laborchestrator/bin/ \ + --volume $(shell pwd):/laborchestrator:rw \ + ${DOCKER_IMAGE_TAG} \ + pytest --cov-report xml:/laborchestrator/bin/coverage.xml tests/ + +test_shell: ## enter test docker image Bash + mkdir -p $(shell pwd)/bin && \ + docker run -it \ + --platform linux/amd64 \ + --volume $(shell pwd)/bin/:/laborchestrator/bin/ \ + --volume $(shell pwd):/laborchestrator:rw \ + ${DOCKER_IMAGE_TAG} \ + /bin/bash + +# cleaning up -------------------------------------------------------- +clean: clean__docker clean__py clean__reports + +clean__py: ## clean python temp files + find . -iname *.pyc -delete + find . -iname __pycache__ -delete + find . -iname .cache -delete + +clean__docker: ## Clean up all docker images generated by this repo + rm -rf .*sentinel + for image in \ + $$(docker images --format "{\{.Repository}\}:{\{.Tag}\}\t{\{.ID}\}" | grep -e "${DOCKER_IMAGE_NAMESPACE}" | awk '{print $$2}'); do \ + docker rmi -f $$image; \ + done + +clean__reports: + rm -rf ${PATH_REPORTS} diff --git a/README.md b/README.md index db845e3cc4dede58b3d6d32f6d27643cd77874e3..155fd1e5e73b8affa53414b880298504597a0df8 100644 --- a/README.md +++ b/README.md @@ -1,92 +1,39 @@ -# LabOrchestrator +# Lab Orchestrator +General Purpose Orchestrator for Scientific Laboratories. It collaborates nicely with SiLA servers and pythonLab as process description language. +## Features -## Getting started - -To make it easy for you to get started with GitLab, here's a list of recommended next steps. - -Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)! - -## Add your files - -- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files -- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command: - -``` -cd existing_repo -git remote add origin https://gitlab.com/opensourcelab/laborchestrator.git -git branch -M main -git push -uf origin main -``` - -## Integrate with your tools - -- [ ] [Set up project integrations](https://gitlab.com/opensourcelab/laborchestrator/-/settings/integrations) - -## Collaborate with your team - -- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/) -- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html) -- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically) -- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/) -- [ ] [Automatically merge when pipeline succeeds](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html) - -## Test and Deploy - -Use the built-in continuous integration in GitLab. 
- -- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html) -- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing(SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) -- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html) -- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/) -- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html) - -*** +## Installation -# Editing this README + pip install laborchestrator --index-url https://gitlab.com/api/v4/projects/39006834/packages/pypi/simple -When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template. +## Usage -## Suggestions for a good README -Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information. + laborchestrator --help -## Name -Choose a self-explaining name for your project. +## Development -## Description -Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors. + git clone gitlab.com/opensourcelab/laborchestrator -## Badges -On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge. + # create a virtual environment and activate it then run -## Visuals -Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method. + pip install -e .[dev] -## Installation -Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection. + # run unittests -## Usage -Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README. + invoke test # use the invoke environment to manage development + -## Support -Tell people where they can go to for help. 
It can be any combination of an issue tracker, a chat room, an email address, etc. +## Documentation -## Roadmap -If you have ideas for releases in the future, it is a good idea to list them in the README. +The Documentation can be found here: [https://opensourcelab.gitlab.io/laborchestrator](https://opensourcelab.gitlab.io/laborchestrator) or [laborchestrator.gitlab.io](laborchestrator.gitlab.io/) -## Contributing -State if you are open to contributions and what your requirements are for accepting them. -For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self. +## Credits -You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser. +This package was created with [Cookiecutter](https://github.com/audreyr/cookiecutter) + and the [gitlab.com/opensourcelab/software-dev/cookiecutter-pypackage](https://gitlab.com/opensourcelab/software-dev/cookiecutter-pypackage) project template. -## Authors and acknowledgment -Show your appreciation to those who have contributed to the project. -## License -For open source projects, say how it is licensed. -## Project status -If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers. diff --git a/TODOs.md b/TODOs.md new file mode 100644 index 0000000000000000000000000000000000000000..3648467aa4b6bf4e7ff37c0cd3d2b4ec95ecb391 --- /dev/null +++ b/TODOs.md @@ -0,0 +1,3 @@ +# laborchestrator TODOs + +* \ No newline at end of file diff --git a/VERSION b/VERSION new file mode 100644 index 0000000000000000000000000000000000000000..ee1372d33a29e27945406f0527f8af8e6ee119c9 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.2.2 diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..cb90b47ff197ac0353480858614029570a3f4735 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,31 @@ +# docker-compose.yml for Lab Orchestrator +# to build the images: docker-compose build +# to build start everything: docker-compose up --build +# to build start everything after code change: docker-compose up --build + +version: '3.9' + +#networks: + #container_network + +services: + laborchestrator: + build: + context: . + #image: registry.gitlab.com/ + #volumes: + # - : + #ports: + # - 8000:8000 + #expose: + # - 8000 + #networks: + # - container_network + env_file: + - ./.env.dev + command: + #sh -c "" + +#volumes: + + diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ea03c621d52eddae458aabae7e364387bd1170f9 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,216 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. 
+SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# User-friendly check for sphinx-build +ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) +$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) +endif + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . + +.PHONY: help +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " applehelp to make an Apple Help Book" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " coverage to run coverage check of the documentation (if enabled)" + +.PHONY: clean +clean: + rm -rf $(BUILDDIR)/* + +.PHONY: html +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +.PHONY: dirhtml +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +.PHONY: singlehtml +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +.PHONY: pickle +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +.PHONY: json +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +.PHONY: htmlhelp +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." 
+ +.PHONY: qthelp +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/sila_python.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/sila_python.qhc" + +.PHONY: applehelp +applehelp: + $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp + @echo + @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." + @echo "N.B. You won't be able to view it unless you put it in" \ + "~/Library/Documentation/Help or install it in your application" \ + "bundle." + +.PHONY: devhelp +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/sila_python" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/sila_python" + @echo "# devhelp" + +.PHONY: epub +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +.PHONY: latex +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +.PHONY: latexpdf +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: latexpdfja +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: text +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +.PHONY: man +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +.PHONY: texinfo +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +.PHONY: info +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +.PHONY: gettext +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +.PHONY: changes +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +.PHONY: linkcheck +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." 
+ +.PHONY: doctest +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +.PHONY: coverage +coverage: + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo "Testing of coverage in the sources finished, look at the " \ + "results in $(BUILDDIR)/coverage/python.txt." + +.PHONY: xml +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +.PHONY: pseudoxml +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." diff --git a/docs/_static/LARA_logo.svg b/docs/_static/LARA_logo.svg new file mode 100644 index 0000000000000000000000000000000000000000..89acfad6b96a119124752f1d45f82da7ef208e6c --- /dev/null +++ b/docs/_static/LARA_logo.svg @@ -0,0 +1,296 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/_static/custom.css-tpl b/docs/_static/custom.css-tpl new file mode 100644 index 0000000000000000000000000000000000000000..83609f1c2de119a7870f3e7e27227965316e3e46 --- /dev/null +++ b/docs/_static/custom.css-tpl @@ -0,0 +1,28 @@ +dl { + margin: 1em 0; +} + +dl.class > dt, +dl.method > dt, +dl.exception > dt, +dl.function > dt { + padding: 6px; +} + +dl.class > dt, dl.exception > dt { + color: hsl(195, 60%, 52%); + background: hsl(195, 62%, 94%); + border-top: solid 3px hsl(195, 69%, 68%); + margin: 6px 0; +} + +dl.method > dt, dl.function > dt { + color: #555; + background: #f0f0f0; + border-left: solid 3px #ccc; + margin-bottom: 6px; +} + +code.sig-prename, code.sig-name { + font-weight: bold; +} diff --git a/docs/architecture/0_introduction.md b/docs/architecture/0_introduction.md new file mode 100644 index 0000000000000000000000000000000000000000..c59c2fbc075e5ab3918ea8c611afa3679ff0940e --- /dev/null +++ b/docs/architecture/0_introduction.md @@ -0,0 +1,62 @@ +**Title: Overview of Lab Orchestrators and Schedulers in Lab Automation** + +**Introduction:** +Lab automation has become an integral part of modern laboratories, streamlining processes and enhancing efficiency. Two critical components in the realm of lab automation are Lab Orchestrators and Schedulers. While they share the goal of optimizing workflows, these tools serve distinct functions and work in tandem to ensure smooth and efficient laboratory operations. + +**Lab Scheduler:** + +**Functionality:** +A Lab Scheduler is a software tool designed to manage the execution of tasks within a laboratory automation system. It focuses on the allocation of resources and the sequencing of tasks to optimize the utilization of instruments and robotic devices. The primary goal of a scheduler is to ensure that experiments or processes are executed in a timely and resource-efficient manner. + +**Key Tasks:** +1. **Task Prioritization:** Schedulers assign priorities to tasks based on predefined criteria, such as project deadlines, experiment urgency, or regulatory requirements. High-priority tasks are scheduled to run earlier, ensuring critical timelines are met. + +2. **Resource Allocation:** They manage the allocation of resources, such as robotic arms, instruments, and incubators, to tasks. Efficient resource allocation minimizes idle time and maximizes the throughput of the lab. + +3. 
**Dynamic Scheduling:** Lab Schedulers often employ dynamic scheduling strategies, adapting to real-time changes in task priorities, resource availability, and unforeseen events. This adaptability is crucial for handling the dynamic nature of lab environments. + +4. **Dependency Management:** Schedulers consider task dependencies, ensuring that tasks are executed in the correct sequence. For example, a sample preparation task might depend on the completion of an earlier sample collection task. + +5. **Error Handling:** They incorporate mechanisms for handling errors or failures during task execution. This could involve rescheduling tasks, reallocating resources, or triggering alerts for manual intervention. + +**Lab Orchestrator:** + +**Functionality:** +A Lab Orchestrator, on the other hand, is a higher-level system that oversees the entire lab automation workflow. It goes beyond the immediate scheduling concerns addressed by a Lab Scheduler and focuses on the coordination and integration of various instruments, modules, and lab processes. + +**Key Tasks:** +1. **Workflow Design and Management:** Orchestrators facilitate the design and management of complex lab workflows. They allow users to define the entire experimental process, including the sequence of tasks, required instruments, and data flow. + +2. **Integration with Lab Instruments:** Orchestrators integrate seamlessly with different lab instruments and devices, providing a unified interface for controlling and monitoring their operation. This includes interactions with robotic arms, plate readers, incubators, and other specialized equipment. + +3. **Data Management:** Lab Orchestrators manage the flow of data within the lab automation system. This includes handling data generated during experiments, storing it in appropriate databases, and ensuring its availability for analysis. + +4. **User Interface:** They provide a user-friendly interface for researchers to design experiments, monitor progress, and analyze results. The interface typically abstracts the complexity of underlying processes, allowing researchers to focus on the scientific aspects of their work. + +5. **Error Handling and Exception Management:** Orchestrators handle errors or exceptions at a higher level, often involving more extensive error recovery strategies. They may trigger the re-execution of specific workflows or prompt users to intervene in case of critical failures. + +**Interaction Between Lab Orchestrators and Schedulers:** + +While Lab Orchestrators and Schedulers serve different purposes, they complement each other to create a cohesive and efficient lab automation system. + +1. **Task Handover:** + - The Lab Orchestrator defines the overall workflow and hands over specific tasks to the Lab Scheduler for execution. The Scheduler, in turn, optimizes the scheduling of these tasks based on priorities, dependencies, and resource availability. + +2. **Dynamic Adaptation:** + - Lab Orchestrators and Schedulers often collaborate in dynamically adapting to changes. The Orchestrator may trigger a rescheduling process when there is a change in the experimental plan, and the Scheduler adjusts the task execution accordingly. + +3. **Data Exchange:** + - Orchestrators and Schedulers share information about task completion, resource usage, and overall workflow progress. This data exchange ensures that both components have a real-time understanding of the laboratory environment. + +4. 
**User Interaction:**
+   - Orchestrators provide an intuitive interface for users to design experiments, while Schedulers offer tools for real-time monitoring and adjustment. User feedback and interventions may prompt the Orchestrator to update the overall workflow or the Scheduler to adapt the task schedule.
+
+5. **Error Handling:**
+   - In the event of errors or failures, the Orchestrator may trigger higher-level recovery mechanisms, and the Scheduler may handle immediate rescheduling or resource reallocation.
+
+**Conclusion:**
+In the realm of lab automation, both Lab Orchestrators and Schedulers play indispensable roles, each addressing specific aspects of the laboratory workflow. While Orchestrators focus on the holistic design and coordination of experiments, Schedulers dive into the nitty-gritty of task scheduling and resource optimization. Together, these tools form a synergistic partnership, ensuring that lab automation systems operate seamlessly, efficiently, and in accordance with the scientific goals of the laboratory.
+
+
+(Credit: ChatGPT and [Labforward](https://www.labforward.io/blog/lab-orchestrators-and-schedulers-in-lab-automation))
+
diff --git a/docs/architecture/1_architecture_overview.md b/docs/architecture/1_architecture_overview.md
new file mode 100755
index 0000000000000000000000000000000000000000..d6b2b936e22ad1882a457912b9f00b64b6ffb96a
--- /dev/null
+++ b/docs/architecture/1_architecture_overview.md
@@ -0,0 +1,67 @@
+# PythonLabOrchestrator Architecture - Overview
+
+The **PythonLabOrchestrator** is a generic lab automation application that runs one or several processes on a lab device setup; a process is described, e.g., as a [PythonLab Process](https://gitlab.com/opensourcelab/pythonlab) and scheduled by the [PythonLabScheduler](https://gitlab.com/opensourcelab/pythonlabscheduler).
+The lab device setup and the labware locations are described by a configuration file or by a database; the process can be loaded via the command line interface, a SiLA interface or a (web-based) GUI.
+
+The **PythonLabOrchestrator** loads a pythonLab process, converts it into a workflow graph representation and asks the *PythonLabScheduler* for an optimal schedule.
+The calculated schedule is then used to address lab automation entities, such as lab devices or evaluation pipelines, and to initiate the task of a process step. Multiple parallel executions are possible if the lab setup/environment allows it. Processes can be stopped and resumed individually. Errors can be recovered.
+If such a process is stopped, resumed or delayed beyond a defined duration threshold, the orchestrator initiates a re-scheduling via the PythonLabScheduler. The same happens after an error has occurred or been recovered.
+
+The **PythonLabOrchestrator** also keeps track of the following aspects:
+- addressing all entities with SiLA communication
+- directing the data to the right tables in the LARA database, enriching it with metadata / semantic annotations
+- position and type of the labware (with their barcodes/IDs) in the automation system (it will notify if, e.g., a labware is not suited for a given operation)
+- position of the lids
+- annotating the steps with metadata (semantics for later interpretation)
+- movement of samples (e.g. during sample transfer)
+- logging of the whole process, the movements of the labware and samples
+- error handling (exception handling, error minimisation)
+- recovery of the lab automation system in case of a complete power loss (keeping the state of the system, to be able to restart the system from a given state in the process)
+
+
+## program architecture
+
+The **PythonLabOrchestrator** is divided into four functional units: the core orchestrator, the command line interface, the SiLA interface and the (web-based) GUI.
+The core should be very stable and decoupled from the input interfaces as much as possible, to enable highly reliable processing.
+
+
+
+### core orchestrator
+
+The core orchestrator consists of five main parts:
+- A container [SchedulingInstance]() storing all relevant information on the workflow and schedule in convenient ways.
+- A worker [WorkerInterface]() dispatching commands to the lab devices (e.g. via SiLA) according to schedule and step order
+- An overseer [WorkerObserver]() tracking and logging the progress of running process steps
+- A workflow graph manager [WFGManager]() updating the workflow graph according to runtime decisions and measurement results
+- A schedule manager [ScheduleManager]() managing the server connection to a **PythonLabScheduler** and initiating re-scheduling
+
+These elements communicate intensively. The worker has two methods that get called when a process step should start and when it has finished.
+These two methods should be overridden by the user so that they match their lab environment. One example for the LARA platform [GreifswaldWorker]()
+and one for general SiLA servers [NotExistingYet]() are given.
+
+
+### SiLA interface / API
+
+The **PythonLabOrchestrator** has a SiLA server, offering the following services and features:
+
+Features:
+- OrchestratorController
+- PauseController
+- SimulationController
+
+### command line interface
+
+### Graphical User Interface (GUI)
+
+ - process control (load, start, stop, pause, add)
+ - Gantt chart (planned / present)
+ - process tree / workflow graph (future / history)
+ - detailed information for each step
+ - error recovery
+ - logging / step history (with filters for events)
+ - history of labware / sample movement
+
+## Inputs
+
+- PythonLab Process
+- lab configuration
diff --git a/docs/authors.rst b/docs/authors.rst
new file mode 100644
index 0000000000000000000000000000000000000000..2930f4af26ac433ba9bee403fb0d6a3c336934f4
--- /dev/null
+++ b/docs/authors.rst
@@ -0,0 +1,4 @@
+Authors
+========
+
+.. include:: ../AUTHORS.md
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100755
index 0000000000000000000000000000000000000000..5659296432794d23691ad008fe7bd0962359889c
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+#
+# laborchestrator documentation build configuration file, created by
+# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+# If extensions (or modules to document with autodoc) are in another
+# directory, add these directories to sys.path here. If the directory is
+# relative to the documentation root, use os.path.abspath to make it
+# absolute, like shown here.
+# +import os +import sys +sys.path.insert(0, os.path.abspath('..')) + +import laborchestrator + +# -- General configuration --------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.viewcode', + 'sphinx.ext.napoleon', + 'sphinx.ext.todo', + 'myst_parser', + 'python_docs_theme', + #'sphinx.ext.mathjax', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +source_suffix = ['.rst', '.md'] +#source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'Lab Orchestrator' +copyright = "2023, mark doerr" +author = "mark doerr" + +# The version info for the project you're documenting, acts as replacement +# for |version| and |release|, also used in various other places throughout +# the built documents. +# +# The short X.Y version. +version = laborchestrator.__version__ +# The full version, including alpha/beta/rc tags. +release = laborchestrator.__version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = "en" + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +#html_theme = 'alabaster' +html_theme = "python_docs_theme" + +# Theme options are theme-specific and customize the look and feel of a +# theme further. For a list of options available for each theme, see the +# documentation. +# +# html_theme_options = { +# "logo": "LARA_logo.svg", +# "show_powered_by": False, +# "font_family": "sans-serif", +# "head_font_family": "Lato, sans-serif", +# "page_width": "1280px", +# "sidebar_width": "200px", +# "code_font_size": ".85em", +# "font_size": ".9em", +# "link": "hsl(195, 60%, 20%)", +# } + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +html_logo = "_static/LARA_logo.svg" + +# -- Options for HTMLHelp output --------------------------------------- + +# Output file base name for HTML help builder. +htmlhelp_basename = 'laborchestratordoc' + + +# -- Options for LaTeX output ------------------------------------------ + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). 
+ # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass +# [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'laborchestrator.tex', + 'Lab Orchestrator Documentation', + author, 'manual'), +] + + +# -- Options for manual page output ------------------------------------ + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'laborchestrator', + 'Lab Orchestrator Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ---------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'laborchestrator', + 'Lab Orchestrator Documentation', + author, + 'laborchestrator', + 'One line description of project.', + 'Miscellaneous'), +] + + + diff --git a/docs/development.rst b/docs/development.rst new file mode 100644 index 0000000000000000000000000000000000000000..339b9480cc7a532b6b1409f7b2b637b38e67fd38 --- /dev/null +++ b/docs/development.rst @@ -0,0 +1 @@ +.. include:: ../DEVELOPMENT.md diff --git a/docs/history.rst b/docs/history.rst new file mode 100644 index 0000000000000000000000000000000000000000..0473b9b8bb84217923f05e530864b4d328c9ae20 --- /dev/null +++ b/docs/history.rst @@ -0,0 +1,4 @@ +HISTORY +======== + +.. include:: ../HISTORY.md diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..6d9971357bace950936ac652ea848d69b617dd13 --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,21 @@ +Welcome to Lab Orchestrator's documentation! +====================================================================== + +.. toctree:: + :glob: + :maxdepth: 2 + :caption: Contents: + + readme + installation + usage + source/modules + development + authors + history + +Indices and tables +================== +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/docs/installation.rst b/docs/installation.rst new file mode 100644 index 0000000000000000000000000000000000000000..9c81e47ea5785c33ce970dc1368ec07a6e3e7982 --- /dev/null +++ b/docs/installation.rst @@ -0,0 +1,28 @@ +.. highlight:: shell + +============ +Installation +============ + + +Stable release +-------------- + +To install Lab Orchestrator, run this command in your terminal: + +.. code-block:: console + + $ pip install laborchestrator + +This is the preferred method to install Lab Orchestrator, as it will always install the most recent stable release. + +If you don't have `pip`_ installed, this `Python installation guide`_ can guide +you through the process. + +.. _pip: https://pip.pypa.io +.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ + + +From source +----------- + diff --git a/docs/readme.rst b/docs/readme.rst new file mode 100644 index 0000000000000000000000000000000000000000..cbfbfbb4ac7335e3324bdcd4d4a7de5de8df3a5c --- /dev/null +++ b/docs/readme.rst @@ -0,0 +1,4 @@ +README +======= + +.. 
include:: ../README.md diff --git a/docs/usage.rst b/docs/usage.rst new file mode 100644 index 0000000000000000000000000000000000000000..41b47986824041bc31df0caadc13ad2decfe23ff --- /dev/null +++ b/docs/usage.rst @@ -0,0 +1,7 @@ +===== +Usage +===== + +To use Lab Orchestrator in a project:: + + import laborchestrator diff --git a/install-pre-commit-hook.py b/install-pre-commit-hook.py new file mode 100755 index 0000000000000000000000000000000000000000..f4ca2383b9645a660a896898ed149248a8016aea --- /dev/null +++ b/install-pre-commit-hook.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python + + +# shall be replaced by https://pre-commit.com/ + +import os +import stat +import sys +from os.path import dirname, join + +hook_file = join(dirname(__file__), ".git", "hooks", "pre-commit") + +# short-running tests +pytest_target_files = " ".join( + [ + "tests/test_*.py", + ] +) + +# write pre-commit hook script to .git/hooks/pre-commit +with open(hook_file, "w", encoding="utf-8") as fp: + fp.write( + f"""\ +#!/bin/sh +set -ex + +# ensure correct environment is used (workaround for PyCharm) +alias python={sys.executable} + +python -m isort --check-only . +python -m black --check . +python -m pflake8 . +python -m pytest {pytest_target_files} +""" + ) + +# make script executable +os.chmod(hook_file, os.stat(hook_file).st_mode | stat.S_IEXEC) diff --git a/jupyter/README.md b/jupyter/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9ba9e682a4e1e97ac2856607e2e1e44743345291 --- /dev/null +++ b/jupyter/README.md @@ -0,0 +1,2 @@ +# laborchestrator jupyter Demo notebooks + diff --git a/laborchestrator/__init__.py b/laborchestrator/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..c3fb0cba9ccca8efc3efe87b0dab0949ef86b0a4 --- /dev/null +++ b/laborchestrator/__init__.py @@ -0,0 +1,5 @@ +"""Top-level package for pythonLab Orchestrator.""" + +__author__ = """mark doerr""" +__email__ = "mark@uni-greifswald.de" +__version__ = "0.2.2" diff --git a/laborchestrator/database_integration/__init__.py b/laborchestrator/database_integration/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..abdecdfb29344eb5b677436859a1b9371e1829ac --- /dev/null +++ b/laborchestrator/database_integration/__init__.py @@ -0,0 +1,7 @@ +from laborchestrator.database_integration.status_db_interface import StatusDBInterface +from laborchestrator.database_integration.status_db_dummy import StatusDBDummyImplementation + +__all__ = [ + "StatusDBInterface", + "StatusDBDummyImplementation", +] diff --git a/laborchestrator/database_integration/status_db_dummy.py b/laborchestrator/database_integration/status_db_dummy.py new file mode 100755 index 0000000000000000000000000000000000000000..4947cc9cbfce1e4a19179dd61bdedcb6469b0eb6 --- /dev/null +++ b/laborchestrator/database_integration/status_db_dummy.py @@ -0,0 +1,71 @@ +""" +A mockup implementation of the status database interface. +It does basically nothing and will be used if no database support is set up for the orchestrator. 
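+
+Illustrative usage (a sketch; the device name and the step object are invented for the example):
+
+    db = StatusDBDummyImplementation()
+    db.position_empty("incubator_1", 3)      # always True -- the dummy does not track containers
+    db.get_estimated_duration(some_step)     # always None -- no historical data is available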
+""" +from typing import Optional, List, Tuple + +from laborchestrator.database_integration import StatusDBInterface +from laborchestrator.structures import ProcessStep, ContainerInfo + + +class StatusDBDummyImplementation(StatusDBInterface): + def position_empty(self, device: str, pos: int) -> bool: + return True + + def get_available_processes(self) -> List[Tuple[str, str]]: + return [] + + def add_process_to_db(self, name: str, src: str) -> str: + return "" + + def create_experiment(self, process_id: str) -> str: + return "Hello World" + + def get_all_positions(self, device: str) -> List[int]: + return [0] + + def get_container_at_position(self, device: str, pos: int) -> Optional[ContainerInfo]: + return None + + def moved_container(self, source_device: str, source_pos: int, target_device: str, target_pos: int, + barcode: Optional[str] = None): + pass + + def unlidded_container(self, cont_info: ContainerInfo, lid_device: str, lid_pos: int): + pass + + def lidded_container(self, cont_info: ContainerInfo, lid_device: Optional[str] = None, lid_pos: Optional[int] = None): + pass + + def get_cont_info_by_barcode(self, barcode: str) -> Optional[ContainerInfo]: + return None + + def add_container(self, cont: ContainerInfo): + pass + + def remove_container(self, cont: ContainerInfo): + pass + + def set_barcode_at_position(self, barcode: str, device: str, pos: int): + pass + + def set_barcode(self, cont: ContainerInfo): + pass + + def update_lid_position(self, cont: ContainerInfo): + pass + + def get_estimated_duration(self, step: ProcessStep, confidence=.95) -> Optional[float]: + return None + + def get_estimated_durations(self, steps: List[ProcessStep], confidence=.95) -> List[Optional[float]]: + return [None] * len(steps) + + def safe_step_to_db(self, step: ProcessStep, container_info: ContainerInfo, experiment_uuid: str): + pass + + def write_server_certificate(self, device_name, cert: str) -> None: + pass + + def get_server_certificate(self, device_name) -> str: + return "" diff --git a/laborchestrator/database_integration/status_db_interface.py b/laborchestrator/database_integration/status_db_interface.py new file mode 100755 index 0000000000000000000000000000000000000000..5a8251b686a2e7d9172d8ef756b96c5ccf09ea23 --- /dev/null +++ b/laborchestrator/database_integration/status_db_interface.py @@ -0,0 +1,221 @@ +""" +The formal interface a database must implement to enable +""" +from typing import Optional, List, Tuple +from laborchestrator.structures import ContainerInfo, ProcessStep +from abc import ABC, abstractmethod + + +class StatusDBInterface(ABC): + #def __init__(self): + # pass + + @classmethod + def __subclasshook__(cls, subclass): + # todo: can't i do this with an iteration? 
+ return (hasattr(subclass, 'get_all_positions') + and callable(subclass.get_all_positions) + and hasattr(subclass, 'add_process_to_db') + and callable(subclass.add_process_to_db) + and hasattr(subclass, 'get_available_processes') + and callable(subclass.get_available_processes) + and hasattr(subclass, 'get_process') + and callable(subclass.get_process) + and hasattr(subclass, 'create_experiment') + and callable(subclass.create_experiment) + and hasattr(subclass, 'moved_container') + and callable(subclass.moved_container) + and hasattr(subclass, 'unlidded_container') + and callable(subclass.unlidded_container) + and hasattr(subclass, 'lidded_container') + and callable(subclass.lidded_container) + and hasattr(subclass, 'get_cont_info_by_barcode') + and callable(subclass.get_cont_info_by_barcode) + and hasattr(subclass, 'add_container') + and callable(subclass.add_container) + and hasattr(subclass, 'remove_container') + and callable(subclass.remove_container) + and hasattr(subclass, 'set_barcode_at_position') + and callable(subclass.set_barcode_at_position) + and hasattr(subclass, 'set_barcode') + and callable(subclass.set_barcode) + and hasattr(subclass, 'update_lid_position') + and callable(subclass.update_lid_position) + and hasattr(subclass, 'get_estimated_duration') + and callable(subclass.get_estimated_duration) + and hasattr(subclass, 'set_barcode_at_position') + and callable(subclass.safe_step_to_db) + or NotImplemented) + + @abstractmethod + def get_all_positions(self, device: str) -> List[int]: + """ + Provides the indices of all positions in the specified device (independent of emptiness). + :param device: + :return: + """ + + def add_process_to_db(self, name: str, src: str) -> str: + """ + + :param name: + :param src: + :return: the uuid of the created process + """ + + def get_available_processes(self) -> List[Tuple[str, str]]: + """ + + :return: A List of tuples [process name, process uuid] for all processes in the db + """ + + def get_process(self, process_uuid: str) -> str: + """ + + :param process_uuid: The unique identifier of the process in the db + :return: the source code defining the process + """ + + def create_experiment(self, process_id: str) -> str: + """ + + :param process_id: the unique id of the process running in this experiment + :return: the uuid of the created experiment + """ + + @abstractmethod + def get_container_at_position(self, device: str, pos: int) -> Optional[ContainerInfo]: + """ + Checks if there is a container at a specified position. + :param device: + :param pos: + :return: None if there is no container at the specified position. All available information otherwise. 
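+        Example (an illustrative sketch; the device name and slot index are invented):
+
+            info = db.get_container_at_position("incubator_1", 3)
+            if info is None:
+                ...  # the slot is free
+            else:
+                ...  # info is a ContainerInfo describing the container found there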
+ """ + + @abstractmethod + def position_empty(self, device: str, pos: int) -> bool: + """ + Checks whether the specified position is empty + :param device: Name of the device + :param pos: Index of the slot + :return: + """ + + @abstractmethod + def moved_container(self, source_device: str, source_pos: int, target_device: str, target_pos: int, + barcode: Optional[str] = None): + """ + Saves a movement of a container (specified by its prior position) to the database + :param source_device: + :param source_pos: + :param target_device: + :param target_pos: + :param barcode: + :return: + """ + + @abstractmethod + def unlidded_container(self, cont_info: ContainerInfo, lid_device: str, lid_pos: int): + """ + Saves the removal of the lid and its new position to the database + :param cont: + :param lid_device: + :param lid_pos: + :return: + """ + pass + + @abstractmethod + def lidded_container(self, cont_info: ContainerInfo, lid_device: Optional[str], lid_pos: Optional[int]): + """ + Saved to the database, that a containers lid was put back on. The position of the used lid can be specified, + so the database will/can check that it was the correct one. + :param cont: + :param lid_device: + :param lid_pos: + :return: + """ + pass + + @abstractmethod + def get_cont_info_by_barcode(self, barcode: str) -> Optional[ContainerInfo]: + """ + Retrieves all available information for the container with the given barcode + :param barcode: + :return: + """ + + @abstractmethod + def add_container(self, cont: ContainerInfo): + """ + Adds a container to the database with its starting position as the current position. + :param cont: The container to add + :return: nothin + """ + + @abstractmethod + def remove_container(self, cont: ContainerInfo): + """ + Marks the given container as removed from the platform. The information is still kept in the database + :param cont: The container to mark removed + :return: nothing + """ + + @abstractmethod + def set_barcode(self, cont: ContainerInfo): + """ + Sets the barcode of an existing container. Assumes the barcode is already saved in the ContainerInfo. + :param cont: + :return: + """ + + @abstractmethod + def update_lid_position(self, cont: ContainerInfo): + """ + Sets the barcode of an existing container. Assumes the barcode is already saved in the ContainerInfo. + :param cont: + :return: + """ + + @abstractmethod + def get_estimated_duration(self, step: ProcessStep, confidence=.95) -> Optional[float]: + """ + Checks the database for similar jobs to estimate the duration of a job. + :param step: the job of which the duration shall be estimated + :param confidence: chance, that the actual duration is less or equal the estimated duration. + :return: duration in seconds or None if no information was found in the database + """ + + @abstractmethod + def get_estimated_durations(self, steps: List[ProcessStep], confidence=.95) -> List[Optional[float]]: + """ + Same as get_estimated_duration(), but with many steps at once to save time + :return: The list will match the length and order of the given steps + """ + + @abstractmethod + def safe_step_to_db(self, step: ProcessStep, container_info: ContainerInfo, experiment_uuid: str): + """ + Saves a finished job to the database. It automatically recognizes move jobs. 
+ :param experiment_uuid: The uuid of the experiment in the database, this step belongs to + :param container_info: information about the processed container + :param step: the structures.ProcessStep to save + :return: nothing + """ + + @abstractmethod + def write_server_certificate(self, device_name, cert: str) -> None: + """ + + :param device_name: + :param cert: + :return: + """ + + @abstractmethod + def get_server_certificate(self, device_name) -> str: + """ + + :param device_name: + :return: + """ diff --git a/laborchestrator/engine/__init__.py b/laborchestrator/engine/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..45c6bcc051fd44386154ffabfa18cd413511ab1f --- /dev/null +++ b/laborchestrator/engine/__init__.py @@ -0,0 +1,11 @@ +from .wfg_manager import WFGManager +from .schedule_manager import ScheduleManager +from .worker_interface import WorkerInterface +from .worker_observer import WorkerObserver + +__all__ = [ + 'ScheduleManager', + 'WFGManager', + 'WorkerInterface', + "WorkerObserver" +] diff --git a/laborchestrator/engine/schedule_manager.py b/laborchestrator/engine/schedule_manager.py new file mode 100755 index 0000000000000000000000000000000000000000..cf1e1051a5339823a88880ac7e9f8f109744016d --- /dev/null +++ b/laborchestrator/engine/schedule_manager.py @@ -0,0 +1,350 @@ +""" +The ScheduleManager oversees the scheduling and rescheduling process. It should be notified, when the current schedule +gets invalid for some reason. It starts and tries to maintain a connection to a scheduler_server. From this server +it requests new schedules when required. It uses the WFG class to encode and decode messages to the scheduling server. +""" + +import time +import traceback +from threading import Thread +from enum import Enum +from datetime import datetime +from typing import Dict, List, Optional, Tuple +import networkx as nx +import warnings +from sila2.framework import CommandExecutionStatus, SilaConnectionError +from laborchestrator.structures import SchedulingInstance, ProcessStep, Schedule, ProcessExecutionState +from laborchestrator.logging_manager import StandardLogger as Logger +try: + from pythonlabscheduler.sila_server import Client as SchedulerClient +except ModuleNotFoundError: + from sila2.client import SilaClient as SchedulerClient +from laborchestrator.workflowgraph import WorkFlowGraph as wfg +from laborchestrator.database_integration import StatusDBInterface + + +class ScheduleQuality(Enum): + OPTIMAL = 0 + FEASIBLE = 1 + INFEASIBLE = 2 + NOT_FOUND = 3 + UNKNOWN = 4 + + +AUTOMATIC_RESCHEDULING = True + + +class ScheduleManager: + _scheduler_client: Optional[SchedulerClient] = None + _rescheduling: bool + _schedule_optimal: bool + _schedule_valid: bool + jssp: SchedulingInstance + _hold_rescheduling: bool + time_limit_short = 2 + time_limit_long = 5 + db_client: Optional[StatusDBInterface] + schedule_quality: ScheduleQuality = ScheduleQuality.UNKNOWN + + def __init__(self, jssp: SchedulingInstance, db_client: Optional[StatusDBInterface] = None): + self._rescheduling = False + self._changing_wfg = False + self._hold_rescheduling = False + self._schedule_valid = True + self._schedule_optimal = True + self.jssp = jssp + self.db_client = db_client + Thread(target=self._manage_rescheduling, daemon=True).start() + Thread(target=self.keep_horizon, daemon=True).start() + Thread(target=self.scheduler_heart_beat, daemon=True).start() + + def _manage_rescheduling(self): + while True: + if not (self.is_rescheduling() or self._hold_rescheduling): + try: 
+ if self._scheduler_client is not None: + # try to find a solution quickly + if not self._schedule_optimal or not self._schedule_valid: + self._reschedule(time_limit=self.time_limit_short, try_hard=False) + # if the schedule is still invalid try hard to find a solution + if not self._schedule_valid: + self._reschedule(time_limit=self.time_limit_long, try_hard=True) + except Exception as ex: + Logger.error(ex, traceback.print_exc()) + time.sleep(.1) + + def _reschedule(self, time_limit: float, try_hard: bool) -> None: + """ + Handles the flags and result from a scheduling attempt + :param time_limit: + :param try_hard: + :return: + """ + try: + start = time.time() + Logger.info("rescheduling") + self._rescheduling = True + # when the schedule gets marked invalid again during rescheduling, we will have to reschedule again + was_valid = self._schedule_valid + self._schedule_valid = True + algo_info = self.scheduler_client.SchedulingService.CurrentAlgorithm.get() + max_problem_size = algo_info.MaxProblemSize + J = self.extract_near_future(max_problem_size) + self.jssp.future = J + Logger.info(f"{time.time()-start}: problem size: {len(J)}, max problem size: {max_problem_size}") + # check whether there is anything to schedule + if not J or len(J) == 0: + self._schedule_optimal = True + return + if self.db_client: + not_started = [job for job in J.values() if not job.start] + guesses = self.db_client.get_estimated_durations(not_started) + for job, guess in zip(not_started, guesses): + if guess: + job.duration = guess + Logger.info(f'{time.time()-start}:time guessing done') + # the actual scheduling attempt + schedule, quality = self._get_schedule_from_scheduler(time_limit, J) + if quality in ScheduleQuality.__members__: + self.schedule_quality = ScheduleQuality[quality] + else: + Logger.error(f"{quality} is no member of SolutionQuality.") + Logger.info(f'{time.time()-start}:got the {quality} schedule from scheduler') + if schedule is None: + self.schedule_quality = ScheduleQuality.NOT_FOUND + success = False + # set the validity back + self._schedule_valid = was_valid + else: + self.jssp.set_schedule(schedule) + success = True + self._schedule_optimal = True + return success + except Exception as ex: + l(ex, traceback.print_exc()) + finally: + self._rescheduling = False + + def _get_schedule_from_scheduler(self, time_limit: float, J: Dict[str, ProcessStep]) -> ( + Tuple[Optional[Schedule], str] + ): + """ + Requests a schedule from the scheduler server + :param time_limit: limit for the computation time + :param J: dictionary of jobs to schedule + :return: A schedule if the scheduler found one, else None + """ + scheduler_client = self.scheduler_client + if scheduler_client: + sila_wfg = wfg.create_sila_structure_from_jobs(J.values(), self.jssp.combined_wfg) + processes = [p for name, p in self.jssp.process_by_name.items() if name in self._processes_to_schedule] + wfg.add_waiting_dummies(sila_wfg, processes) + Logger.debug(datetime.now()) + cmd = scheduler_client.SchedulingService.ComputeSchedule(sila_wfg, time_limit) + start = time.time() + # wait for the command to finish and get the result + while not cmd.done: + time.sleep(.05) + if self._scheduler_client is None: + Logger.warning("Scheduler went offline while scheduling command was running.") + break + # if the server is restarted while this command is running, + # this would be an infinite loop and block rescheduling + if time.time() > start + 2 * time_limit + 10: + Logger.warning(f"Interupting solver after {time.time() - start} seconds" + 
f" exceeding time limit of {time_limit} seconds too much.") + break + Logger.debug(datetime.now()) + if cmd.status == CommandExecutionStatus.finishedSuccessfully: + result = cmd.get_responses().Result + sila_schedule = result.Schedule + # the quality is either "OPTIMAL", "FEASIBLE" or "INFEASIBLE" + schedule_quality = result.SolutionQuality + schedule = wfg.create_schedule_from_sila_struct(sila_schedule) + return schedule, schedule_quality + return None, "INFEASIBLE" + + @property + def _processes_to_schedule(self) -> list[str]: + # get the current state of every process + states = self.jssp.process_stati_by_name + + # those get always scheduled + states_to_schedule = {ProcessExecutionState.RUNNING} + # i no preocess is running, the paused and scheduled processes are included in the schedule + if ProcessExecutionState.RUNNING not in states.values(): + states_to_schedule.add(ProcessExecutionState.PAUSED) + # scheduled processes are mainly used to check, how the schedule would look + states_to_schedule.add(ProcessExecutionState.SCHEDULED) + # if none is active, add all finished processes (for a nice view of everything) + if ProcessExecutionState.SCHEDULED not in states.values() and\ + ProcessExecutionState.PAUSED not in states.values(): + states_to_schedule.add(ProcessExecutionState.FINISHED) + return [name for name, state in states.items() if state in states_to_schedule] + + def extract_near_future(self, n_steps: int) -> Dict[str, ProcessStep]: + g = self.jssp.combined_wfg + J_old = self.jssp.definite_step_by_id + # filter out the processes, we do not want to schedule + to_schedule = self._processes_to_schedule + J_old = {idx: job for idx, job in J_old.items() if job.process_name in to_schedule} + # first add all started jobs + J = {idx: job for idx, job in J_old.items() if job.start is not None} + # collect the containers that are no reagents + C = {name: cont for name, cont in self.jssp.container_info_by_name.items() if not cont.is_reagent} + + # TODO should steps with ancestors with opacity < 1 be included in schedules? 
+ # sort all descendants of current job in topological order + # first sort all jobs + topo_sort = list(nx.topological_sort(g)) + # filter out started jobs + topo_sort = list(filter(lambda n: n in J_old, topo_sort)) + # second separate the unfinished jobs of each container + real_decendants = [] + for name in C: + descent = nx.descendants(g, name) + real_decendants.extend(descent) + + count = 0 + for idx in topo_sort: + if count > n_steps: + break + job = J_old[idx] + if idx in real_decendants: + count += 1 + J[idx] = job + + # iteratively fill in all prerequisites of already added jobs + operable = self.jssp.operable + unsafe = set(J.keys()) + while len(unsafe) > 0: + idx = unsafe.pop() + # never add container nodes + for idx_o in operable[idx].prior: + if idx_o not in J: + # add only definite jobs (other operable nodes are linked as well) + if idx_o in J_old: + J[idx_o] = J_old[idx_o] + unsafe.add(idx_o) + return J + + def keep_horizon(self): + last_scheduled = 0 + while True: + time.sleep(4 * self.time_limit_short + 10) + try: + if self._rescheduling: + continue + if not self.scheduler_client: + continue + algo_info = self.scheduler_client.SchedulingService.CurrentAlgorithm.get() + max_problem_size = algo_info.MaxProblemSize + J = self.extract_near_future(max_problem_size) + scheduled = self.jssp.schedule.keys() + + # check whether at least a third of the available steps for each available container is scheduled + def check_schedule() -> bool: + for name, process in self.jssp.process_by_name.items(): + if name in self._processes_to_schedule: + for cont in process.containers: + if not cont.is_reagent: + available = [idx for idx, job in J.items() if not job.start and + cont.name in job.cont_names] + in_schedule = sum(idx in scheduled for idx in available) + if in_schedule < len(available)/3: + Logger.info(f"for container {cont.name} only {in_schedule} jobs are scheduled." 
+ f" Reschedule to get {len(available)}") + return False + return True + # mark the schedule invalid if it does not contain enough steps per container + if not check_schedule(): + self.mark_schedule_invalid() + except Exception as ex: + Logger.error(ex, traceback.print_exc()) + + def configure_lab(self, yaml_file: str) -> bool: + scheduler_client = self.scheduler_client + if scheduler_client: + scheduler_client.LabConfigurationController.LoadJobShopFromFile(ConfigurationFile=yaml_file) + return True + return False + + @property + def scheduler_client(self) -> Optional[SchedulerClient]: + # check if the scheduler is alive before returning it + self.is_connected_to_scheduler() + return self._scheduler_client + + def try_scheduler_connection(self, timeout=5): + """ + Tries to find a scheduler in the network and establishes connection in both directions + :return: + """ + try: + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + # Any warnings issued here will be suppressed + self._scheduler_client = SchedulerClient.discover(server_name='Scheduler', insecure=True, + timeout=timeout) + Logger.info("Connected scheduler and worker :-)") + except TimeoutError as ex: + Logger.warning("Could not discover suitable scheduler server.") + + def is_connected_to_scheduler(self) -> bool: + """ + Tests and returns whether a scheduler server is connected + :return: + """ + if self._scheduler_client is None: + return False + try: + self._scheduler_client.SiLAService.ServerName.get() + return True + except SilaConnectionError: + self._scheduler_client = None + Logger.warning(f"The scheduler seems to have gone offline") + return False + + def scheduler_heart_beat(self): + """ + Frequently checks if the scheduler server is still online while the scheduler client is not None. + """ + while True: + time.sleep(5) + # check whether the server to an existing client is still online + self.is_connected_to_scheduler() + # find a scheduler server if necessary + if self._scheduler_client is None: + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + # Any warnings issued here will be suppressed + self.try_scheduler_connection() + + + def hold_rescheduling(self): + self._hold_rescheduling = True + + def continue_rescheduling(self): + self._hold_rescheduling = False + + def mark_schedule_suboptimal(self): + if AUTOMATIC_RESCHEDULING: + self._schedule_optimal = False + + def mark_schedule_invalid(self, enforce: bool = AUTOMATIC_RESCHEDULING): + """ + Sets the schedule to invalid and thereby triggers a rescheduling. If AUTOMATIC_RESCHEDULING is deactivated + this will not happen unless the parameter enforce is set (which for example happens after a process is started). 
+ """ + if enforce: + self._schedule_valid = False + + def is_rescheduling(self): + return self._rescheduling + + def schedule_executable(self) -> bool: + if not self._schedule_valid: + return False + if self._rescheduling: + return False + return True diff --git a/laborchestrator/engine/wfg_manager.py b/laborchestrator/engine/wfg_manager.py new file mode 100755 index 0000000000000000000000000000000000000000..6df71172bcae90bd024d89bbdab91b29d41d2141 --- /dev/null +++ b/laborchestrator/engine/wfg_manager.py @@ -0,0 +1,213 @@ +import time +from threading import Thread +import traceback +from laborchestrator.logging_manager import StandardLogger as Logger +from typing import Dict +import networkx as nx +from laborchestrator.structures import ( + SchedulingInstance, MoveStep, StepStatus, Computation, Variable, IfNode, UsedDevice +) +from .schedule_manager import ScheduleManager + + +class WFGManager: + jssp: SchedulingInstance + + def __init__(self, jssp: SchedulingInstance, schedule_manager: ScheduleManager): + self.jssp = jssp + self.schedule_manager = schedule_manager + # the thread running frequent checks on the wfg + t = Thread(target=self._wfg_checker, daemon=True) + t.start() + + def _wfg_checker(self): + while True: + try: + # stop rescheduling until changes in the wfg are complete + self.schedule_manager.hold_rescheduling() + self._check_wfg() + except Exception as ex: + Logger.error(ex, traceback.print_exc()) + finally: + self.schedule_manager.continue_rescheduling() + time.sleep(.2) + + def _check_wfg(self): + changed_something = True + while changed_something: + changed_something = False + try: + for idx, var in self.jssp.definite_var_by_id.items(): + vars_ready = all([self.jssp.operable[name].status == StepStatus.FINISHED for name in var.prior]) + waits = var.status == StepStatus.WAITING + if waits and vars_ready: + changed_something = True + self._set_variable(var, self.jssp.operable) + for idx, computation in self.jssp.definite_computation_by_id.items(): + vars_ready = all( + [self.jssp.operable[name].status == StepStatus.FINISHED for name in computation.prior]) + waits = computation.status == StepStatus.WAITING + if waits and vars_ready: + changed_something = True + self._do_computation(computation, self.jssp.operable) + for idx, if_node in self.jssp.definite_if_node_by_id.items(): + vars_ready = all( + [self.jssp.operable[name].status == StepStatus.FINISHED for name in if_node.prior]) + waits = if_node.status == StepStatus.WAITING + if waits and vars_ready: + changed_something = True + self._eval_if(if_node, self.jssp.operable) + except Exception as ex: + Logger.warning(f"workflow graph check failed: {traceback.print_exc()}") + self.set_origins() + + def set_origins(self): + """ + The origin device types of MoveJobs are convenient to have, but might change at runtime. + This method sets all the definite ones. 
+ :return: + """ + jobs = self.jssp.definite_step_by_id + g = self.jssp.combined_wfg + topo = list(nx.topological_sort(g)) + for idx, job in jobs.items(): + if isinstance(job, MoveStep): + if job.origin_device is None: + ancestors = nx.ancestors(g, idx) + past_move_indices = [n for n in ancestors if n in jobs and isinstance(jobs[n], MoveStep) and + job.cont == jobs[n].cont] + if past_move_indices: + sorted_anc = sorted(past_move_indices, key=lambda u: topo.index(u)) + last_move = jobs[sorted_anc[-1]] + # set the origin to where the container is after the last (at that point) movement + origin = last_move.target_device + else: + origin = self.jssp.container_info_by_name[job.cont].start_device + job.used_devices.append(UsedDevice(origin.device_type, tag='origin', preferred=origin.preferred)) + self.complete_preferences() + + def complete_preferences(self): + """ + Writes the preferences of for used devices into origin and destination of adjacent MoveSteps + (if uniquely determined). This method is just for convenience. + :return: + """ + J = self.jssp.definite_step_by_id + for step in J.values(): + prior_jobs = [idx_o for idx_o in step.prior if idx_o in J] + if len(prior_jobs) == 1: + # in this it is clear, what the last step will have been + step_o = J[prior_jobs[0]] + if isinstance(step, MoveStep) and not isinstance(step_o, MoveStep): + if step_o.main_device.preferred != step.main_device.preferred: # avoid barcode_read steps + step.origin_device.preferred = step_o.main_device.preferred + if not isinstance(step, MoveStep) and isinstance(step_o, MoveStep): + step_o.target_device.preferred = step.main_device.preferred + if step.is_start and isinstance(step, MoveStep): + cont = self.jssp.container_info_by_name[step.cont] + step.origin_device.preferred = cont.start_device.name + + def _eval_if(self, if_node: IfNode, operable: Dict[str, Variable]): + try: + kwargs = {} + for idx in if_node.prior: + var = operable[idx] + if isinstance(var, Variable) or isinstance(var, Computation): + kwargs[var.var_name] = var.result + Logger.debug(f"input for evaluation of {if_node.name} is: {kwargs}") + Logger.info(f"input for evaluation of {if_node.name} is: {kwargs}") + # todo this is preliminary and will be done properly when variables are handled properly + try: + if_node.decision = bool(if_node.evaluation(**kwargs)) + except Exception as ex: + Logger.error(f"evaluation of if_node {if_node.name} failed: {ex}\n{traceback.print_exc()}") + Logger.warning("Setting decision to False (better than no decision at all)") + # A wrong decision is better than no decision + if_node.decision = False + Logger.info(f"decision is {if_node.decision}") + if_node.status = StepStatus.FINISHED + # consider the effect on the subtrees in the wfg + changed = [] + nodes = self.jssp.operable + # set the opacities of the direct successor nodes to 0 or 1 according to the made decision + for idx in if_node.false_tree: + if if_node.decision: + # if there is no other connection to that node, it will never be visited + if nodes[idx].prior == [if_node.name]: + nodes[idx].opacity = 0 + changed.append(idx) + # remove the precedence constraint to the if-node + nodes[idx].prior.remove(if_node.name) + else: + nodes[idx].opacity = 1 + if not isinstance(nodes[idx], IfNode): + changed.append(idx) + for idx in if_node.true_tree: + if if_node.decision: + nodes[idx].opacity = 1 + changed.append(idx) + else: + if nodes[idx].prior == [if_node.name]: + nodes[idx].opacity = 0 + changed.append(idx) + nodes[idx].prior.remove(if_node.name) + # set the 
opacities of the indirect successor nodes + while len(changed) > 0: + newly_changed = [] + for idx, node in nodes.items(): + if node.opacity < 1: + priors_changed = any(i in changed for i in node.prior) + if priors_changed: + # calculate the maximum opacity of predecessors (if nodes count half) + max_prior = 0 + for i in node.prior: + if isinstance(nodes[i], IfNode): + max_prior = max(max_prior, nodes[i].opacity/2) + else: + max_prior = max(max_prior, nodes[i].opacity) + # change the opacity if necessary + if not max_prior == node.opacity: + Logger.warning(f"setting opacity of {node.name} to {max_prior}") + node.opacity = max_prior + # we must not set the opacity of steps after an if_node to 1 + if not (isinstance(nodes[idx], IfNode) and max_prior == 1): + newly_changed.append(idx) + changed = newly_changed + self.jssp.update_reagent_opacity() + # clean up references to nodes that will not be operated + for node in nodes.values(): + # remove obsolete priors + node.prior = [idx for idx in node.prior if nodes[idx].opacity > 0] + # remove nodes that will never be operated + if node.opacity == 0: + self.jssp.remove_operable(node.name) + self.schedule_manager.mark_schedule_invalid() + except Exception as ex: + Logger.error(f"evaluation of if_node {if_node.name} failed: {ex}\n{traceback.print_exc()}") + if_node.status = StepStatus.ERROR + + @staticmethod + def _set_variable(var: Variable, operable): + try: + assert len(var.prior) == 1 + var.result = operable[var.prior[0]].result + var.status = StepStatus.FINISHED + except Exception as ex: + Logger.error(f"setting of variable {var.name} failed {ex}\n{traceback.print_exc()}") + var.status = StepStatus.ERROR + + @staticmethod + def _do_computation(computation: Computation, operable): + try: + kwargs = {} + for idx in computation.prior: + var = operable[idx] + kwargs[var.var_name] = var.result + computation.result = computation.evaluation(**kwargs) + computation.status = StepStatus.FINISHED + except Exception as ex: + Logger.error(f"evaluation of computation {computation.name} failed: {ex}\n{traceback.print_exc()}" + f"Setting result to None") + # setting this to None and FINISHED is better than error, since such an error is hard to resolve + computation.result = None + computation.status = StepStatus.FINISHED diff --git a/laborchestrator/engine/worker_interface.py b/laborchestrator/engine/worker_interface.py new file mode 100755 index 0000000000000000000000000000000000000000..75ce87391c3b6e6a2e543afc845399c00cbd1c1c --- /dev/null +++ b/laborchestrator/engine/worker_interface.py @@ -0,0 +1,265 @@ +""" +An interface for a worker, that the worker server and its features communicate with. 
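+
+A typical integration subclasses WorkerInterface and overrides execute_process_step to forward the
+step to the assigned device. A minimal sketch (MyLabWorker, MyProtocolHandler and the client lookup
+are assumptions, not part of this package):
+
+    class MyLabWorker(WorkerInterface):
+        def execute_process_step(self, step_id, device, device_kwargs):
+            client = self.device_clients[device]   # assumed mapping: device name -> SiLA client
+            # must return an Observable: an observable SiLA command instance or an
+            # ObservableProtocolHandler whose run_protocol() has been started
+            return MyProtocolHandler().run_protocol(client, **device_kwargs)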
+""" +from __future__ import annotations +import time +import traceback +from typing import Optional, Union, Dict, List, NamedTuple, Any, Tuple +from threading import Thread +from datetime import datetime +from abc import ABC +from datetime import timedelta +import inspect +import asyncio + +from sila2.client import ClientObservableCommandInstance +from sila2.framework import CommandExecutionStatus +from laborchestrator.structures import ( + SchedulingInstance, ProcessStep, ScheduledAssignment, StepStatus, MoveStep, SMProcess) +from laborchestrator.engine import ScheduleManager +from laborchestrator.logging_manager import StandardLogger as Logger +from laborchestrator.database_integration import StatusDBInterface + + +class WorkerInterface: + # this serves as an easy point to process step stati and results + observation_handlers: Dict[str, Observable] + simulation_mode: bool = False + db_client: StatusDBInterface + + def __init__(self, jssp: SchedulingInstance, schedule_manager: ScheduleManager, db_client: StatusDBInterface): + self.observation_handlers = {} + self.jssp = jssp + self.schedule_manager = schedule_manager + self.db_client = db_client + # start the dash app to control the worker + Thread(daemon=True, target=self._work).start() + + def _work(self): + while True: + try: + # it is safer to sort the assignments by start time. It might be, that due to some computational delays + # there are more jobs due than a device can handle. In that case, we should start the one that's + # scheduled first + if self.schedule_manager.schedule_executable(): + schedule = self.jssp.schedule + sorted_schedule = sorted(schedule.keys(), key=lambda idx: schedule[idx].start) + for step_id in sorted_schedule: + if step_id not in self.jssp.step_by_id: + continue + step = self.jssp.step_by_id[step_id] + # check whether all prerequisites for the executing the step are fulfilled + if self.job_is_due(step, schedule[step_id]): + # mark the step as started now + step.status = StepStatus.RUNNING + step.start = datetime.today() + # for movement steps, this is the point where we decide the position in destination device + if isinstance(step, MoveStep): + labware = self.jssp.container_info_by_name[step.cont] + # the position is unknown in the beginning of the experiment + if not step.origin_device.name == labware.current_device: + Logger.warning(f"the predicted place ({step.origin_device}) and the labware " + f"current position({labware.current_device}) diverged." 
+ f"using the labware position") + step.origin_device.name = labware.current_device + step.origin_pos = labware.current_pos + step.destination_pos = self.determine_destination_position(step) + # call the execution interface method for step execution + try: + if self.simulation_mode: + observable = self.simulate_process_step(step_id, schedule[step_id].device, step.data) + else: + observable = self.execute_process_step(step_id, schedule[step_id].device, step.data) + if step_id in self.observation_handlers: + Logger.warning(f"There is already a observable handler for step {step_id}.") + self.observation_handlers[step_id] = observable + except Exception as ex: + Logger.error(f"failed to start execution of step {step_id}:{ex}" + f"\n{traceback.print_exc()}") + except Exception as ex: + Logger.error(ex, traceback.print_exc()) + time.sleep(.5) + + def check_prerequisites(self, process: SMProcess) -> Tuple[bool, str]: + """ + This method will be called when a process is started (Not when it is resumed) + :param process: The process object, that just started + :return: A report (as string) of problems found + """ + message = f"No problems observed for {process.name}. The process can start" + return True, message + + def execute_process_step(self, step_id: str, device: str, device_kwargs: Dict[str, Any]) -> Observable: + """ + Gets called, when the time for step has come, all prerequisites for the step are fulfilled and when the + assigned device has capacity. Overwrite it, to make something happen. + :param device_kwargs: arguments to be forwarded to the server + :param step_id: + :param device: + :return: + """ + Logger.info(f"It is time to start process step {step_id} on {device} ") + # todo inherit from this method to actually make something happen (other than simulation) + return self.simulate_process_step(step_id, device, device_kwargs) + + def simulate_process_step(self, step_id: str, device: str, device_kwargs: Dict[str, Any]) -> Observable: + observable = DummyHandler(duration=max(self.jssp.definite_step_by_id[step_id].duration / 10 - .5, 1)) + observable.run_protocol(None) + return observable + + def process_step_finished(self, step_id: str, result: Optional[NamedTuple]): + """ + Gets called, when the corresponding step finished. Overwrite it, to make something happen. + :param step_id: + :param result: Might be None + :return: + """ + Logger.info(f"step {step_id} has finished") + if step_id not in self.jssp.step_by_id: + Logger.error(f"Step {step_id} seems to have been removed from the orchestrator") + return + # save the relocation to the database and the container_info + step = self.jssp.step_by_id[step_id] + experiment_uuid = self.jssp.process_by_name[step.process_name].experiment_uuid + for cont_name in step.cont_names: + container = self.jssp.container_info_by_name[cont_name] + self.db_client.safe_step_to_db(step, container, experiment_uuid) + if isinstance(step, MoveStep): + container.current_device = step.target_device.name + container.current_pos = step.destination_pos + cont_in_db = self.db_client.get_cont_info_by_barcode(container.barcode) + if cont_in_db: + origin_device, origin_pos = cont_in_db.current_device, cont_in_db.current_pos + if not (origin_device, origin_pos) == (step.origin_device.name, step.origin_pos): + Logger.warning(f"database and wfg have diverged. 
db: {origin_device, origin_pos}, wfg: {step}") + else: + origin_device, origin_pos = step.origin_device.name, step.origin_pos + Logger.debug("tell the database, that we move a container") + self.db_client.moved_container(origin_device, origin_pos, + step.target_device.name, step.destination_pos, + barcode=container.barcode) + + def job_is_due(self, step: ProcessStep, assignment: ScheduledAssignment): + """ + checks whether the given job should be stated now + :param step: the job to investigate + :param assignment: the job's scheduled assignment to time and device + :return: + """ + # check whether the job is still waiting + if not step.status == StepStatus.WAITING: + return False + # check whether the schedule says, that it's time to start + if assignment.start >= datetime.today(): + return False + # check whether the corresponding process is running or was removed + if step.process_name not in self.jssp.running_processes_names: + return False + # check whether all necessary prior jobs are done (container and machine precedences) + operable = self.jssp.operable + prior = step.prior + assignment.machine_prior + if not all(operable[idx_o].status == StepStatus.FINISHED for idx_o in prior): + return False + return True + + def determine_destination_position(self, step: MoveStep) -> Optional[int]: + """ + The position in the destination device is set at runtime according to free space. + By default, it is the next free one or a given preference (if that is free) + :param step: + :return: index of the position or None if none is available + """ + device_name = step.target_device.name + # get all slots in the device + device_slots = self.db_client.get_all_positions(device_name) + # if there is a preference and that slot is empty, take that + if step.pref_dest_pos: + if self.db_client.position_empty(device_name, step.pref_dest_pos): + return step.pref_dest_pos + else: + Logger.warning(f"Container {step.cont} should go to slot {step.pref_dest_pos}, but there is" + f"{self.db_client.get_container_at_position(device_name, step.pref_dest_pos)}") + for slot in device_slots: + # take the first empty slot + if self.db_client.position_empty(device_name, slot): + return slot + # return None, if there is no empty slot + return None + + +class ObservableProtocolHandler(ABC): + """ + The class is supposed be an interface for protocols consisting of different SiLA commands or non-SilA commands, + so they can be treated the same way as observable SiLA-commands. + """ + + def __init__(self): + self._status = CommandExecutionStatus.waiting + + def run_protocol(self, client: Any, **kwargs)\ + -> Union[ObservableProtocolHandler, ClientObservableCommandInstance]: + """ + :return: + """ + Thread(daemon=True, target=self._run_protocol, args=[client], kwargs=kwargs).start() + return self + + def _run_protocol(self, client, **kwargs): + self._status = CommandExecutionStatus.running + try: + if inspect.iscoroutinefunction(self._protocol): + Logger.debug("running protocol in async fashion") + asyncio.run(self._protocol(client, **kwargs)) + else: + self._protocol(client, **kwargs) + except Exception as ex: + self._status = CommandExecutionStatus.finishedWithError + Logger.error("protocol error:", traceback.print_exc()) + return + self._status = CommandExecutionStatus.finishedSuccessfully + + def _protocol(self, client, **kwargs): + """ This is where protocols should be defined, that can not be written as a single observable command. 
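+        A subclass overrides this method, e.g. (an illustrative sketch; the feature and command
+        names are invented and not taken from any real SiLA server):
+
+            class WashAndReadHandler(ObservableProtocolHandler):
+                def _protocol(self, client, **kwargs):
+                    client.Washer.Wash(**kwargs)   # first device command
+                    client.Reader.Read()           # second command, issued after the first returns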
""" + pass + + @property + def status(self) -> CommandExecutionStatus: + """ + provides the current status of protocol execution + :return: 0,1,2,3 for waiting, running, success, error + """ + if isinstance(self._status, CommandExecutionStatus): + return self._status + return CommandExecutionStatus(self._status) + + def get_remaining_time(self) -> timedelta: + """ + provides the remaining time of protocol execution + :return: remaining time in seconds (float) + """ + return timedelta(seconds=0) + + def get_responses(self): + return None + + @property + def done(self): + return self._status in [CommandExecutionStatus.finishedSuccessfully, CommandExecutionStatus.finishedWithError] + + +# this comes from either a direct observable SiLA command or a wrapper with the same interface +# made for more complicated executions +Observable = Union[ClientObservableCommandInstance, ObservableProtocolHandler] + + +class DummyHandler(ObservableProtocolHandler): + """ + A dummy implementation of ObservableProtocolHandler to simulate running real process steps + """ + def __init__(self, duration: float = 0): + super(DummyHandler, self).__init__() + self.duration = duration + + def _protocol(self, client, **kwargs): + time.sleep(self.duration) + diff --git a/laborchestrator/engine/worker_observer.py b/laborchestrator/engine/worker_observer.py new file mode 100755 index 0000000000000000000000000000000000000000..df26e795a851727cfff78eda4103bccf8d0bd88d --- /dev/null +++ b/laborchestrator/engine/worker_observer.py @@ -0,0 +1,118 @@ +""" +This class observes running process steps. When delays or errors occur, jobs get finished the WFGManager, +WorkerInterface or ScheduleManager are notified accordingly. +""" +from laborchestrator.structures import SchedulingInstance, StepStatus, ProcessStep +from laborchestrator.engine import WFGManager, ScheduleManager, WorkerInterface +import time +from threading import Thread +from datetime import datetime +import traceback +from laborchestrator.logging_manager import StandardLogger as Logger +from typing import Tuple, Set + + +class WorkerObserver: + worker: WorkerInterface + wfg_manager: WFGManager + schedule_manager: ScheduleManager + jssp: SchedulingInstance + observed_jobs: Set[str] + + def __init__(self, wfg_manager: WFGManager, schedule_manager: ScheduleManager, jssp: SchedulingInstance, + worker: WorkerInterface): + self.jssp = jssp + self.wfg_manager = wfg_manager + self.schedule_manager = schedule_manager + self.worker = worker + self.observe_thread = Thread(daemon=True, target=self.observe) + self.observe_thread.start() + self.observed_jobs = set() + + def observe(self): + """ + Master thread + :return: + """ + while True: + try: + time.sleep(.2) + steps_to_observe = list(self.worker.observation_handlers.keys()) + for step_id in steps_to_observe: + # check for new started jobs + if step_id not in self.observed_jobs: + # start a thread to observe this job + Thread(daemon=True, target=self._observe_protocol, args=[step_id]).start() + + except Exception as ex: + Logger.warning(f"Worker got an exception: {ex}, {traceback.print_exc()}") + + def _observe_protocol(self, step_id: str): + Logger.debug(f"start observing execution of {step_id}") + self.observed_jobs.add(step_id) + Logger.info(f"start observing execution of {step_id}. 
now: {self.worker.observation_handlers[step_id].status}") + protocol_info = self.worker.observation_handlers[step_id] + job = self.jssp.step_by_id[step_id] + while True: + try: + if step_id not in self.observed_jobs: + # this happens after error recovery. In this case, we interrupt without doing anything + Logger.info(f"There seems to have happened a error recovery of step {step_id}. Stopping to observer it.") + return + # this happens if the job's status gets manipulated from somewhere else (e.g. error-recovery) + if job.status == StepStatus.FINISHED: + self.worker.process_step_finished(step_id, job.result) + break + # get the current status from the Observable protocol_info + status = StepStatus(protocol_info.status.value) + if status == StepStatus.RUNNING: + job.status = status + # reschedule if the job takes longer than expected + delayed, duration_increase = self.is_delayed_significantly(job) + if delayed: + Logger.warning(f"job {job.name} is delayed by {(datetime.today() - job.start).total_seconds() - job.duration}") + job.duration += duration_increase + # jobs taking longer than expected can lead to serious problems if schedule is not adapted + self.schedule_manager.mark_schedule_invalid() + if status == StepStatus.ERROR: + job.status = status + self.jssp.container_info_by_name[job.cont].in_error_state = True + self.schedule_manager.mark_schedule_invalid() + # save the error message for the GUI + try: + job.result = protocol_info.get_responses() + except Exception as ex: + res = str(ex) + Logger.error(f"error retrieving response: {res}") + job.result = res + # stop observing this step + break + if status == StepStatus.FINISHED: + job.finish = datetime.today() + predicted_duration = job.duration + job.duration = (job.finish-job.start).total_seconds() + # reschedule if we saved significantly much time + if job.duration + 2*self.schedule_manager.time_limit_short < predicted_duration: + self.schedule_manager.mark_schedule_suboptimal() + # handle job effect on container_info + try: + job.result = protocol_info.get_responses() + except Exception as ex: + job.result = str(ex)+str(traceback.print_exc()) + Logger.error(ex, traceback.print_exc()) + self.worker.process_step_finished(step_id, job.result) + job.status = status # do this AFTER the effect of the step was calculated, + # or other steps might get started with insufficient information + break + time.sleep(.5) + except Exception as ex: + Logger.error(ex, traceback.print_exc()) + + def is_delayed_significantly(self, job: ProcessStep) -> Tuple[bool, float]: + delay = (datetime.today() - job.start).total_seconds() - job.duration + offset = self.schedule_manager.time_limit_short + delayed = delay > 1 + duration_increase = 2 * offset + return delayed, duration_increase + + diff --git a/laborchestrator/logging_manager.py b/laborchestrator/logging_manager.py new file mode 100755 index 0000000000000000000000000000000000000000..14a7281d738dfbc21b7286c113117c010d57f34d --- /dev/null +++ b/laborchestrator/logging_manager.py @@ -0,0 +1,75 @@ +import logging +from datetime import datetime +import os + +_log_format = "%(asctime)s - [%(levelname)s] - %(name)s - (%(filename)s).%(funcName)s(%(lineno)d) - %(message)s" +_console_format = "%(levelname)s:%(message)s" + +default_session_name = f"session_{datetime.today().ctime()}" +default_session_name = default_session_name.replace(" ", "_").replace(":", "_") + +lvl_name = { + logging.DEBUG: "DEBUG", + logging.INFO: "INFO", + logging.WARNING: "WARNING", + logging.ERROR: "ERROR", + logging.CRITICAL: 
"CRITICAL", +} + +def get_file_handler(handler_name: str, lvl: int): + file_handler = logging.FileHandler(handler_name) + file_handler.setLevel(lvl) + file_handler.setFormatter(logging.Formatter(_log_format)) + return file_handler + +def get_stream_handler(lvl: int): + stream_handler = logging.StreamHandler() + stream_handler.setLevel(lvl) + stream_handler.setFormatter(logging.Formatter(_console_format)) + return stream_handler + +def get_standard_logger(session_name: str = default_session_name, stream_lvl: int = logging.INFO): + """ + Returns a custom logger named 'Orchestrator logger' with separate file and console handlers. + This logger is independent of the root logger so that calls like logging.warning() do not use these settings. + """ + # Create a dedicated logger + logger = logging.getLogger("Orchestrator logger") + logger.setLevel(logging.DEBUG) + logger.propagate = False # prevent log messages from being passed to the root logger + + # Clear any existing handlers (if reconfiguring) + if logger.hasHandlers(): + logger.handlers.clear() + + # Create a session-specific directory for log files + base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs") + session_path = os.path.join(base_path, session_name) + try: + if not os.path.exists(session_path): + os.makedirs(session_path) + except OSError as err: + logger.error(f"Could not create log directory ({err}). Logs may not be saved.") + + # Set up file handlers for each logging level + for lvl, name in lvl_name.items(): + file_name = os.path.join(session_path, f"{session_name}_{name}.log") + logger.addHandler(get_file_handler(file_name, lvl)) + + # Add stream (console) handler with the desired level and format + logger.addHandler(get_stream_handler(stream_lvl)) + return logger + +# Create a module-level logger that can be imported as StandardLogger +StandardLogger = get_standard_logger() + +# Optionally, define __all__ to restrict module exports +__all__ = ['StandardLogger', 'get_standard_logger'] + + +if __name__ == "__main__": + test_logger=StandardLogger + test_logger.debug("Hello World") + test_logger.info("the end is near") + test_logger.warning("please prepare for the end!") + test_logger.error("Goodbye cruel world :-(") diff --git a/laborchestrator/logs/__init__.py b/laborchestrator/logs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/laborchestrator/old_dash_app.py b/laborchestrator/old_dash_app.py new file mode 100755 index 0000000000000000000000000000000000000000..9041b8ee263c35d41070a7ead6905717c74019be --- /dev/null +++ b/laborchestrator/old_dash_app.py @@ -0,0 +1,411 @@ +import traceback + +from dash import dcc, html, no_update +import time +from laborchestrator.logging_manager import StandardLogger as Logger +from dash.dependencies import State, Input, Output +from dash_extensions.enrich import MultiplexerTransform, DashProxy +from .traffic_light import TrafficLight, register_traffic_light_callbacks +import dash_interactive_graphviz +from threading import Thread +from laborchestrator.structures import ProcessStep, Variable, StepStatus +from laborchestrator.pythonlab_process_finder import ProcessFinder, ImportableProcess +from laborchestrator.orchestrator_interface import OrchestratorInterface +from laborchestrator.orchestrator_implementation import Orchestrator +import sys +import os +from importlib import reload + +tests_dir = os.path.join(os.path.dirname(__file__), '..', 'tests') +sys.path.append(tests_dir) +import 
test_data + + +def to_process_id(ip: ImportableProcess): + return f"{ip.module.__name__}.{ip.name}" + + +block_style = { + 'display': 'flex', + 'flexDirection': 'column', # Stack children vertically + 'alignItems': 'center', # Center align vertically in the block + 'justifyContent': 'center', # Optional: center align content within block + 'verticalAlign': 'top', # Align block with rest of the app + } + + +def create_timestamp() -> str: + lct = time.localtime() + y, mo, d, h, mi, s = [str(lct.__getattribute__(v)).rjust(2, '0') for v in + ['tm_year', 'tm_mon', 'tm_mday', 'tm_hour', 'tm_min', 'tm_sec']] + timestamp = f"{y}{mo}{d}_{h}{mi}{s}" + return timestamp + + +class SMDashApp: + # sm_interface: OrchestratorInterface + sm_interface: Orchestrator # bit hacky, but makes it easier to program. This GUI will be replaced anyway + app: DashProxy + p: Thread + selected_operation: str + + @property + def importable_processes(self): + # refresh the modules, so changes in the process apply + reload(self.process_module) + return ProcessFinder.get_processes(self.process_module) + + def __init__(self, sm_interface: OrchestratorInterface, port=8050, process_module=test_data): + self.sm_interface = sm_interface + self.process_module = process_module + self.test_view = False + self.process_to_add = None + self.process_type = {} + #self.last_jssp = {idx: job.status for idx, job in sm_interface.jssp.definite_job_by_id.items()} + self.app = DashProxy("Orchestrator webinterface", prevent_initial_callbacks=True, + suppress_callback_exceptions=True, transforms=[MultiplexerTransform()]) + self.p = Thread(target=self.app.run, daemon=True, kwargs=dict(debug=False, port=port), ) + self.selected_operation = "" + self.app.layout = html.Div(children=[ + html.H1(children='Status of the scheduled processes'), + html.Div(children=''' + A web application to observe, schedule and manipulate processes running with the pythonlaborchestrator. 
+ '''), + dcc.Graph( + id='gantt', + figure="" + ), + html.Div(children=[ + html.Div(children=[ + dcc.Checklist(id='stop_gantt', options=[{'label': 'Update Gantt-Chart', 'value': 'yes'}], value=['yes']), + dcc.Checklist(id='stop_wfg', options=[{'label': 'Update WFG', 'value': 'yes'}], value=['yes']), + html.Div(children=[html.Button('Recover error', id='action', n_clicks=0), + dcc.Checklist(id='repeat', options=[{'label': 'Repeat?', 'value': 'yes'}], value=[], + style={'display': 'inline-block'})]), + html.Button('Run Pre Check', id='pre_check', n_clicks=0), + html.Button("Export current WFG", id='export', n_clicks=0), + html.Button(children=["Export current", html.Br(), "Scheduling Problem"], + id='export_problem', n_clicks=0), + ], style=block_style + ), + html.Div(children=[ + html.Div(id='delay_label', children="Delay in minutes:"), + dcc.Textarea(id="delay", value='0', style={'width': 100, 'height': 20}), + dcc.Dropdown(options=[{'value': to_process_id(ip), 'label': ip.name} + for ip in self.importable_processes], + id='process_select', style={"width": 250}), + html.Button("Add process", id='add_process', n_clicks=0), + ], style=block_style) + , + html.Div(children=[ + dcc.Dropdown(options=[], id='processes', style={"width": 250}), + html.Button('Stop Process', id='stop_process', n_clicks=0), + html.Button('Start Process', id='continue_process', n_clicks=0), + html.Button('Remove Process', id='remove_process', n_clicks=0), + html.Button('Schedule Process', id='schedule_process', n_clicks=0), + html.Button('Reschedule', id='reschedule', n_clicks=0), + ], style=block_style + ), + html.Div(children=[ + html.Button("Add Containers to DB", id='add_to_db', n_clicks=0), + html.Button("Mark Containers removed", id='remove_from_db', n_clicks=0), + ], style=block_style), + html.Div(children=[ + html.Div(id='schedule_time_label', children=["Schedule ", html.Br(), "computing time ", html.Br(), " in seconds:"]), + dcc.Textarea(value=self.sm_interface.get_parameter("scheduling_time"), id="schedule_time", + style={'width': 30, 'height': 20}), + html.Button("Get", id="schedule_time_get", n_clicks=0), + html.Button("Set", id="schedule_time_set", n_clicks=0), + ], style=block_style), + TrafficLight("traffic_light", size=40), + ], style={ + 'display': 'flex', # Place blocks side by side + 'alignItems': 'flex-start', # Align blocks at the top + } + ), + #html.Button("Emergency STOP", id='emergency_stop', n_clicks=0, + # style={'background-color': 'red', 'font-size': '55px', 'width': '300px', 'height': '150px', + # 'vertical-align': 'top', 'display': 'inline-block'}), + dash_interactive_graphviz.DashInteractiveGraphviz( + id="wfg", engine='dot', + dot_source="digraph {\n\thello -> world\n}" + ), + html.Div(id='node', children='No node selected'), + html.Div(id='visu', children=None), + dcc.Interval( + id='interval-component', + interval=500, + n_intervals=0 + ), + ],) + register_traffic_light_callbacks(self.app, "traffic_light") + + @self.app.callback( + Input(component_id='export_problem', component_property='n_clicks'), + Output(component_id='export_problem', component_property='n_clicks') + ) + def export_current_scheduling_problem(n_clicks): + timestamp = create_timestamp() + filename = f"jssp_{timestamp}.json" + self.sm_interface.export_current_scheduling_problem(filename) + + @self.app.callback( + Input(component_id='process_select', component_property='value'), + Output(component_id='process_select', component_property='options') + ) + def refresh_dropdown_menu(n_clicks): + # refreshes the options 
according to what's in the file system + return [{'value': to_process_id(ip), 'label': ip.name} for ip in self.importable_processes] + + @self.app.callback( + Input(component_id='remove_from_db', component_property='n_clicks'), + State(component_id='processes', component_property='value'), + Output(component_id='remove_from_db', component_property='n_clicks') + ) + def remove_containers_from_db(n_clicks, value): + # todo this is not, how it should be done. use interface methods instead. + labware_to_remove = [c.name for c in self.sm_interface.jssp.process_by_name[value].containers] + + self.sm_interface.remove_labware(labware_ids=labware_to_remove) + return no_update + + @self.app.callback( + Input(component_id='add_to_db', component_property='n_clicks'), + State(component_id='processes', component_property='value'), + Output(component_id='add_to_db', component_property='n_clicks'), + ) + def add_containers_to_db(n_clicks, value): + present, missing = self.sm_interface.check_labware_presence([value]) + Logger.info(f"found: {present}\nmissing: {missing}") + self.sm_interface.add_labware(missing) + return no_update + + @self.app.callback( + Input(component_id='export', component_property='n_clicks'), + State(component_id='gantt', component_property='figure'), + State(component_id='wfg', component_property='dot_source'), + Output(component_id='export', component_property='n_clicks'), + ) + def export_state(n_clicks, gantt, wfg): + timestamp = create_timestamp() + with open(f"WFG_{timestamp}.txt", "w") as writer: + writer.write(wfg) + with open(f"Schedule_{timestamp}.txt", "w") as writer: + writer.write(self.sm_interface.gantt_chart_scheduled_processes.to_json()) + return no_update + + @self.app.callback( + Input(component_id='add_process', component_property='n_clicks'), + Output(component_id='process_select', component_property='title'), + State(component_id='process_select', component_property='value'), + State(component_id='delay', component_property='value'), + ) + def add_process(n_clicks, value, delay): + Logger.info(f"add {value} with {delay} minutes delay ") + try: + delay = int(delay) + except: + Logger.warning(f"could not translate delay {delay} to integer") + delay = 0 + for ip in self.importable_processes: + p_id = ProcessFinder.to_process_id(ip) + if p_id == value: + process = ProcessFinder.create_process(ip) + name = self.sm_interface.add_process(process_object=process, delay=delay) + self.process_type[name] = p_id.split('.')[-1] + return 'chosen' + return 'chosen' + + @self.app.callback( + Input('interval-component', 'n_intervals'), + Output(component_id='processes', component_property='options') + ) + def update_active_processes(n_intervals): + options = [] + for process in self.sm_interface.processes: + name = process.name + options.append(dict(value=name, label=f"{name} ({self.process_type[name] if name in self.process_type else ''})")) + return options + + @self.app.callback( + Input(component_id='continue_process', component_property='n_clicks'), + State(component_id='processes', component_property='value'), + Output(component_id='processes', component_property='value'), + ) + def start_process(n_clicks, value): + self.sm_interface.start_processes([value]) + return no_update + + @self.app.callback( + Input(component_id='stop_process', component_property='n_clicks'), + State(component_id='processes', component_property='value'), + Output(component_id='processes', component_property='value'), + ) + def stop_process(n_clicks, value): + 
self.sm_interface.stop_processes([value]) + return no_update + + @self.app.callback( + Input(component_id='remove_process', component_property='n_clicks'), + State(component_id='processes', component_property='value'), + Output(component_id='processes', component_property='value'), + ) + def remove_process(n_clicks, value): + self.sm_interface.remove_processes([value]) + return no_update + + @self.app.callback( + Input(component_id='schedule_process', component_property='n_clicks'), + State(component_id='processes', component_property='value'), + Output(component_id='processes', component_property='value'), + ) + def schedule_process(n_clicks, value): + try: + self.sm_interface.add_to_schedule(value) + except Exception as ex: + Logger.exception(traceback.print_exc()) + return no_update + + @self.app.callback( + Input(component_id='pre_check', component_property='n_clicks'), + State(component_id='processes', component_property='value'), + Output(component_id='node', component_property='children'), + ) + def pre_check_process(n_clicks, value): + try: + process = self.sm_interface.jssp.process_by_name[value] + ready, report = self.sm_interface.worker.check_prerequisites(process) + return f"Ready to start:{ready}\n\n{report}" + except Exception as ex: + print(ex, traceback.print_exc()) + return no_update + + @self.app.callback( + Input(component_id='reschedule', component_property='n_clicks'), + Output(component_id='reschedule', component_property='n_clicks'), + ) + def reschedule(n_clicks): + # this is not using a proper interface method + self.sm_interface.schedule_manager.mark_schedule_invalid(enforce=True) + + @self.app.callback( + Input(component_id='schedule_time_get', component_property='n_clicks'), + Output(component_id='schedule_time', component_property='value'), + ) + def get_schedule_time(n_clicks): + return self.sm_interface.get_parameter("scheduling_time") + + @self.app.callback( + Input(component_id='schedule_time_set', component_property='n_clicks'), + State(component_id="schedule_time", component_property="value"), + Output(component_id='schedule_time_get', component_property='n_clicks'), + ) + def set_schedule_time(n_clicks, value): + print(f"Setting scheduling time to {value}") + self.sm_interface.set_parameter("scheduling_time", value) + return no_update + + @self.app.callback( + Input(component_id="emergency_stop", component_property='n_clicks'), + Output(component_id="emergency_stop", component_property='n_clicks'), + ) + def stop(n_clicks): + print("this is just a test") + + @self.app.callback( + Output(component_id='node', component_property='children'), + Input(component_id='wfg', component_property='selected_node'), + ) + def select_operation(selected_node): + self.selected_operation = selected_node + if selected_node in self.sm_interface.jssp.operable: + op = self.sm_interface.jssp.operable[selected_node] + if isinstance(op, Variable): + if op.result is not None: + print('at this point we could visualize the variables result') + return str(op) + else: + nx_attr = self.sm_interface.jssp.process_by_name['P1'].wfg.nodes[selected_node] + if nx_attr['type'] == 'container': + cont_name = nx_attr['name'] + return str(self.sm_interface.jssp.container_info_by_name[cont_name]) + return 'no operation selected' + + @self.app.callback( + Output(component_id='visu', component_property='children'), + Input(component_id='wfg', component_property='selected_node'), + ) + def select_operation2(selected_node): + var = self.sm_interface.get_operable_node(selected_node) + if 
isinstance(var, Variable): + if var.result is not None: + if "Data" in var.result.__dir__(): + s = var.result.Data + return None + + @self.app.callback( + Output(component_id='node', component_property='children'), + Input(component_id='action', component_property='n_clicks'), + State(component_id='repeat', component_property='value') + ) + def manipulate(n_clicks, value): + job = self.sm_interface.get_operable_node(self.selected_operation) + repeat = 'yes' in value + if not job.status == StepStatus.ERROR: + print("Selected Operation is not in Error-State") + Logger.warning("Selected Operation is not in Error-State") + self.sm_interface.error_resolved(self.selected_operation, repeat_operation=repeat) + return f"Recovered {self.selected_operation}, repeat: {repeat}" + + @self.app.callback( + [Output(component_id='gantt', component_property='figure'), + Output(component_id='traffic_light-active', component_property="data")], + Input(component_id='interval-component', component_property='n_intervals'), + State(component_id='stop_gantt', component_property='value') + ) + def refresh_gantt(n_intervals, value): + if 'yes' in value: + if n_intervals % 2 == 0: + try: + gantt = self.sm_interface.gantt_chart_scheduled_processes + return [gantt, self.sm_interface.schedule_manager.schedule_quality.value] + except Exception as ex: + return [no_update, no_update] + return no_update + + @self.app.callback( + Output(component_id='wfg', component_property='dot_source'), + Input('interval-component', 'n_intervals'), + State(component_id='stop_wfg', component_property='value') + ) + def refresh_wfg(n_intervals, value): + try: + if 'yes' not in value: + return no_update + update = False + #new_jssp = {idx: job.status for idx, job in sm_interface.jssp.definite_job_by_id.items()} + #for idx, state in new_jssp.items(): + # if idx not in self.last_jssp or not state == self.last_jssp[idx]: + # update = True + if n_intervals % 5 == 0: + update = True + if update: + fig_str = str(self.sm_interface.workflow_graph_scheduled_processes) + return fig_str + return no_update + except Exception as ex: + print(ex, traceback.print_exc()) + print("wfg-update failed") + return no_update + + def run(self): + import logging + + if self.p.is_alive(): + Logger.warning('Server is already running. 
Restarting server') + self.stop() + logging.getLogger('werkzeug').setLevel(logging.ERROR) + self.p.start() + + def stop(self): + print("Sorry, I don't know, how to stop") diff --git a/laborchestrator/orchestrator_implementation.py b/laborchestrator/orchestrator_implementation.py new file mode 100755 index 0000000000000000000000000000000000000000..8e0655b2936f64069833e570ca758812ddf0a951 --- /dev/null +++ b/laborchestrator/orchestrator_implementation.py @@ -0,0 +1,464 @@ +"""module doc""" +import inspect +import traceback +from laborchestrator.logging_manager import StandardLogger as Logger +from datetime import datetime, timedelta +import json +import time +from typing import Dict, List, Optional, Any, Type, Tuple + +from laborchestrator.workflowgraph import WorkFlowGraph +from laborchestrator.structures import SchedulingInstance, StepStatus, ContainerInfo, MoveStep, ProcessStep +from laborchestrator.pythonlab_reader import PythonLabReader +from laborchestrator.process_reader import ProcessReader +from laborchestrator.pythonlab_process_finder import ProcessFinder +from laborchestrator.engine import WorkerInterface, WFGManager, ScheduleManager, WorkerObserver +from laborchestrator.orchestrator_interface import ( + OrchestratorInterface, + ProcessReader as ProcessReaderEnum, + ProcessInfo, + ProcessExecutionState, ProcessDescription +) +from laborchestrator.database_integration import StatusDBInterface, StatusDBDummyImplementation +from laborchestrator.sila_server import Server as OrchestratorServer + + +class Orchestrator(OrchestratorInterface): + """ + A class for managing one or more processes in a laboratory. You can add processes (also while others are already + running), monitor their progress. The class incorporates an intelligent scheduler. It adapts the schedule to delays, + errors, manual interference and addition of processes. Before really adding a process, you can check what the new + schedule would look like. The progress can be observed via gantt charts or workflow graphs. + """ + sila_server: OrchestratorServer + + def __init__(self, reader: str = "PythonLab", worker_type: Type[WorkerInterface] = WorkerInterface): + """Constructs a new empty interface. It es initialized in stop state. 
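+        A minimal construction sketch (the YAML file name is a placeholder; the defaults select the
+        PythonLab reader and the plain WorkerInterface)::
+
+            orchestrator = Orchestrator()
+            orchestrator.add_lab_resources_from_file("lab_environment.yaml")
+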
+ Call start_processing() to get it into running state.""" + # the container for all experiments and a copy where processes will be added for tryouts + self.test_jssp = SchedulingInstance() + # structure for start, stop, work + self.select_process_reader() + # create the five main parts of the inner logic + self.jssp = SchedulingInstance() + self.db_client = StatusDBDummyImplementation() + self.schedule_manager = ScheduleManager(self.jssp) + self.wfg_manager = WFGManager(self.jssp, schedule_manager=self.schedule_manager) + self.worker = worker_type(self.jssp, schedule_manager=self.schedule_manager, db_client=self.db_client) + self.worker_observer = WorkerObserver(schedule_manager=self.schedule_manager, wfg_manager=self.wfg_manager, + jssp=self.jssp, worker=self.worker) + # used to enumerate all processes with no specified name + self.process_counter = 0 + + def start_sila_interface(self): + self.sila_server = OrchestratorServer(self) + self.sila_server.start_insecure("127.0.0.1", 50088) + + def check_labware_presence(self, process_names: List[str] = None) -> Tuple[List[Any], List[Any]]: + if process_names is None: + process_names = [] + # take all processes with no step started + for name, process in self.jssp.process_by_name.items(): + if all(step.status == StepStatus.WAITING for step in process.steps): + process_names.append(name) + found = [] + missing = [] + for name in process_names: + for cont in self.jssp.process_by_name[name].containers: + if self.db_client.get_container_at_position(cont.start_device.name, cont.current_pos): + found.append(cont) + else: + missing.append(cont) + return found, missing + + def add_labware(self, labware: List[ContainerInfo]): + for cont in labware: + self.db_client.add_container(cont) + + def remove_labware(self, labware_ids: List[str]): + for labware_id in labware_ids: + if labware_id in self.jssp.container_info_by_name: + self.db_client.remove_container(self.jssp.container_info_by_name[labware_id]) + else: + Logger.warning(f"No container with name {labware_id} found for current processes") + + def create_labware_location_graph(self, labware_ids: List[str]): + Logger.info("Labware graph creation is not implemented, yet.") + + # A class to read new processes. 
By default, it is a PythonLabReader + process_reader: ProcessReader + worker: WorkerInterface + worker_observer: WorkerObserver + wfg_manager: WFGManager + schedule_manager: ScheduleManager + _simulation_speed = 20 + db_client: StatusDBInterface + + def inject_db_interface(self, db_client: StatusDBInterface): + if not isinstance(db_client, StatusDBInterface): + Logger.error(f"Invalid database interface of type: {type(db_client)}") + return + self.db_client = db_client + self.worker.db_client = db_client + self.schedule_manager.db_client = db_client + + def add_lab_resources_from_file(self, lab_env_filename: str): + # load the file + with open(lab_env_filename, 'r') as instream: + yaml = instream.read() + # send it to the scheduler + success = self.schedule_manager.configure_lab(yaml) + if not success: + Logger.warning("Unable to set lab environment") + + def add_lab_resources_from_database(self, URI: str): + raise NotImplementedError + + def execution_on_time(self) -> bool: + return not self.jssp.schedule_violated() + + def add_process(self, description: Optional[str] = None, file_path: Optional[str] = None, + name: Optional[str] = None, process_object=None, delay: int = 0) -> str: + if process_object is None and description is None: + assert file_path is not None + with open(file_path, 'r') as in_stream: + description = in_stream.read() + self.process_counter += 1 + + if name is None or name in self.jssp.process_by_name: + name = f"P{self.process_counter}" + assert name not in self.jssp.process_by_name # theoretical possible + if process_object is None: + try: + importable_processes = ProcessFinder.importable_processes_from_string(description) + if importable_processes: + process_object = ProcessFinder.create_process(importable_processes[0]) + else: + Logger.warning("No importable processes found") + except Exception as ex: + print(ex, traceback.print_exc()) + if description is None: + description = inspect.getsource(type(process_object)) + + # add the process to the database, if it is not already there + available_processes = self.db_client.get_available_processes() + for process_name, process_uuid in available_processes: + src = self.db_client.get_process(process_uuid) + if src == description: + break + else: + name_in_db = type(process_object).__name__ if process_object else name + process_uuid = self.db_client.add_process_to_db(name=name_in_db, src=description) + + # create the process object and its database representation + new_process = self.process_reader.read_process(process_object, name=name, src=description) + experiment_uuid = self.db_client.create_experiment(process_id=process_uuid) + new_process.experiment_uuid = experiment_uuid + # possibly add the delayed start + if delay: + start_time = datetime.now() + timedelta(minutes=delay) + new_process.min_start = start_time + self.jssp.add_process(new_process) + # this should be done once, before the process might be scheduled + # it is only necessary to correct the mistake made by the reader which sets the origin of movements after + # barcode-reads to the robots arm + for step in self.jssp.step_by_id.values(): + if isinstance(step, MoveStep): #for i in dll.DefinedTypes: + step.used_devices = [d for d in step.used_devices if not d.tag == 'origin'] + self.wfg_manager.set_origins() + + self.jssp.start_time = datetime.today() + return name + + def select_process_reader(self, process_reader=ProcessReaderEnum.PYTHONLABREADER): + # todo this is not any better than normal hard code + if process_reader == 
ProcessReaderEnum.PYTHONLABREADER: + self.process_reader = PythonLabReader() + + def get_parameter(self, param_name: str): + if param_name == "scheduling_time": + return self.schedule_manager.time_limit_short + + def set_parameter(self, param_name: str, new_value): + if param_name == "scheduling_time": + self.schedule_manager.time_limit_short = float(new_value) + + @property + def available_processes(self) -> List[ProcessDescription]: + found_processes = [] + for name, uuid in self.db_client.get_available_processes(): + description = self.db_client.get_process(uuid) + found_processes.append(ProcessDescription(name=name, description=description)) + return found_processes + + @property + def processes(self) -> List[ProcessInfo]: + result = [ProcessInfo(name=name, + priority=process.priority, + state=process.status) + for name, process in self.jssp.process_by_name.items()] + return result + + def start_processes(self, process_names: List[str] = None) -> bool: + if process_names is None: + process_names = self.jssp.process_by_name.keys() + for name in process_names: + if name in self.worker.jssp.running_processes_names: + Logger.warning("Already running") + else: + # perform a worker/lab specific check whether everything is ready for the start + #ready, message = self.worker.check_prerequisites(self.jssp.process_by_name[name]) + #Logger.info(message) + #if ready: + # Starts the specified added process. This will cause a rescheduling. + self.jssp.start_process(name) + self.schedule_manager.mark_schedule_invalid(enforce=True) + return True + + def stop_processes(self, process_names: List[str] = None) -> bool: + if process_names is None: + process_names = self.jssp.process_by_name.keys() + for name in process_names: + self.jssp.stop_process(name) + return True + + def pause_processes(self, process_names: List[str] = None) -> bool: + if process_names is None: + process_names = self.jssp.process_by_name.keys() + for name in process_names: + self.jssp.stop_process(name) + return True + + def resume_processes(self, process_names: List[str] = None) -> bool: + return self.start_processes(process_names) + + def restart_process_from_datetime(self, process_uri: str, start: datetime = None) -> bool: + raise NotImplementedError + + def get_process_state(self, process_name: str) -> ProcessExecutionState: + if process_name not in self.jssp.process_by_name: + raise Exception(f"Process named {process_name} not found.") + return self.jssp.process_stati_by_name[process_name] + + def set_process_priority(self, process_name: str, priority: int): + raise NotImplementedError + + def remove_processes(self, process_names: List[str], return_labwares: bool = False, final_device: str = None): + if process_names is None: + process_names = self.jssp.process_by_name.keys() + for name in process_names: + if not self.jssp.process_stati_by_name[name] == ProcessExecutionState.IDLE: + self.schedule_manager.mark_schedule_invalid() + self.jssp.remove_process(name) + return True + + def simulate_all_processes(self, speed: float) -> bool: + for op in self.jssp.step_by_id.values(): + op.duration /= speed + self.worker.simulation_mode = True + self.schedule_manager.time_limit_short = 1 + self.start_processes() + return True + + @property + def simulation_speed(self) -> float: + return self._simulation_speed + + @simulation_speed.setter + def simulation_speed(self, speed: float): + self._simulation_speed = speed + + @property + def in_time(self) -> bool: + return not self.jssp.schedule_violated() + + def 
export_current_scheduling_problem(self, filename: str): + print(f"exporting to {filename}") + problem = self.schedule_manager.extract_near_future(n_steps=5000) + sila_wfg = WorkFlowGraph.create_sila_structure_from_jobs(problem.values(), self.jssp.combined_wfg) + with open(filename, "w") as writer: + json.dump(sila_wfg, writer, indent=4) + + @property + def gantt_chart_scheduled_processes(self, processes: List[str] = None): + return self.jssp.gannt_chart() + + @property + def gantt_chart_executed_processes(self, processes: List[str] = None): + return self.jssp.gannt_chart() + + @property + def workflow_graph_scheduled_processes(self, processes: List[str] = None): + return self.jssp.visualize_wfg() + + @property + def workflow_graph_executed_processes(self, processes: List[str] = None): + return self.jssp.visualize_wfg() + + def insert_process_step(self, process_step: Any, parent_step_ids: List[str], child_step_ids: List[str], + process_id: Optional[str] = None, waiting_cost: Dict[str, float] = None, + max_waiting_time: Dict[str, float] = None): + raise NotImplementedError + + def interrupt_process_step(self, step_id: str): + self.error_resolved(step_id, repeat_operation=False) + + def retry_process_step(self, step_id: str): + self.error_resolved(step_id, repeat_operation=True) + + def remove_process_step(self, step_id: str): + raise NotImplementedError + + def get_process_step_state(self, step_id: str): + return self.jssp.step_by_id[step_id].status + + def process_step_executed_externally(self, step_id: str, result): + self.error_resolved(step_id, repeat_operation=False) + + def reset_error_state(self, step_id: str, repeat_operation: bool = False): + self.error_resolved(step_id, repeat_operation=repeat_operation) + + def get_log(self, start_datetime: datetime = None, stop_datetime: datetime = None, level: int = 0): + raise NotImplementedError + + @property + def logging_level(self): + return Logger.getLogger().level + + @logging_level.setter + def logging_level(self, level: int): + Logger.getLogger().setLevel(level) + + def stop_container(self, container_name): + """ + No further operations of the specified container will be started. The current operation will continue. + + :param container_name: Unique name of the container to stop processing + :return: + """ + raise NotImplementedError + + def continue_container(self, container_name): + """ + Continues operating the specified container. + + :param container_name: Unique name of the container to continue processing + :return: + """ + raise NotImplementedError + + def remove_container(self, container_name: str, return_container=False, final_device=None): + """ + Removes all information of the specified container. If the flag is set, the scheduler will return the + container to its original position. You can also specify a storage device, where it shall be brought. + + :param container_name: Unique name of the container. + :param return_container: Shall the container be brought somewhere. By default it is its starting position. + :param final_device: If *return_container* is set, you can specify a device the container shall be brought. + :return: Nothing + """ + raise NotImplementedError + + def error_resolved(self, operation_id: str, repeat_operation: bool = False): + """ + In case an error need to be resolved by hand, use this command afterward to proceed with the process. 
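+        A minimal sketch of the manual recovery flow; the step id below is a hypothetical example,
+        the dashboard simply passes whichever node is currently selected in the workflow graph::
+
+            orchestrator.error_resolved("P1_move_3", repeat_operation=True)   # schedule the failed step again
+            orchestrator.error_resolved("P1_move_3")                          # or mark it as finished and continue
+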
+ :param operation_id: Unique identifier for an operation + :param repeat_operation: Boolean whether the operation shall be tried again + :return: nothing + """ + if operation_id in self.jssp.operable: + print(f"Recovering step {operation_id}. Repeat the step: {repeat_operation}") + Logger.info(f"Recovering step {operation_id}. Repeat the step: {repeat_operation}") + operation = self.jssp.operable[operation_id] + if isinstance(operation, ProcessStep): + if operation_id in self.worker_observer.observed_jobs: + self.worker_observer.observed_jobs.remove(operation_id) + if repeat_operation: + # remove all information, the step was started + operation.status = StepStatus.WAITING + operation.start = None + operation.finish = None + if not repeat_operation: + if operation.start is None: + operation.start = datetime.today() + operation.finish = datetime.today() + timedelta(seconds=1) + operation.duration = (operation.finish - operation.start).total_seconds() + operation.status = StepStatus.FINISHED + if operation_id in self.worker.observation_handlers: + print(f"removing {operation_id} from observation_handlers.") + Logger.debug(f"removing {operation_id} from observation_handlers.") + self.worker.observation_handlers.pop(operation_id) + for cont_name in operation.cont_names: + cont = self.jssp.container_info_by_name[cont_name] + cont.in_error_state = False + self.schedule_manager.mark_schedule_invalid(enforce=True) + else: + Logger.warning("Recovering anything but process steps is not implemented") + Logger.error(f"Can only recover operations, not {operation_id}") + + def stop_and_remove_all_processes(self, return_containers=False, final_device=None): + """ + Removes all information all processes. If the flag is set, the scheduler will return all involved + containers to their original position. You can also specify a storage device, where all containers shall be + brought. + + :param return_containers: Shall the scheduler bring all involved containers to some location? By default, it is\ + their starting position. + :param final_device: If *return_containers* is set, you can specify a device where all involved containers\ + shall be brought. + :return: Nothing + """ + raise NotImplementedError + + def test_add_process(self, process): + """ + Tries to compute a schedule including the given process. You can get that schedule via *get_test_gantt_chart()*\ + and *get_test_workflow state()*. If you choose to really add and start it call *add_process()* + + :param process: The process to try to include. + :return: Nothing + """ + raise NotImplementedError + + def change_step(self, job_id, changes): + """ + Changes the specified operation. This might also be tried if the operation is already running. + + :param job_id: The unique id of the operation to change. + :param changes: A dictionary of changes to apply. + :return: Boolean, whether the operation could be changed. + """ + raise NotImplementedError + + def human_did_job(self, job_id, result): + """ + There are jobs that have to be done by humans(also coded as a lab device). Call this method to inform the + scheduler a human has finished the specified operation. + + :param job_id: unique id of the job, a human was supposed to do + :param result: In case the job had some results, those should be given here + :return: A "Thank You!" 
to the human + """ + pass + + def process_finished(self, process_name: str) -> bool: + """ + Checks whether the specified process is finished + :param process_name: + :return: + """ + p = self.jssp.process_by_name[process_name] + return all(job.status == StepStatus.FINISHED for job in p.steps if job.opacity > 0) + + def get_operable_node(self, idx): + if idx in self.jssp.operable: + return self.jssp.operable[idx] + return None + + def add_to_schedule(self, process_name): + if process_name in self.jssp.process_by_name: + p = self.jssp.process_by_name[process_name] + if p.status == ProcessExecutionState.IDLE: + p.status = ProcessExecutionState.SCHEDULED + if not self.jssp.running_processes_names: + self.schedule_manager.mark_schedule_invalid(enforce=True) diff --git a/laborchestrator/orchestrator_interface.py b/laborchestrator/orchestrator_interface.py new file mode 100755 index 0000000000000000000000000000000000000000..fe1f9e1ab9f0a754fe37d8e7b5acc2eec0f79077 --- /dev/null +++ b/laborchestrator/orchestrator_interface.py @@ -0,0 +1,539 @@ +from abc import ABC, ABCMeta, abstractmethod +from dataclasses import dataclass +from datetime import datetime +from typing import List, Dict, Any, Optional, Tuple + +from enum import Enum + + +class ProcessReader(Enum): + PYTHONLABREADER = 1 + # TODO: any other reader ? + + +class ProcessExecutionState(Enum): + """_State of a Process Execution_ + TODO: check SiLA specification + + :param Enum: _description_ + :type Enum: _type_ + """ + IDLE = 1 + SCHEDULED = 5 # - do we need this ? stefan: might be useful + RUNNING = 2 # EXECUTING + PAUSED = 3 + FINISHED = 4 + +@dataclass +class ProcessDescription: + name: str + description: Optional[str] = None + file_path: Optional[str] = None + +@dataclass +class ProcessInfo: + name: str + priority: int + state: ProcessExecutionState + + +@dataclass +class LabwareInfo: + """ """ + URI: str + barcode: str + + +class FormalOrchestratorConfigInterface(metaclass=ABCMeta): + """ Resources are all ... excluding labware""" + + @classmethod + def __subclasshook__(cls, subclass): + # TODO: check logic + return (hasattr(subclass, 'add_lab_resources') and + callable(subclass.add_lab_resources) or + NotImplemented) + + @abstractmethod + def add_lab_resources_from_file(self, lab_env_filename: str): + """Defines and configures the lab environment (types, names, functionalities, etc. of devices). + :param lab_env_filename: Name of the Lab Environment configuration file in YAML format, + according to the specification TODO:[@Mark: insert link] + :return: + """ + raise NotImplementedError + + @abstractmethod + def add_lab_resources_from_database(self, URI: str): + """Defines and configures the lab environment (types, names, functionalities, etc. of devices). 
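+        (Note: the bundled Orchestrator implementation does not support this variant yet and raises
+        NotImplementedError; add_lab_resources_from_file() is the supported way to configure the lab.)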
+ :param URI: Name of the Lab Environment configuration file in YAML format, + according to the specification TODO:[@Mark: insert link] + :return: + """ + raise NotImplementedError + + +class FormalProcessStepControllerInterface(metaclass=ABCMeta): + + @classmethod + def __subclasshook__(cls, subclass): + # TODO: check logic + return (hasattr(subclass, 'add_lab_resources') and + callable(subclass.add_lab_resources) or + NotImplemented) + + @abstractmethod + def insert_process_step(self, process_step: Any, + parent_step_ids: List[str], child_step_ids: List[str], process_id: Optional[str] = None, + waiting_cost: Dict[str, float] = None, max_waiting_time: Dict[str, float] = None): + """ + Adds the given process step inbetween the given parents and children with the (optional) given time constraints. + If process_id is omitted, there is assumed to be at least one child or parent step. The new step is then + assigned to the same process + """ + raise NotImplementedError + + @abstractmethod + def interrupt_process_step(self, step_id: str): + """ + Stops the process step as soon as possible and considers it finished + """ + raise NotImplementedError + + @abstractmethod + def retry_process_step(self, step_id: str): + """ + TODO: add description + """ + raise NotImplementedError + + @abstractmethod + def remove_process_step(self, step_id: str): + """ + TODO: add description + """ + raise NotImplementedError + + @abstractmethod + def get_process_step_state(self, step_id: str): + """ + TODO: add description + """ + raise NotImplementedError + + @abstractmethod + def process_step_executed_externally(self, step_id: str, result): + """ + TODO: improve description + There are jobs that have to be done externally, e.g. by a human or external device. + Call this method to inform the scheduler a external process step has finished + the specified operation. + + :param step_id: unique id of the step, a external entity was supposed to do + :param result: In case the job had some results, those should be given here + + """ + raise NotImplementedError + + @abstractmethod + def reset_error_state(self, step_id: str, repeat_operation: bool = False): + """TODO: improve description + In case an error need to be resolved manually, use this command afterwards to proceed with the process. + :param step_id: Unique identifier for an operation + :param repeat_operation: Boolean whether the operation shall be tried again + :return: nothing + """ + raise NotImplementedError + + +class FormalProcessControllerInterface(metaclass=ABCMeta): + """ + A class for managing one or more processes in a laboratory. You can add processes (also while others are already + running), monitor their progress. The class incorporates an intelligent scheduler. It adapts the schedule to delays, + errors, manual interference and addition of processes. Before really adding a process, you can check what the new + schedule would look like. The progress can be observed via gantt charts or workflow graphs. + """ + # A class to read new processes. 
By default, it is a PythonLabReader + @classmethod + def __subclasshook__(cls, subclass): + # TODO: check logic + return (hasattr(subclass, 'process_reader') and + callable(subclass.process_reader) and + + hasattr(subclass, 'add_process') and + callable(subclass.add_process) or + + hasattr(subclass, 'processes') and + callable(subclass.processes) or + + hasattr(subclass, 'start_process') and + callable(subclass.start_process) or + + hasattr(subclass, 'start_all_processes') and + callable(subclass.start_all_processes) or + + hasattr(subclass, 'stop_process') and + callable(subclass.stop_process) or + + hasattr(subclass, 'stop_all_processes') and + callable(subclass.stop_all_processes) or + + hasattr(subclass, 'pause_process') and + callable(subclass.pause_all_processes) or + + hasattr(subclass, 'resume_process') and + callable(subclass.resume_process) or + + hasattr(subclass, 'resume_all_processes') and + callable(subclass.resume_all_processes) or + + hasattr(subclass, 'get_process_state') and + callable(subclass.get_process_state) or + + hasattr(subclass, 'remove_process') and + callable(subclass.remove_process) or + + hasattr(subclass, 'remove_all_process') and + callable(subclass.remove_all_process) or + + hasattr(subclass, 'in_time') and + callable(subclass.in_time) or + + hasattr(subclass, 'process_step_executed_externally') and + callable(subclass.process_step_executed_externally) or + + hasattr(subclass, 'reset_error_state') and + callable(subclass.reset_error_state) or + + hasattr(subclass, 'gantt_chart') and + callable(subclass.gantt_chart) or + + hasattr(subclass, 'workflow_graph_visualisation') and + callable(subclass.workflow_graph_visualisation) or + NotImplemented) + + @abstractmethod + def select_process_reader(self, process_reader=ProcessReader.PYTHONLABREADER): + """Dependency injection of the process reader + """ + + def get_parameter(self, param_name: str): + """ + Used as a flexible and easily extendable method to retrieve parameters of the orchestrator + """ + + def set_parameter(self, param_name: str, new_value): + """ + Used as a flexible and easily extendable method to set parameters of the orchestrator + """ + + @property + @abstractmethod + def available_processes(self) -> List[ProcessDescription]: + """ + Checks the file-system(currently not implemented) or database for available saved processes. + All found processes are returned with the available information. + :return: A List of tuples [Name, description(if found), filepath(if found)] for each found process + """ + + @abstractmethod + def add_process(self, description: Optional[str] = None, file_path: Optional[str] = None, + name: Optional[str] = None) -> str: + """Adds a process to be orchestrated, read by the selected process reader + You have to either specify a description (i.e. the file content) or a file location. + If no name is specified, the process will be named like P_2 (enumerated). If the given name is already taken, + it is added a suffix. + :return: process object + :raises ParsingError + """ + raise NotImplementedError + + @property + @abstractmethod + def processes(self) -> List[ProcessInfo]: + """_Lists all process info for all current processes_ + + :raises NotImplementedError: _description_ + :return: _description_ + :rtype: List[str] + """ + raise NotImplementedError + + @abstractmethod + def start_processes(self, process_names: List[str] = None) -> bool: + """Starts the specified added process. This will cause an initial scheduling. 
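+        A short usage sketch (the process name is whatever add_process() returned, e.g. "P1";
+        calling the method without arguments is expected to start every added process)::
+
+            orchestrator.start_processes(["P1"])
+            orchestrator.start_processes()
+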
+ :return: bool if the process could be started + """ + raise NotImplementedError + + @abstractmethod + def stop_processes(self, process_names: List[str] = None) -> bool: + """ + Stops the specified process. All running operations will continue, but no follow up operation will be started. + + :param process_names: Unique name of the process to stop + :return: bool, if process could be stopped ? + """ + raise NotImplementedError + + @abstractmethod + def pause_processes(self, process_names: List[str] = None) -> bool: + """ + Pausing a process of a given name + :return: bool, if process could be paused + """ + raise NotImplementedError + + @abstractmethod + def resume_processes(self, process_names: List[str] = None) -> bool: + """ + Resume form Pausing a process of a given name + :return: bool, if all process could be resumed + """ + raise NotImplementedError + + @abstractmethod + def restart_process_from_datetime(self, process_uri: str, start: datetime = None) -> bool: + """ + Restarts a process from a given point in time. There has to be a database interface implemented. + :param process_uri: unique uri for the database interface to find information on the process + :param start: Point in time from where to restart. The default start point is the last known state. + :return: bool, if the process could be restarted + """ + raise NotImplementedError + + @abstractmethod + def get_process_state(self, process_name: str) -> ProcessExecutionState: + """ + returns current state of the process + :param process_name: + :return: ProcessExecutionState + """ + raise NotImplementedError + + @abstractmethod + def set_process_priority(self, process_name: str, priority: int): + """ + Changes the priority of an existing process + """ + raise NotImplementedError + + @abstractmethod + def remove_processes(self, process_names: List[str], + return_labwares: bool = False, final_device: str = None): + """ + TODO: improve description + Removes all information of the specified process. If the flag is set, the scheduler will return all involved + labware to their original position. You can also specify a storage device, where all labware shall be + brought. + + :param process_names: Unique name of the process to remove. + :param return_labwares: Shall the scheduler bring all involved labwares to some location? By default it is \ + their starting position. + :param final_device: If *return_labware* is set, you can specify a device where all involved labware shall\ + be brought. + :return: Nothing + """ + raise NotImplementedError + + @abstractmethod + def simulate_all_processes(self, speed: float) -> bool: + """TODO: description 0-600x ?. + :return: bool if all processes could be started + """ + # > 600 -> SpeedTooHightError + raise NotImplementedError + + @property + @abstractmethod + def simulation_speed(self) -> float: + raise NotImplementedError + + @simulation_speed.setter + @abstractmethod + def simulation_speed(self, speed: float) -> bool: + """TODO: description 0-600x ?. + :return: bool if all processes could be started + """ + # > 600 -> SpeedTooHightError + raise NotImplementedError + + @property + @abstractmethod + def in_time(self) -> bool: + """timing state of the orchestrator + + :return: _True, if orchestrator is currently in time (=not delayed) _ + :rtype: bool + """ + raise NotImplementedError + + @property + @abstractmethod + def gantt_chart_scheduled_processes(self, processes: List[str] = None): + """returns the gantt chart including that process. 
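+        In the bundled Orchestrator this property yields a plotly figure, so a caller could, for
+        example, render it directly; fig.show() is standard plotly and only an illustration::
+
+            fig = orchestrator.gantt_chart_scheduled_processes
+            fig.show()
+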
+ + future + + :param processes: List of names of all processes to include in the gantt chart. By default, its all. + :return: The gantt chart as plotly figure. + """ + raise NotImplementedError + + @property + @abstractmethod + def gantt_chart_executed_processes(self, processes: List[str] = None): + """returns the gantt chart including that process. + + past + :param processes: List of names of all processes to include in the gantt chart. By default, its all. + :return: The gantt chart as plotly figure. + """ + raise NotImplementedError + + @property + @abstractmethod + def workflow_graph_scheduled_processes(self, processes: List[str] = None): + """ + control-flow or workflow ? + Creates a graphviz Digraph visualizing the progress of scheduled processes. + future + TODO: we could add different formats, defined by an WFGFormat(Enum) + + :param processes: A list of process names that shall be visualized. By default all processes will be included. + :return: WorkFlowGraphViz + """ + raise NotImplementedError + + @property + @abstractmethod + def workflow_graph_executed_processes(self, processes: List[str] = None): + """ + control-flow or workflow ? + Creates a graphviz Digraph visualizing the progress of scheduled processes. + future + TODO: we could add different formats, defined by an WFGFormat(Enum) + + :param processes: A list of process names that shall be visualized. By default all processes will be included. + :return: WorkFlowGraphViz + """ + raise NotImplementedError + + # !! I would write an extra module to validate and test a process (additional instance of the orchestrator) + # def validate_process(self, process): + + +class FormalLabwareManagerInterface(metaclass=ABCMeta): + + @classmethod + def __subclasshook__(cls, subclass): + # TODO: check logic + return (hasattr(subclass, 'add_lab_resources') and + callable(subclass.add_lab_resources) or + NotImplemented) + + def check_labware_presence(self, process_names: List[str] = None) -> Tuple[List[Any], List[Any]]: + """ + Checks whether in the database there is labware registered as required for the process. (In the process is + defined, where what labware is required). + :param process_names: By default, it is all processes, have been added but not started. + :return: A tuple [found, missing]. The first entry is a list of information on existing labware + and the second entry lists requirements of missing labware + """ + raise NotImplementedError + + @abstractmethod + def add_labware(self, labware: List[Any]): + """ + register one or several labware to orchestrator + + """ + raise NotImplementedError + + @abstractmethod + def remove_labware(self, labware_ids: List[str]): + """ + Removes one ore several labware from the orchestrator + """ + raise NotImplementedError + + @abstractmethod + def create_labware_location_graph(self, labware_ids: List[str]): + """ + history of labware movements (esp. 
colocations) + current state labware distribution + + """ + raise NotImplementedError + + +class FormalSampleManagerInterface(metaclass=ABCMeta): + + @classmethod + def __subclasshook__(cls, subclass): + # TODO: check logic + return (hasattr(subclass, 'add_lab_resources') and + callable(subclass.add_lab_resources) or + NotImplemented) + + @property + @abstractmethod + def sample_location_graph(self, labware_id: str): + """ + TODO: add description + """ + raise NotImplementedError + + @abstractmethod + def get_sample_labware(self, labware_id: str, sample_id): + """ + TODO: add description + """ + raise NotImplementedError + + +class FormalLoggingInterface(metaclass=ABCMeta): + + @classmethod + def __subclasshook__(cls, subclass): + # TODO: check logic + return (hasattr(subclass, 'add_lab_resources') and + callable(subclass.add_lab_resources) or + NotImplemented) + + @property + @abstractmethod + def logging_level(self): + """ + TODO: add description + """ + raise NotImplementedError + + @logging_level.setter + @abstractmethod + def logging_level(self, level: int): + """ + TODO: add description + """ + raise NotImplementedError + + @abstractmethod + def get_log(self, start_datetime: datetime = None, stop_datetime: datetime = None, level: int = 0): + """ + default for stop: now + """ + +class OrchestratorInterface( + ABC, + FormalOrchestratorConfigInterface, + FormalProcessControllerInterface, + FormalLoggingInterface, + FormalLabwareManagerInterface, + FormalProcessStepControllerInterface, + #FormalSampleManagerInterface + ): + pass + +class ProcessParsingError(Exception): + def __init__(self, message): + super().__init__() diff --git a/laborchestrator/process_reader.py b/laborchestrator/process_reader.py new file mode 100755 index 0000000000000000000000000000000000000000..51cb72cd83e1a0602d2e5f7da5355299decd4420 --- /dev/null +++ b/laborchestrator/process_reader.py @@ -0,0 +1,50 @@ +""" +The general interface for process readers. These are used to parse process descriptions from a workflow description +language into the laborchestrator's own structures. For exemplary use see pythonlab_reader.py. +""" + +from abc import ABC, abstractmethod +from typing import Any, List + +from laborchestrator.structures import SMProcess + + +class ProcessReader(ABC): + def __init__(self): + pass + + @abstractmethod + def read_process(self, process: Any, **kwargs) -> SMProcess: + """ + Reads a process written in some workflow description language into the orchestrators own format. + :param process: + :param kwargs: + :return: + """ + + @staticmethod + def adjust_opacities(p: SMProcess): + """ + Utility function setting the opacities of all process steps that are unsure to be executed + (depending on runtime decisions) to .5 + :param p: + :return: + """ + changed = [n.name for n in p.if_nodes] + while len(changed) > 0: + newly_changed = [] + for lis in p.steps, p.if_nodes, p.variables, p.computations: + for node in lis: + if node.opacity == 1 and any([idx in changed for idx in node.prior]): + node.opacity = .5 + newly_changed.append(node.name) + changed = newly_changed + + @staticmethod + def get_available_processes(file_dir: str) -> List[Any]: + """ + Searches a directory for available process descriptions. + :param file_dir: + :return: A list containing all found process descriptions (in their native language). 
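+        (The base implementation below simply returns an empty list; PythonLabReader overrides this
+        and scans the given directory for importable PythonLab process modules.)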
+ """ + return [] diff --git a/laborchestrator/pythonlab_process_finder.py b/laborchestrator/pythonlab_process_finder.py new file mode 100755 index 0000000000000000000000000000000000000000..8ac4a1754b6134f08d2a38a5721d551ba86230b2 --- /dev/null +++ b/laborchestrator/pythonlab_process_finder.py @@ -0,0 +1,112 @@ +""" +This module provides the ProcessFinder. +A tool ta get a list of all available PythonLan processes in a specified directory or module. +""" +import os +import importlib +from importlib import util as import_util +import pkgutil +import traceback +from typing import NamedTuple, List, Optional +from types import ModuleType +import logging + +from pythonlab.process import PLProcess + +default_dir = os.path.dirname(__file__) + + +class ImportableProcess(NamedTuple): + module: ModuleType + name: str + file_path: Optional[str] = None + + +class ProcessFinder: + + @staticmethod + def to_process_id(ip: ImportableProcess): + """ + In case two processes have the same name, this is a readable, unique string to distinguish them + :param ip: + :return: + """ + return f"{ip.module.__name__}.{ip.name}" + + @staticmethod + def create_process_from_id(process_id: str, pck: ModuleType) -> Optional[PLProcess]: + try: + importable = ProcessFinder.get_processes(pck) + for p in importable: + if ProcessFinder.to_process_id(p) == process_id: + return ProcessFinder.create_process(p) + except Exception as ex: + Logger.error(f"Could not import {process_id}: {ex}\n {traceback.print_exc()}") + return None + + @staticmethod + def get_processes(pck: ModuleType) -> List[ImportableProcess]: + importlib.reload(pck) + process_names = ProcessFinder._find_processes(pck) + return process_names + + @staticmethod + def _find_processes(pck: ModuleType) -> List[ImportableProcess]: + """ + recursively iterate through namespace + Specifying the second argument (prefix) to iter_modules makes the + returned name an absolute name instead of a relative one. This allows + import_module to work without having to do additional modification to + the name. + s. https://packaging.python.org/guides/creating-and-discovering-plugins/ + """ + processes = [] + for finder, name, ispkg in pkgutil.iter_modules(pck.__path__): + mod_name = pck.__name__ + "." 
+ name + try: + submodule = importlib.import_module(mod_name) + importlib.reload(submodule) + if ispkg: + pass + else: + for attr in dir(submodule): + try: + buff = getattr(submodule, attr)() + if isinstance(buff, PLProcess): + processes.append(ImportableProcess(submodule, attr)) + except: + # there will be lots of errors to ignore because not all attributes are callable + #Logger.debug(traceback.print_exc()) + pass + except Exception as ex: + #Logger.debug(f"{ex}\n{traceback.print_exc()}") + pass + return processes + + @staticmethod + def importable_processes_from_string(src: str, debug_printouts: bool = False) -> List[ImportableProcess]: + found_processes = [] + module_name = "tmp_module" + spec = import_util.spec_from_loader(module_name, loader=None) + tmp_module = import_util.module_from_spec(spec) + exec(src, tmp_module.__dict__) + for attr_name in dir(tmp_module): + try: + attr = getattr(tmp_module, attr_name) + inst = attr() + if isinstance(inst, PLProcess): + found_processes.append(ImportableProcess(tmp_module, attr_name)) + except: + # there will be lots of errors to ignore because not all attributes are callable + # Logger.debug(traceback.print_exc()) + if debug_printouts: + if callable(attr) and issubclass(attr, PLProcess) and hasattr(attr, "process"): + print(f"Tried to import and instantiate {attr_name}.") + print(traceback.print_exc()) + return found_processes + + @staticmethod + def create_process(importable_process: ImportableProcess) -> PLProcess: + process = getattr(importable_process.module, importable_process.name)() + assert isinstance(process, PLProcess) + return process diff --git a/laborchestrator/pythonlab_reader.py b/laborchestrator/pythonlab_reader.py new file mode 100755 index 0000000000000000000000000000000000000000..57778bc0eff74c95f9d070f1f2d671c16fee7c95 --- /dev/null +++ b/laborchestrator/pythonlab_reader.py @@ -0,0 +1,304 @@ +""" +Contains an implementation of ProcessReader for PythonLab processes +""" +import importlib +import argparse +from laborchestrator.logging_manager import StandardLogger as Logger + +import networkx as nx +from laborchestrator.structures import ( + SMProcess, ProcessStep, Variable, ContainerInfo, MoveStep, IfNode, Computation, UsedDevice +) +from pythonlab.process import PLProcess +from pythonlab.pythonlab_reader import PLProcessReader +from laborchestrator.process_reader import ProcessReader +from laborchestrator.pythonlab_process_finder import ProcessFinder +from os import path +from typing import List, Dict, Any, Optional + + +class PythonLabReader(ProcessReader): + def __init__(self): + super(PythonLabReader, self).__init__() + + @staticmethod + def costs_from_prio(prio: float): + return 100 * 2 ** -prio + + @staticmethod + def preprocess_wfg(plp: PLProcess): + """Some regularisation, translation of priorities into waiting costs and filling of missing values.""" + g = plp.workflow + # add small waiting costs for regularization# _origin=origin, + for edge in g.edges.values(): + if 'wait_cost' in edge: + edge['wait_cost'] += 1 + # set the container priorities to the process priority if they have no individual priority + for cont in plp.labware_resources: + if cont.priority is None: + cont.priority = plp.priority + # add waiting costs for prioritised containers + for cont, node in zip(plp.labware_resources, plp.labware_nodes.values()): + for u, v in nx.dfs_edges(g, node): + g.nodes[node]['wait_to_start_costs'] = PythonLabReader.costs_from_prio(cont.priority)/2 + if 'wait_cost' in g[u][v]: + g[u][v]['wait_cost'] += 
PythonLabReader.costs_from_prio(cont.priority) + + @staticmethod + def read_process(process: PLProcess, name: Optional[str] = None, src=None, **kwargs) -> SMProcess: + """ + The main function. It takes a PythonLabProcess and derives a SMProcess from it. The Information is stored + in Job and ContainerInfo classes from the structures-module. + """ + # take the class name if none is given + if name is None: + name = type(process).__name__ + smp = SMProcess(name) + process = PLProcessReader.parse_process(process, src=src) + PythonLabReader.preprocess_wfg(process) + PythonLabReader.relabel_nodes(process, name) + g = process.workflow + smp.steps = PythonLabReader.read_jobs(g) + smp.containers = PythonLabReader.read_containers(g, process) + smp.variables = PythonLabReader.read_variables(g) + smp.computations = PythonLabReader.read_computations(g) + smp.if_nodes = PythonLabReader.read_if_nodes(g) + PythonLabReader.read_precedences(smp, g) + ProcessReader.adjust_opacities(smp) + smp.update_reagent_opacity() + PythonLabReader.read_wait_cons(smp, g) + PythonLabReader.fill_transfer_times(smp) + return smp + + @staticmethod + def read_jobs(g: nx.DiGraph): + """ + + Extracts all information on jobs in a workflow. The attributes of the jobs are assumed to be in the + data-dictionary which networkx provides for each node. + + :param g: The graph containing the whole workflow + :return: A list of all jobs described in the graph + """ + jobs = [] + topo = list(nx.topological_sort(g)) + for idx, data in g.nodes(data=True): + if data['type'] == 'operation': + # these are the preferred devices for this job + if 'executor' in data: + executors = {type(resource): resource.name for resource in data['executor']} + else: + executors = {} + main_device = data['device_type'] + preferred_main = executors[main_device] if main_device in executors else None + kwargs = dict( + name=idx, + cont_names=data['cont_names'], + function=data['fct'], + used_devices=[UsedDevice(main_device, tag='main', preferred=preferred_main)], + duration=data['duration'] if 'duration' in data else 1, # might not be set, yet + label=data['name'] + ) + if 'reagents' in data: + kwargs['cont_names'].extend(labware.name for labware in data['reagents']) + if 'wait_to_start_costs' in data: + kwargs['wait_to_start_costs'] = data['wait_to_start_costs'] + # we save all special data on a data dictionary + kwargs['data'] = {key: value for key, value in data.items() if key not in kwargs} + if data['fct'] == 'move': + # try to find where the container is before that move + origin = PythonLabReader.find_origin(idx, data, topo, g) + if origin: + kwargs['used_devices'].append(origin) + # figure out, where exactly to container is going + if 'position' in data: + kwargs['pref_dest_pos'] = data['position'] + preferred_target = executors[data['target']] if data['target'] in executors else None + if "target_name" in data and not preferred_target: + preferred_target = data['target_name'] + kwargs['used_devices'].append(UsedDevice(data['target'], tag='target', preferred=preferred_target)) + jobs.append(MoveStep(**kwargs)) + else: + jobs.append(ProcessStep(**kwargs)) + return jobs + + @staticmethod + def find_origin(idx, data: Dict[str, Any], topo_sort: List[str], g: nx.DiGraph) -> Optional[UsedDevice]: + # sort all nodes before the current node topologically (shortest distance first) + ancestry = reversed(topo_sort[:topo_sort.index(idx)]) + # asserting we can only move one container at a time + cont_name = data["cont_names"][0] + for m in ancestry: + data2 = 
g.nodes[m] + if data2['type'] == 'operation' and \ + data2['fct'] == 'move' and \ + cont_name in data2['cont_names']: + data['source'] = data2['target'] + pref_source = data2['target_name'] + Logger.debug(f"source of step {idx} set to {data['source']}|{pref_source}.") + return UsedDevice(data['source'], tag='origin', preferred=pref_source) + if data2['type'] == 'container' and \ + data2['name'] == cont_name: + data['source'] = data2['origin_type'] + pref_source = data2['origin'] + Logger.debug(f"source of step {idx} set to {data['source']}|{pref_source}.") + return UsedDevice(data['source'], tag='origin', preferred=pref_source) + return None + + @staticmethod + def read_containers(g: nx.DiGraph, p: PLProcess): + containers = [] + for idx in p.labware_nodes.values(): + data = g.nodes[idx] + containers.append(ContainerInfo( + name=data['name'], + current_device=data['origin'], + current_pos=data['origin_pos'], + start_device=UsedDevice(device_type=data['origin_type'], name=data['origin'], preferred=data['origin']), + lidded=data['lidded'], + filled=True if 'filled' not in data else data['filled'], # it's an optional parameter in PythonLab + is_reagent="is_reagent" in data, + )) + return containers + + @staticmethod + def read_variables(g: nx.DiGraph): + variables = [] + for idx, data in g.nodes(data=True): + if data['type'] == 'variable': + kwargs = dict( + name=idx, + var_name=data['var_name'] + ) + if 'var_type' in data: + kwargs['var_type'] = data['var_type'] + variables.append(Variable(**kwargs)) + return variables + + @staticmethod + def read_computations(g: nx.DiGraph): + computations = [] + for idx, data in g.nodes(data=True): + if data['type'] == 'computation': + kwargs = dict( + name=idx, + evaluation=data['function'], + var_name=data['var_name'] + ) + computations.append(Computation(**kwargs)) + return computations + + @staticmethod + def read_if_nodes(g: nx.DiGraph): + if_nodes = [] + for idx, data in g.nodes(data=True): + if data['type'] == 'if_node': + out = [v for buff, v in g.out_edges(idx)] + true_tree = [v for v in out if g[idx][v]['sub_tree']] + false_tree = [v for v in out if not g[idx][v]['sub_tree']] + kwargs = dict( + name=idx, + evaluation=data['function'], + true_tree=true_tree, + false_tree=false_tree, + ) + if_nodes.append(IfNode(**kwargs)) + return if_nodes + + @staticmethod + def fill_transfer_times(smp): + for job in smp.steps: + if isinstance(job, MoveStep): + job.duration = 40 + if 'lidded' in job.data: + job.duration += 30 + + @staticmethod + def relabel_nodes(p, prefix): + """ + Adds '[ProcessName]_' to all node labels (This makes them unique in the schedule, if the ProcessName is unique) + """ + label_map = {old: f"{prefix}_{old}" for old in list(p.workflow.nodes) + [r.name for r in p.labware_resources]} + nx.relabel_nodes(p.workflow, label_map, copy=False) + new_container_map = dict() + for name, node in p.labware_nodes.items(): + nx_node = p.workflow.nodes[label_map[node]] + nx_node['name'] = prefix + '_' + nx_node['name'] + new_container_map[nx_node['name']] = label_map[node] + # also change references to containers + for n, data in p.workflow.nodes(data=True): + if 'cont_names' in data: + data['cont_names'] = [label_map[name] for name in data['cont_names']] + for resource in p.labware_resources: + resource._name = label_map[resource.name] + # restore the links between name and label in the PLProcess accordingly + p.labware_nodes = new_container_map + + @staticmethod + def read_precedences(smp: SMProcess, g: nx.DiGraph): + operable = {node.name: node for 
node in smp.steps + smp.if_nodes + smp.variables + smp.computations} + for u, v, data in g.edges(data=True): + if u in operable: + if v in operable: + operable[v].prior.append(u) + elif v in []: + pass + for job in operable.values(): + job.is_start = len(job.prior) == 0 + + @staticmethod + def read_wait_cons(smp: SMProcess, g: nx.DiGraph): + job_by_id = {job.name: job for job in smp.steps} + # iterate through edges and copy the values into Job-class + for u, v, data in g.edges(data=True): + if v in job_by_id: + job = job_by_id[v] + if 'max_wait' in data: + # we can not yet handle maximum waiting time to start + if not job.is_start: + job.max_wait[u] = data['max_wait'] + if 'wait_cost' in data: + job.wait_cost[u] = data['wait_cost'] + if "min_wait" in data: + job.min_wait[u] = data['min_wait'] + for n, data in g.nodes(data=True): + if 'wait_to_start_cost' in data: + job_by_id[n].wait_to_start_costs += data['wait_to_start_cost'] + + @staticmethod + def get_available_processes(file_dir: str) -> List[Any]: + parent_dir, dir_name = path.split(path.dirname(file_dir)) + sys.path.append(parent_dir) + module = importlib.import_module(dir_name) + return ProcessFinder.get_processes(module) + + +if __name__ == '__main__': + """ + This is intended for testing the import and reading of a PythonLab process. + """ + parser = argparse.ArgumentParser() + parser.add_argument( + "-p", "--process", action="store", help="PythonLab Process" + ) + args = parser.parse_args() + process_file = args.process + from pathlib import Path + import sys + process_dir = Path(process_file).resolve().parent + sys.path.append(str(process_dir)) + if not __package__: + __package__ = process_dir + + with open(process_file, 'r') as reader: + source_code = reader.read() + print("Successfully read file") + importable_processes = ProcessFinder.importable_processes_from_string(source_code, debug_printouts=True) + if importable_processes: + print(f"Successfully imported the process {importable_processes[0].name} from its source code") + process_object = ProcessFinder.create_process(importable_processes[0]) + print(f"Created process object: {process_object}") + sm_process = PythonLabReader.read_process(process_object, src=source_code) + print("Successfully created workflow structure") + else: + print("import failed") diff --git a/laborchestrator/sila_server/SiLA_features/CancelController-v1_0.sila.xml b/laborchestrator/sila_server/SiLA_features/CancelController-v1_0.sila.xml new file mode 100755 index 0000000000000000000000000000000000000000..0b2729fad247f31222350991ac2ed27c1a32b5d9 --- /dev/null +++ b/laborchestrator/sila_server/SiLA_features/CancelController-v1_0.sila.xml @@ -0,0 +1,79 @@ + + + CancelController + Cancel Controller + + This feature offers commands to cancel/terminate Commands. Cancellation is the act of stopping the running Command execution(s), + irrevocably. The SiLA Server SHOULD be able to be in a state where any further + commands can be issued after a cancellation. + + + CancelCommand + Cancel Command + + Cancel a specified currently running Observable Command or cancel all currently running Observable Commands . + For any canceled Observable Command the SiLA Server MUST update the Command Execution Status to "Command Finished + with Error". + The SiLA Server MUST throw a descriptive error message indicating cancellation as the reason for the Command + execution not being able to finish successfully for any canceled Command. 
+ + No + + CommandExecutionUUID + Command Execution UUID + The Command Execution UUID according to the SiLA Standard. + + UUID + + + + InvalidCommandExecutionUUID + OperationNotSupported + + + + CancelAll + Cancel All + + Cancels all currently running Observable and Unobservable Commands running on this SiLA Server. + The SiLA Server MUST throw an Execution Error indicating 'cancellation' as the reason for the + Command not being able to finish successfully. + + No + + + UUID + UUID + A Universally Unique Identifier (UUID) referring to observable command executions. + + + + String + + + 36 + [0-9a-f]{8}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{12} + + + + + + InvalidCommandExecutionUUID + Invalid Command Execution UUID + + The given Command Execution UUID does not specify a command that is currently being executed. + + + + OperationNotSupported + Operation Not Supported + + Canceling is not supported for the SiLA 2 Command with the + specified CommandExecutionUUID. + + + diff --git a/laborchestrator/sila_server/SiLA_features/LabOrchestratorService.sila.xml b/laborchestrator/sila_server/SiLA_features/LabOrchestratorService.sila.xml new file mode 100755 index 0000000000000000000000000000000000000000..3b57974b7a6de3e9624cf902b4d585488db70c9d --- /dev/null +++ b/laborchestrator/sila_server/SiLA_features/LabOrchestratorService.sila.xml @@ -0,0 +1,165 @@ + + + LabOrchestratorService + LabOrchestrator Service + + This feature controls the Lab Orchestrator + + + LoadProcess + Load Process + Load a pythonLab Process + No + + LabProcessName + Lab Process Name + Name of the Labprocess in the LARA database + + String + + + + InvalidProcess + + + + LoadProcessFile + Load Process File + Load a pythonLab Process file + No + + LabProcessFileName + Lab Process File Name + Filenname of the Labprocess + + String + + + + Name + Name + Unique name given to the process. Can be used to reference this process. + + String + + + + InvalidProcess + + + + LoadLabConfiguration + Load Lab Configuration + Load a Lab Configuration from Database + No + + LabConfigurationName + Lab Configuration Name + Name of Lab Configuration to be loaded from the (LARA) database + + String + + + + InvalidLabConfiguration + + + + LoadLabConfigurationFile + Load Lab Configuration File + Load a Lab Configuration from YAML file + No + + LabConfigurationFileName + Lab Configuration File Name + Filename of Lab Configuration to be loaded - in YAML format. 
+ + String + + + + InvalidLabConfiguration + + + + StartProcess + Start Process + Starts a given process + No + + ProcessName + Process Name + Name of the process to start + + String + + + + + StopProcess + Stop Process + Stops a given process + No + + ProcessName + Process Name + Name of the process to stop + + String + + + + + GetStatus + Get Status + Returns the status of the specified process + No + + ProcessName + Process Name + Name of the process + + String + + + + ProcessStatus + Process Status + Status of the process + + ProcessStatus + + + + + ProcessStatus + Process Status + Status of a Process + + + + String + + + + Idle + Running + Finished + Paused + Error + + + + + + + InvalidProcess + Invalid Process + Invalid Python Lab Process + + + InvalidLabConfiguration + Invalid Lab Configuration + Invalid Lab Configuration + + diff --git a/laborchestrator/sila_server/SiLA_features/PauseController-v2_0.sila.xml b/laborchestrator/sila_server/SiLA_features/PauseController-v2_0.sila.xml new file mode 100755 index 0000000000000000000000000000000000000000..a0b298c3b5047a5c99eec7d345002a32cbec9a55 --- /dev/null +++ b/laborchestrator/sila_server/SiLA_features/PauseController-v2_0.sila.xml @@ -0,0 +1,108 @@ + + + PauseController + Pause Controller + + Allows to pause or resume a currently running Observable Command. Pausing is the act of stopping the + progress of the desired intent of a Command with the option of continuing the execution when resuming. + + A SiLA Client SHOULD be able to pause or resume the Observable Commands at any time. Not every Observable Command + might support this Feature. If not, an "OperationNotSupported" Execution Error MUST be thrown. + + + Pause + Pause + + Pause the Command execution. The Command can then be resumed again. The Command Execution Status of the + Observable Command MUST not be affected. + + No + + CommandExecutionUUID + Command Execution UUID + The Command Execution UUID according to the SiLA Standard. + + UUID + + + + InvalidCommandExecutionUUID + InvalidCommandState + OperationNotSupported + + + + Resume + Resume + Resume the Command after it has been paused. + No + + CommandExecutionUUID + Command Execution UUID + The Command Execution UUID according to the SiLA Standard. + + UUID + + + + InvalidCommandExecutionUUID + InvalidCommandState + OperationNotSupported + + + + PausedCommands + Paused Commands + A List of Command Execution UUID that are in a paused state. + Yes + + + + UUID + + + + + + InvalidCommandExecutionUUID + Invalid Command Execution UUID + + The given Command Execution UUID does not specify a command that is currently being executed. + + + + InvalidCommandState + Invalid Command State + + The specified command is not in a valid state to perform the operation (Pause or Resume). + + + + OperationNotSupported + Operation Not Supported + + The operation (Pause or Resume) is not supported for the SiLA 2 command which the + specified Command Execution UUID belongs to. + + + + UUID + UUID + A Universally Unique Identifier (UUID) referring to observable command executions. 
+ + + + String + + + 36 + [0-9a-f]{8}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{12} + + + + + diff --git a/laborchestrator/sila_server/SiLA_features/SimulationController-v1_0.sila.xml b/laborchestrator/sila_server/SiLA_features/SimulationController-v1_0.sila.xml new file mode 100755 index 0000000000000000000000000000000000000000..27a9f0499fadf48be53b1cf87258f5c8c4456ca8 --- /dev/null +++ b/laborchestrator/sila_server/SiLA_features/SimulationController-v1_0.sila.xml @@ -0,0 +1,76 @@ + + + SimulationController + Simulation Controller + + This Feature provides control over the simulation behaviour of a SiLA Server. + + A SiLA Server can run in two modes: + (a) Real Mode - with real activities, e.g. addressing or controlling real hardware, e.g. through serial/CANBus commands, + writing to real databases, moving real objects etc. + (b) Simulation Mode - where every command is only simulated and responses are just example returns. + + Note that certain commands and properties might not be affected by this feature if they + do not interact with the real world. + + + StartSimulationMode + Start Simulation Mode + + Sets the SiLA Server to run in Simulation Mode, i.e. all following commands are executed in simulation mode. + + The Simulation Mode can only be entered, if all hardware operations have been safely terminated + or are in a controlled, safe state. + + The simulation mode can be stopped by issuing the 'Start Real Mode' command. + + No + + StartSimulationModeFailed + + + + StartRealMode + Start Real Mode + + Sets the SiLA Server to run in real mode, i.e. all following commands are executed with real-world + interactions, like serial port/CAN communication, motor actions etc. + + If the server is in Simulation Mode it can be interrupted at any time. A re-initialization of + the hardware might be required. The Real Mode can be stopped by issuing the 'Start Simulation Mode' command. + + No + + StartRealModeFailed + + + + SimulationMode + SimulationMode + Indication whether SiLA Server is in Simulation Mode or not. + No + + Boolean + + + + StartSimulationModeFailed + The start of Simulation Mode failed. + + The server cannot change to Simulation Mode. + This error can, e.g., be thrown, if a real-world process needs to be ended before switching to simulation + mode. + + + + StartRealModeFailed + The start of Real Mode failed. + + The server cannot change to Real Mode. + This error can, e.g., be thrown, if a device is not ready to change into Real Mode. 
+ + + diff --git a/laborchestrator/sila_server/__init__.py b/laborchestrator/sila_server/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..109f4dd8eb56c941664f4e30b37d8172598d5bec --- /dev/null +++ b/laborchestrator/sila_server/__init__.py @@ -0,0 +1,8 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from .generated import Client +from .server import Server + +__all__ = [ + "Client", + "Server", +] diff --git a/laborchestrator/sila_server/__main__.py b/laborchestrator/sila_server/__main__.py new file mode 100755 index 0000000000000000000000000000000000000000..3b828abfac0572b8c961817c68a67299bd548e09 --- /dev/null +++ b/laborchestrator/sila_server/__main__.py @@ -0,0 +1,109 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +import logging +import signal +from typing import Optional +from uuid import UUID + +import typer +from sila2.framework.utils import running_in_docker +from typer import BadParameter, Option + +from .server import Server + +logger = logging.getLogger(__name__) + + +def main( + ip_address: str = Option( + "0.0.0.0" if running_in_docker() else "127.0.0.1", "-a", "--ip-address", help="The IP address" + ), + port: int = Option(50052, "-p", "--port", help="The port"), + server_uuid: Optional[str] = Option( + None, "--server-uuid", help="The server UUID [default: generate]", show_default=False + ), + disable_discovery: bool = Option(False, "--disable-discovery", help="Disable SiLA Server Discovery"), + insecure: bool = Option(False, "--insecure", help="Start without encryption"), + private_key_file: Optional[str] = Option( + None, "-k", "--private-key-file", help="Private key file (e.g. 'server-key.pem')" + ), + cert_file: Optional[str] = Option(None, "-c", "--cert-file", help="Certificate file (e.g. 'server-cert.pem')"), + ca_file_for_discovery: Optional[str] = Option( + None, + "--ca-file-for-discovery", + help="Certificate Authority file for distribution via the SiLA Server Discovery (e.g. 
'server-ca.pem')", + ), + ca_export_file: Optional[str] = Option( + None, help="When using a self-signed certificate, write the generated CA to this file" + ), + quiet: bool = Option(False, "--quiet", help="Only log errors"), + verbose: bool = Option(False, "--verbose", help="Enable verbose logging"), + debug: bool = Option(False, "--debug", help="Enable debug logging"), +): + # validate parameters + if (insecure or ca_export_file is not None) and (cert_file is not None or private_key_file is not None): + raise BadParameter("Cannot use --insecure or --ca-export-file with --private-key-file or --cert-file") + if (cert_file is None and private_key_file is not None) or (private_key_file is None and cert_file is not None): + raise BadParameter("Either provide both --private-key-file and --cert-file, or none of them") + if insecure and ca_export_file is not None: + raise BadParameter("Cannot use --export-ca-file with --insecure") + + # prepare server parameters + cert = open(cert_file, "rb").read() if cert_file is not None else None + private_key = open(private_key_file, "rb").read() if private_key_file is not None else None + ca_for_discovery = open(ca_file_for_discovery, "rb").read() if ca_file_for_discovery is not None else None + parsed_server_uuid = UUID(server_uuid) if server_uuid is not None else None + + # logging setup + initialize_logging(quiet=quiet, verbose=verbose, debug=debug) + + # run server + server = Server(server_uuid=parsed_server_uuid) + try: + if insecure: + server.start_insecure(ip_address, port, enable_discovery=not disable_discovery) + else: + server.start( + ip_address, + port, + cert_chain=cert, + private_key=private_key, + enable_discovery=not disable_discovery, + ca_for_discovery=ca_for_discovery, + ) + if ca_export_file is not None: + with open(ca_export_file, "wb") as fp: + fp.write(server.generated_ca) + logger.info(f"Wrote generated CA to '{ca_export_file}'") + logger.info("Server startup complete") + + signal.signal(signal.SIGTERM, lambda *args: server.stop()) + + try: + server.grpc_server.wait_for_termination() + except KeyboardInterrupt: + pass + finally: + if server.running: + server.stop() + logger.info("Server shutdown complete") + + +def initialize_logging(*, quiet: bool = False, verbose: bool = False, debug: bool = False): + if sum((quiet, verbose, debug)) > 1: + raise BadParameter("--quiet, --verbose and --debug are mutually exclusive") + + level = logging.WARNING + if verbose: + level = logging.INFO + if debug: + level = logging.DEBUG + if quiet: + level = logging.ERROR + + logging.basicConfig(level=level, format="%(asctime)s:%(levelname)s:%(name)s:%(message)s") + logger.setLevel(logging.INFO) + logging.getLogger("xmlschema").setLevel(logging.WARNING) + + +if __name__ == "__main__": + typer.run(main) diff --git a/laborchestrator/sila_server/feature_implementations/__init__.py b/laborchestrator/sila_server/feature_implementations/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..765e2a0f0a07b0e8f6e820c5d2812a7239cf28b7 --- /dev/null +++ b/laborchestrator/sila_server/feature_implementations/__init__.py @@ -0,0 +1 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 diff --git a/laborchestrator/sila_server/feature_implementations/cancelcontroller_impl.py b/laborchestrator/sila_server/feature_implementations/cancelcontroller_impl.py new file mode 100755 index 0000000000000000000000000000000000000000..9b7036d2e34711166e2247e71cc72e2cff7cc09f --- /dev/null +++ 
b/laborchestrator/sila_server/feature_implementations/cancelcontroller_impl.py @@ -0,0 +1,22 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import TYPE_CHECKING + +from sila2.server import MetadataDict + +from ..generated.cancelcontroller import UUID, CancelAll_Responses, CancelCommand_Responses, CancelControllerBase + +if TYPE_CHECKING: + from ..server import Server + + +class CancelControllerImpl(CancelControllerBase): + def __init__(self, parent_server: Server) -> None: + super().__init__(parent_server=parent_server) + + def CancelCommand(self, CommandExecutionUUID: UUID, *, metadata: MetadataDict) -> CancelCommand_Responses: + raise NotImplementedError # TODO + + def CancelAll(self, *, metadata: MetadataDict) -> CancelAll_Responses: + raise NotImplementedError # TODO diff --git a/laborchestrator/sila_server/feature_implementations/laborchestratorservice_impl.py b/laborchestrator/sila_server/feature_implementations/laborchestratorservice_impl.py new file mode 100755 index 0000000000000000000000000000000000000000..e929a35d170aa7917aea6fb671dd769c17f25daa --- /dev/null +++ b/laborchestrator/sila_server/feature_implementations/laborchestratorservice_impl.py @@ -0,0 +1,59 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +import traceback +from typing import TYPE_CHECKING + +from sila2.server import MetadataDict + +from ..generated.laborchestratorservice import ( + GetStatus_Responses, + LabOrchestratorServiceBase, + LoadLabConfiguration_Responses, + LoadLabConfigurationFile_Responses, + LoadProcess_Responses, + LoadProcessFile_Responses, + StartProcess_Responses, + StopProcess_Responses, +) + +if TYPE_CHECKING: + from ..server import Server + + +class LabOrchestratorServiceImpl(LabOrchestratorServiceBase): + def __init__(self, parent_server: Server) -> None: + super().__init__(parent_server=parent_server) + + def LoadProcess(self, LabProcessName: str, *, metadata: MetadataDict) -> LoadProcess_Responses: + raise NotImplementedError # TODO + + def LoadProcessFile(self, LabProcessFileName: str, *, metadata: MetadataDict) -> LoadProcessFile_Responses: + try: + process_name = self.parent_server.orchestrator.add_process(file_path=LabProcessFileName) + print(process_name) + return process_name + except Exception as ex: + print(ex, traceback.print_exc()) + return "FAIL" + + def LoadLabConfiguration( + self, LabConfigurationName: str, *, metadata: MetadataDict + ) -> LoadLabConfiguration_Responses: + raise NotImplementedError # TODO + + def LoadLabConfigurationFile( + self, LabConfigurationFileName: str, *, metadata: MetadataDict + ) -> LoadLabConfigurationFile_Responses: + raise NotImplementedError # TODO + + def StartProcess(self, ProcessName: str, *, metadata: MetadataDict) -> StartProcess_Responses: + self.parent_server.orchestrator.start_processes([ProcessName]) + + def StopProcess(self, ProcessName: str, *, metadata: MetadataDict) -> StopProcess_Responses: + raise NotImplementedError # TODO + + def GetStatus(self, ProcessName: str, *, metadata: MetadataDict) -> GetStatus_Responses: + status = self.parent_server.orchestrator.get_process_state(ProcessName) + response = status.name.title() + return response diff --git a/laborchestrator/sila_server/feature_implementations/pausecontroller_impl.py b/laborchestrator/sila_server/feature_implementations/pausecontroller_impl.py new file mode 100755 index 
0000000000000000000000000000000000000000..216521e9afb2db1ebf5ef3b843613793981b50cc --- /dev/null +++ b/laborchestrator/sila_server/feature_implementations/pausecontroller_impl.py @@ -0,0 +1,22 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import TYPE_CHECKING + +from sila2.server import MetadataDict + +from ..generated.pausecontroller import UUID, Pause_Responses, PauseControllerBase, Resume_Responses + +if TYPE_CHECKING: + from ..server import Server + + +class PauseControllerImpl(PauseControllerBase): + def __init__(self, parent_server: Server) -> None: + super().__init__(parent_server=parent_server) + + def Pause(self, CommandExecutionUUID: UUID, *, metadata: MetadataDict) -> Pause_Responses: + raise NotImplementedError # TODO + + def Resume(self, CommandExecutionUUID: UUID, *, metadata: MetadataDict) -> Resume_Responses: + raise NotImplementedError # TODO diff --git a/laborchestrator/sila_server/feature_implementations/simulationcontroller_impl.py b/laborchestrator/sila_server/feature_implementations/simulationcontroller_impl.py new file mode 100755 index 0000000000000000000000000000000000000000..e8c49df060308234d501fb192ad412b4c4095eba --- /dev/null +++ b/laborchestrator/sila_server/feature_implementations/simulationcontroller_impl.py @@ -0,0 +1,29 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import TYPE_CHECKING + +from sila2.server import MetadataDict + +from ..generated.simulationcontroller import ( + SimulationControllerBase, + StartRealMode_Responses, + StartSimulationMode_Responses, +) + +if TYPE_CHECKING: + from ..server import Server + + +class SimulationControllerImpl(SimulationControllerBase): + def __init__(self, parent_server: Server) -> None: + super().__init__(parent_server=parent_server) + + def get_SimulationMode(self, *, metadata: MetadataDict) -> bool: + raise NotImplementedError # TODO + + def StartSimulationMode(self, *, metadata: MetadataDict) -> StartSimulationMode_Responses: + raise NotImplementedError # TODO + + def StartRealMode(self, *, metadata: MetadataDict) -> StartRealMode_Responses: + raise NotImplementedError # TODO diff --git a/laborchestrator/sila_server/feature_implementations/updated_cancelcontroller_impl.py b/laborchestrator/sila_server/feature_implementations/updated_cancelcontroller_impl.py new file mode 100755 index 0000000000000000000000000000000000000000..9b7036d2e34711166e2247e71cc72e2cff7cc09f --- /dev/null +++ b/laborchestrator/sila_server/feature_implementations/updated_cancelcontroller_impl.py @@ -0,0 +1,22 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import TYPE_CHECKING + +from sila2.server import MetadataDict + +from ..generated.cancelcontroller import UUID, CancelAll_Responses, CancelCommand_Responses, CancelControllerBase + +if TYPE_CHECKING: + from ..server import Server + + +class CancelControllerImpl(CancelControllerBase): + def __init__(self, parent_server: Server) -> None: + super().__init__(parent_server=parent_server) + + def CancelCommand(self, CommandExecutionUUID: UUID, *, metadata: MetadataDict) -> CancelCommand_Responses: + raise NotImplementedError # TODO + + def CancelAll(self, *, metadata: MetadataDict) -> CancelAll_Responses: + raise NotImplementedError # TODO diff --git a/laborchestrator/sila_server/feature_implementations/updated_laborchestratorservice_impl.py 
b/laborchestrator/sila_server/feature_implementations/updated_laborchestratorservice_impl.py new file mode 100755 index 0000000000000000000000000000000000000000..b65682556153fba5ad5a76cf5a19526798819cd6 --- /dev/null +++ b/laborchestrator/sila_server/feature_implementations/updated_laborchestratorservice_impl.py @@ -0,0 +1,50 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import TYPE_CHECKING + +from sila2.server import MetadataDict + +from ..generated.laborchestratorservice import ( + GetStatus_Responses, + LabOrchestratorServiceBase, + LoadLabConfiguration_Responses, + LoadLabConfigurationFile_Responses, + LoadProcess_Responses, + LoadProcessFile_Responses, + StartProcess_Responses, + StopProcess_Responses, +) + +if TYPE_CHECKING: + from ..server import Server + + +class LabOrchestratorServiceImpl(LabOrchestratorServiceBase): + def __init__(self, parent_server: Server) -> None: + super().__init__(parent_server=parent_server) + + def LoadProcess(self, LabProcessName: str, *, metadata: MetadataDict) -> LoadProcess_Responses: + raise NotImplementedError # TODO + + def LoadProcessFile(self, LabProcessFileName: str, *, metadata: MetadataDict) -> LoadProcessFile_Responses: + raise NotImplementedError # TODO + + def LoadLabConfiguration( + self, LabConfigurationName: str, *, metadata: MetadataDict + ) -> LoadLabConfiguration_Responses: + raise NotImplementedError # TODO + + def LoadLabConfigurationFile( + self, LabConfigurationFileName: str, *, metadata: MetadataDict + ) -> LoadLabConfigurationFile_Responses: + raise NotImplementedError # TODO + + def StartProcess(self, ProcessName: str, *, metadata: MetadataDict) -> StartProcess_Responses: + raise NotImplementedError # TODO + + def StopProcess(self, ProcessName: str, *, metadata: MetadataDict) -> StopProcess_Responses: + raise NotImplementedError # TODO + + def GetStatus(self, ProcessName: str, *, metadata: MetadataDict) -> GetStatus_Responses: + raise NotImplementedError # TODO diff --git a/laborchestrator/sila_server/feature_implementations/updated_pausecontroller_impl.py b/laborchestrator/sila_server/feature_implementations/updated_pausecontroller_impl.py new file mode 100755 index 0000000000000000000000000000000000000000..216521e9afb2db1ebf5ef3b843613793981b50cc --- /dev/null +++ b/laborchestrator/sila_server/feature_implementations/updated_pausecontroller_impl.py @@ -0,0 +1,22 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import TYPE_CHECKING + +from sila2.server import MetadataDict + +from ..generated.pausecontroller import UUID, Pause_Responses, PauseControllerBase, Resume_Responses + +if TYPE_CHECKING: + from ..server import Server + + +class PauseControllerImpl(PauseControllerBase): + def __init__(self, parent_server: Server) -> None: + super().__init__(parent_server=parent_server) + + def Pause(self, CommandExecutionUUID: UUID, *, metadata: MetadataDict) -> Pause_Responses: + raise NotImplementedError # TODO + + def Resume(self, CommandExecutionUUID: UUID, *, metadata: MetadataDict) -> Resume_Responses: + raise NotImplementedError # TODO diff --git a/laborchestrator/sila_server/feature_implementations/updated_simulationcontroller_impl.py b/laborchestrator/sila_server/feature_implementations/updated_simulationcontroller_impl.py new file mode 100755 index 0000000000000000000000000000000000000000..e8c49df060308234d501fb192ad412b4c4095eba --- /dev/null +++ 
b/laborchestrator/sila_server/feature_implementations/updated_simulationcontroller_impl.py @@ -0,0 +1,29 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import TYPE_CHECKING + +from sila2.server import MetadataDict + +from ..generated.simulationcontroller import ( + SimulationControllerBase, + StartRealMode_Responses, + StartSimulationMode_Responses, +) + +if TYPE_CHECKING: + from ..server import Server + + +class SimulationControllerImpl(SimulationControllerBase): + def __init__(self, parent_server: Server) -> None: + super().__init__(parent_server=parent_server) + + def get_SimulationMode(self, *, metadata: MetadataDict) -> bool: + raise NotImplementedError # TODO + + def StartSimulationMode(self, *, metadata: MetadataDict) -> StartSimulationMode_Responses: + raise NotImplementedError # TODO + + def StartRealMode(self, *, metadata: MetadataDict) -> StartRealMode_Responses: + raise NotImplementedError # TODO diff --git a/laborchestrator/sila_server/generated/__init__.py b/laborchestrator/sila_server/generated/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..844e1e4287d8698af0fa40bc5942804ed0fde162 --- /dev/null +++ b/laborchestrator/sila_server/generated/__init__.py @@ -0,0 +1,4 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from .client import Client + +__all__ = ["Client"] diff --git a/laborchestrator/sila_server/generated/cancelcontroller/CancelController.proto b/laborchestrator/sila_server/generated/cancelcontroller/CancelController.proto new file mode 100755 index 0000000000000000000000000000000000000000..4017740fa50f8ef5a2742691b94bd406a406e52b --- /dev/null +++ b/laborchestrator/sila_server/generated/cancelcontroller/CancelController.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +import "SiLAFramework.proto"; + +package sila2.org.silastandard.core.commands.cancelcontroller.v1; + +/* This feature offers commands to cancel/terminate Commands. Cancellation is the act of stopping the running Command execution(s), irrevocably. The SiLA Server SHOULD be able to be in a state where any further commands can be issued after a cancellation. */ +service CancelController { + /* Cancel a specified currently running Observable Command or cancel all currently running Observable Commands . For any canceled Observable Command the SiLA Server MUST update the Command Execution Status to "Command Finished with Error". The SiLA Server MUST throw a descriptive error message indicating cancellation as the reason for the Command execution not being able to finish successfully for any canceled Command. */ + rpc CancelCommand (sila2.org.silastandard.core.commands.cancelcontroller.v1.CancelCommand_Parameters) returns (sila2.org.silastandard.core.commands.cancelcontroller.v1.CancelCommand_Responses) {} + /* Cancels all currently running Observable and Unobservable Commands running on this SiLA Server. The SiLA Server MUST throw an Execution Error indicating 'cancellation' as the reason for the Command not being able to finish successfully. */ + rpc CancelAll (sila2.org.silastandard.core.commands.cancelcontroller.v1.CancelAll_Parameters) returns (sila2.org.silastandard.core.commands.cancelcontroller.v1.CancelAll_Responses) {} +} + +/* A Universally Unique Identifier (UUID) referring to observable command executions. */ +message DataType_UUID { + sila2.org.silastandard.String UUID = 1; /* A Universally Unique Identifier (UUID) referring to observable command executions. 
*/ +} + +/* Parameters for CancelCommand */ +message CancelCommand_Parameters { + sila2.org.silastandard.core.commands.cancelcontroller.v1.DataType_UUID CommandExecutionUUID = 1; /* The Command Execution UUID according to the SiLA Standard. */ +} + +/* Responses of CancelCommand */ +message CancelCommand_Responses { +} + +/* Parameters for CancelAll */ +message CancelAll_Parameters { +} + +/* Responses of CancelAll */ +message CancelAll_Responses { +} diff --git a/laborchestrator/sila_server/generated/cancelcontroller/CancelController.sila.xml b/laborchestrator/sila_server/generated/cancelcontroller/CancelController.sila.xml new file mode 100755 index 0000000000000000000000000000000000000000..f56815b85c7cf0f5dd90f6f857c4c4e259d20966 --- /dev/null +++ b/laborchestrator/sila_server/generated/cancelcontroller/CancelController.sila.xml @@ -0,0 +1,64 @@ + + CancelController + Cancel Controller + This feature offers commands to cancel/terminate Commands. Cancellation is the act of stopping the running Command execution(s), + irrevocably. The SiLA Server SHOULD be able to be in a state where any further + commands can be issued after a cancellation. + + CancelCommand + Cancel Command + Cancel a specified currently running Observable Command or cancel all currently running Observable Commands . + For any canceled Observable Command the SiLA Server MUST update the Command Execution Status to "Command Finished + with Error". + The SiLA Server MUST throw a descriptive error message indicating cancellation as the reason for the Command + execution not being able to finish successfully for any canceled Command. + No + + CommandExecutionUUID + Command Execution UUID + The Command Execution UUID according to the SiLA Standard. + + UUID + + + + InvalidCommandExecutionUUID + OperationNotSupported + + + + CancelAll + Cancel All + Cancels all currently running Observable and Unobservable Commands running on this SiLA Server. + The SiLA Server MUST throw an Execution Error indicating 'cancellation' as the reason for the + Command not being able to finish successfully. + No + + + UUID + UUID + A Universally Unique Identifier (UUID) referring to observable command executions. + + + + String + + + 36 + [0-9a-f]{8}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{12} + + + + + + InvalidCommandExecutionUUID + Invalid Command Execution UUID + The given Command Execution UUID does not specify a command that is currently being executed. + + + OperationNotSupported + Operation Not Supported + Canceling is not supported for the SiLA 2 Command with the + specified CommandExecutionUUID. 
+ + diff --git a/laborchestrator/sila_server/generated/cancelcontroller/__init__.py b/laborchestrator/sila_server/generated/cancelcontroller/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..2164b6c96c72e6e2191663b1966df859d80af542 --- /dev/null +++ b/laborchestrator/sila_server/generated/cancelcontroller/__init__.py @@ -0,0 +1,17 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from .cancelcontroller_base import CancelControllerBase +from .cancelcontroller_client import CancelControllerClient +from .cancelcontroller_errors import InvalidCommandExecutionUUID, OperationNotSupported +from .cancelcontroller_feature import CancelControllerFeature +from .cancelcontroller_types import UUID, CancelAll_Responses, CancelCommand_Responses + +__all__ = [ + "CancelControllerBase", + "CancelControllerFeature", + "CancelControllerClient", + "CancelCommand_Responses", + "CancelAll_Responses", + "InvalidCommandExecutionUUID", + "OperationNotSupported", + "UUID", +] diff --git a/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_base.py b/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_base.py new file mode 100755 index 0000000000000000000000000000000000000000..d874fb2a5db68a5a909ba60f57f6c5674e07a91a --- /dev/null +++ b/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_base.py @@ -0,0 +1,55 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING + +from sila2.server import FeatureImplementationBase, MetadataDict + +from .cancelcontroller_types import UUID, CancelAll_Responses, CancelCommand_Responses + +if TYPE_CHECKING: + + from ...server import Server + + +class CancelControllerBase(FeatureImplementationBase, ABC): + parent_server: Server + + def __init__(self, parent_server: Server): + """ + This feature offers commands to cancel/terminate Commands. Cancellation is the act of stopping the running Command execution(s), + irrevocably. The SiLA Server SHOULD be able to be in a state where any further + commands can be issued after a cancellation. + """ + super().__init__(parent_server=parent_server) + + @abstractmethod + def CancelCommand(self, CommandExecutionUUID: UUID, *, metadata: MetadataDict) -> CancelCommand_Responses: + """ + Cancel a specified currently running Observable Command or cancel all currently running Observable Commands . + For any canceled Observable Command the SiLA Server MUST update the Command Execution Status to "Command Finished + with Error". + The SiLA Server MUST throw a descriptive error message indicating cancellation as the reason for the Command + execution not being able to finish successfully for any canceled Command. + + + :param CommandExecutionUUID: The Command Execution UUID according to the SiLA Standard. + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass + + @abstractmethod + def CancelAll(self, *, metadata: MetadataDict) -> CancelAll_Responses: + """ + Cancels all currently running Observable and Unobservable Commands running on this SiLA Server. + The SiLA Server MUST throw an Execution Error indicating 'cancellation' as the reason for the + Command not being able to finish successfully. 
+ + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass diff --git a/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_client.py b/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_client.py new file mode 100755 index 0000000000000000000000000000000000000000..3e17645d0c049c907fee60057f8561079636747f --- /dev/null +++ b/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_client.py @@ -0,0 +1,46 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +# ----- +# This class does not do anything useful at runtime. Its only purpose is to provide type annotations. +# Since sphinx does not support .pyi files (yet?), this is a .py file. +# ----- + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + + from typing import Iterable, Optional + + from cancelcontroller_types import CancelAll_Responses, CancelCommand_Responses + from sila2.client import ClientMetadataInstance + + from .cancelcontroller_types import UUID + + +class CancelControllerClient: + """ + This feature offers commands to cancel/terminate Commands. Cancellation is the act of stopping the running Command execution(s), + irrevocably. The SiLA Server SHOULD be able to be in a state where any further + commands can be issued after a cancellation. + """ + + def CancelCommand( + self, CommandExecutionUUID: UUID, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> CancelCommand_Responses: + """ + Cancel a specified currently running Observable Command or cancel all currently running Observable Commands . + For any canceled Observable Command the SiLA Server MUST update the Command Execution Status to "Command Finished + with Error". + The SiLA Server MUST throw a descriptive error message indicating cancellation as the reason for the Command + execution not being able to finish successfully for any canceled Command. + """ + ... + + def CancelAll(self, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None) -> CancelAll_Responses: + """ + Cancels all currently running Observable and Unobservable Commands running on this SiLA Server. + The SiLA Server MUST throw an Execution Error indicating 'cancellation' as the reason for the + Command not being able to finish successfully. + """ + ... diff --git a/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_errors.py b/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_errors.py new file mode 100755 index 0000000000000000000000000000000000000000..705d6bece652770477e97269a0ce01e2791cc047 --- /dev/null +++ b/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_errors.py @@ -0,0 +1,26 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import Optional + +from sila2.framework.errors.defined_execution_error import DefinedExecutionError + +from .cancelcontroller_feature import CancelControllerFeature + + +class InvalidCommandExecutionUUID(DefinedExecutionError): + def __init__(self, message: Optional[str] = None): + if message is None: + message = "The given Command Execution UUID does not specify a command that is currently being executed." 
+ super().__init__( + CancelControllerFeature.defined_execution_errors["InvalidCommandExecutionUUID"], message=message + ) + + +class OperationNotSupported(DefinedExecutionError): + def __init__(self, message: Optional[str] = None): + if message is None: + message = ( + "Canceling is not supported for the SiLA 2 Command with the\n specified CommandExecutionUUID." + ) + super().__init__(CancelControllerFeature.defined_execution_errors["OperationNotSupported"], message=message) diff --git a/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_feature.py b/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_feature.py new file mode 100755 index 0000000000000000000000000000000000000000..f74bdc19f04fa6c3f215d481e77e176b6fb91363 --- /dev/null +++ b/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_feature.py @@ -0,0 +1,6 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from os.path import dirname, join + +from sila2.framework import Feature + +CancelControllerFeature = Feature(join(dirname(__file__), "CancelController.sila.xml")) diff --git a/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_types.py b/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_types.py new file mode 100755 index 0000000000000000000000000000000000000000..a7e7289a04892f4555ee7550fb2cb53d670844ae --- /dev/null +++ b/laborchestrator/sila_server/generated/cancelcontroller/cancelcontroller_types.py @@ -0,0 +1,17 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import NamedTuple + + +class CancelCommand_Responses(NamedTuple): + + pass + + +class CancelAll_Responses(NamedTuple): + + pass + + +UUID = str diff --git a/laborchestrator/sila_server/generated/client.py b/laborchestrator/sila_server/generated/client.py new file mode 100755 index 0000000000000000000000000000000000000000..0d013772b9a904c15499d9abf5ae4b38685a4424 --- /dev/null +++ b/laborchestrator/sila_server/generated/client.py @@ -0,0 +1,76 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import Set + +from sila2.client import SilaClient +from sila2.framework import FullyQualifiedFeatureIdentifier + +from . 
import cancelcontroller, laborchestratorservice, pausecontroller, simulationcontroller + + +class Client(SilaClient): + + SimulationController: simulationcontroller.SimulationControllerClient + + LabOrchestratorService: laborchestratorservice.LabOrchestratorServiceClient + + CancelController: cancelcontroller.CancelControllerClient + + PauseController: pausecontroller.PauseControllerClient + + _expected_features: Set[FullyQualifiedFeatureIdentifier] = { + FullyQualifiedFeatureIdentifier("org.silastandard/core/SiLAService/v1"), + FullyQualifiedFeatureIdentifier("org.silastandard/core/SimulationController/v1"), + FullyQualifiedFeatureIdentifier("de.unigreifswald/instruments/LabOrchestratorService/v1"), + FullyQualifiedFeatureIdentifier("org.silastandard/core.commands/CancelController/v1"), + FullyQualifiedFeatureIdentifier("org.silastandard/core.commands/PauseController/v2"), + } + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self._register_defined_execution_error_class( + simulationcontroller.SimulationControllerFeature.defined_execution_errors["StartSimulationModeFailed"], + simulationcontroller.StartSimulationModeFailed, + ) + + self._register_defined_execution_error_class( + simulationcontroller.SimulationControllerFeature.defined_execution_errors["StartRealModeFailed"], + simulationcontroller.StartRealModeFailed, + ) + + self._register_defined_execution_error_class( + laborchestratorservice.LabOrchestratorServiceFeature.defined_execution_errors["InvalidProcess"], + laborchestratorservice.InvalidProcess, + ) + + self._register_defined_execution_error_class( + laborchestratorservice.LabOrchestratorServiceFeature.defined_execution_errors["InvalidLabConfiguration"], + laborchestratorservice.InvalidLabConfiguration, + ) + + self._register_defined_execution_error_class( + cancelcontroller.CancelControllerFeature.defined_execution_errors["InvalidCommandExecutionUUID"], + cancelcontroller.InvalidCommandExecutionUUID, + ) + + self._register_defined_execution_error_class( + cancelcontroller.CancelControllerFeature.defined_execution_errors["OperationNotSupported"], + cancelcontroller.OperationNotSupported, + ) + + self._register_defined_execution_error_class( + pausecontroller.PauseControllerFeature.defined_execution_errors["InvalidCommandExecutionUUID"], + pausecontroller.InvalidCommandExecutionUUID, + ) + + self._register_defined_execution_error_class( + pausecontroller.PauseControllerFeature.defined_execution_errors["InvalidCommandState"], + pausecontroller.InvalidCommandState, + ) + + self._register_defined_execution_error_class( + pausecontroller.PauseControllerFeature.defined_execution_errors["OperationNotSupported"], + pausecontroller.OperationNotSupported, + ) diff --git a/laborchestrator/sila_server/generated/laborchestratorservice/LabOrchestratorService.proto b/laborchestrator/sila_server/generated/laborchestratorservice/LabOrchestratorService.proto new file mode 100755 index 0000000000000000000000000000000000000000..6e5796749f4163fc30aaccf89f31c850609161c0 --- /dev/null +++ b/laborchestrator/sila_server/generated/laborchestratorservice/LabOrchestratorService.proto @@ -0,0 +1,93 @@ +syntax = "proto3"; + +import "SiLAFramework.proto"; + +package sila2.de.unigreifswald.instruments.laborchestratorservice.v1; + +/* This feature controls the Lab Orchestrator */ +service LabOrchestratorService { + /* Load a pythonLab Process */ + rpc LoadProcess (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.LoadProcess_Parameters) returns 
(sila2.de.unigreifswald.instruments.laborchestratorservice.v1.LoadProcess_Responses) {} + /* Load a pythonLab Process file */ + rpc LoadProcessFile (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.LoadProcessFile_Parameters) returns (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.LoadProcessFile_Responses) {} + /* Load a Lab Configuration from Database */ + rpc LoadLabConfiguration (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.LoadLabConfiguration_Parameters) returns (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.LoadLabConfiguration_Responses) {} + /* Load a Lab Configuration from YAML file */ + rpc LoadLabConfigurationFile (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.LoadLabConfigurationFile_Parameters) returns (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.LoadLabConfigurationFile_Responses) {} + /* Starts a given process */ + rpc StartProcess (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.StartProcess_Parameters) returns (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.StartProcess_Responses) {} + /* Stops a given process */ + rpc StopProcess (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.StopProcess_Parameters) returns (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.StopProcess_Responses) {} + /* Returns the status of the specified process */ + rpc GetStatus (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.GetStatus_Parameters) returns (sila2.de.unigreifswald.instruments.laborchestratorservice.v1.GetStatus_Responses) {} +} + +/* Status of a Process */ +message DataType_ProcessStatus { + sila2.org.silastandard.String ProcessStatus = 1; /* Status of a Process */ +} + +/* Parameters for LoadProcess */ +message LoadProcess_Parameters { + sila2.org.silastandard.String LabProcessName = 1; /* Name of the Labprocess in the LARA database */ +} + +/* Responses of LoadProcess */ +message LoadProcess_Responses { +} + +/* Parameters for LoadProcessFile */ +message LoadProcessFile_Parameters { + sila2.org.silastandard.String LabProcessFileName = 1; /* Filenname of the Labprocess */ +} + +/* Responses of LoadProcessFile */ +message LoadProcessFile_Responses { + sila2.org.silastandard.String Name = 1; /* Unique name given to the process. Can be used to reference this process. */ +} + +/* Parameters for LoadLabConfiguration */ +message LoadLabConfiguration_Parameters { + sila2.org.silastandard.String LabConfigurationName = 1; /* Name of Lab Configuration to be loaded from the (LARA) database */ +} + +/* Responses of LoadLabConfiguration */ +message LoadLabConfiguration_Responses { +} + +/* Parameters for LoadLabConfigurationFile */ +message LoadLabConfigurationFile_Parameters { + sila2.org.silastandard.String LabConfigurationFileName = 1; /* Filename of Lab Configuration to be loaded - in YAML format. 
*/ +} + +/* Responses of LoadLabConfigurationFile */ +message LoadLabConfigurationFile_Responses { +} + +/* Parameters for StartProcess */ +message StartProcess_Parameters { + sila2.org.silastandard.String ProcessName = 1; /* Name of the process to start */ +} + +/* Responses of StartProcess */ +message StartProcess_Responses { +} + +/* Parameters for StopProcess */ +message StopProcess_Parameters { + sila2.org.silastandard.String ProcessName = 1; /* Name of the process to stop */ +} + +/* Responses of StopProcess */ +message StopProcess_Responses { +} + +/* Parameters for GetStatus */ +message GetStatus_Parameters { + sila2.org.silastandard.String ProcessName = 1; /* Name of the process */ +} + +/* Responses of GetStatus */ +message GetStatus_Responses { + sila2.de.unigreifswald.instruments.laborchestratorservice.v1.DataType_ProcessStatus ProcessStatus = 1; /* Status of the process */ +} diff --git a/laborchestrator/sila_server/generated/laborchestratorservice/LabOrchestratorService.sila.xml b/laborchestrator/sila_server/generated/laborchestratorservice/LabOrchestratorService.sila.xml new file mode 100755 index 0000000000000000000000000000000000000000..0a2b9d67308552f27ee9792d3d03883ca8a27b58 --- /dev/null +++ b/laborchestrator/sila_server/generated/laborchestratorservice/LabOrchestratorService.sila.xml @@ -0,0 +1,162 @@ + + LabOrchestratorService + LabOrchestrator Service + This feature controls the Lab Orchestrator + + LoadProcess + Load Process + Load a pythonLab Process + No + + LabProcessName + Lab Process Name + Name of the Labprocess in the LARA database + + String + + + + InvalidProcess + + + + LoadProcessFile + Load Process File + Load a pythonLab Process file + No + + LabProcessFileName + Lab Process File Name + Filenname of the Labprocess + + String + + + + Name + Name + Unique name given to the process. Can be used to reference this process. + + String + + + + InvalidProcess + + + + LoadLabConfiguration + Load Lab Configuration + Load a Lab Configuration from Database + No + + LabConfigurationName + Lab Configuration Name + Name of Lab Configuration to be loaded from the (LARA) database + + String + + + + InvalidLabConfiguration + + + + LoadLabConfigurationFile + Load Lab Configuration File + Load a Lab Configuration from YAML file + No + + LabConfigurationFileName + Lab Configuration File Name + Filename of Lab Configuration to be loaded - in YAML format. 
+ + String + + + + InvalidLabConfiguration + + + + StartProcess + Start Process + Starts a given process + No + + ProcessName + Process Name + Name of the process to start + + String + + + + + StopProcess + Stop Process + Stops a given process + No + + ProcessName + Process Name + Name of the process to stop + + String + + + + + GetStatus + Get Status + Returns the status of the specified process + No + + ProcessName + Process Name + Name of the process + + String + + + + ProcessStatus + Process Status + Status of the process + + ProcessStatus + + + + + ProcessStatus + Process Status + Status of a Process + + + + String + + + + Idle + Running + Finished + Paused + Error + + + + + + + InvalidProcess + Invalid Process + Invalid Python Lab Process + + + InvalidLabConfiguration + Invalid Lab Configuration + Invalid Lab Configuration + + diff --git a/laborchestrator/sila_server/generated/laborchestratorservice/__init__.py b/laborchestrator/sila_server/generated/laborchestratorservice/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..0fa72c7d3ae51e672935506574637afc2d02857d --- /dev/null +++ b/laborchestrator/sila_server/generated/laborchestratorservice/__init__.py @@ -0,0 +1,31 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from .laborchestratorservice_base import LabOrchestratorServiceBase +from .laborchestratorservice_client import LabOrchestratorServiceClient +from .laborchestratorservice_errors import InvalidLabConfiguration, InvalidProcess +from .laborchestratorservice_feature import LabOrchestratorServiceFeature +from .laborchestratorservice_types import ( + GetStatus_Responses, + LoadLabConfiguration_Responses, + LoadLabConfigurationFile_Responses, + LoadProcess_Responses, + LoadProcessFile_Responses, + ProcessStatus, + StartProcess_Responses, + StopProcess_Responses, +) + +__all__ = [ + "LabOrchestratorServiceBase", + "LabOrchestratorServiceFeature", + "LabOrchestratorServiceClient", + "LoadProcess_Responses", + "LoadProcessFile_Responses", + "LoadLabConfiguration_Responses", + "LoadLabConfigurationFile_Responses", + "StartProcess_Responses", + "StopProcess_Responses", + "GetStatus_Responses", + "InvalidProcess", + "InvalidLabConfiguration", + "ProcessStatus", +] diff --git a/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_base.py b/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_base.py new file mode 100755 index 0000000000000000000000000000000000000000..a720446a3526ce3b2789e41fedc9cb1733361e34 --- /dev/null +++ b/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_base.py @@ -0,0 +1,138 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING + +from sila2.server import FeatureImplementationBase, MetadataDict + +from .laborchestratorservice_types import ( + GetStatus_Responses, + LoadLabConfiguration_Responses, + LoadLabConfigurationFile_Responses, + LoadProcess_Responses, + LoadProcessFile_Responses, + StartProcess_Responses, + StopProcess_Responses, +) + +if TYPE_CHECKING: + + from ...server import Server + + +class LabOrchestratorServiceBase(FeatureImplementationBase, ABC): + parent_server: Server + + def __init__(self, parent_server: Server): + """ + + This feature controls the Lab Orchestrator + + """ + super().__init__(parent_server=parent_server) + + @abstractmethod + def LoadProcess(self, LabProcessName: 
str, *, metadata: MetadataDict) -> LoadProcess_Responses: + """ + Load a pythonLab Process + + + :param LabProcessName: Name of the Labprocess in the LARA database + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass + + @abstractmethod + def LoadProcessFile(self, LabProcessFileName: str, *, metadata: MetadataDict) -> LoadProcessFile_Responses: + """ + Load a pythonLab Process file + + + :param LabProcessFileName: Filenname of the Labprocess + + :param metadata: The SiLA Client Metadata attached to the call + + :return: + + - Name: Unique name given to the process. Can be used to reference this process. + + + """ + pass + + @abstractmethod + def LoadLabConfiguration( + self, LabConfigurationName: str, *, metadata: MetadataDict + ) -> LoadLabConfiguration_Responses: + """ + Load a Lab Configuration from Database + + + :param LabConfigurationName: Name of Lab Configuration to be loaded from the (LARA) database + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass + + @abstractmethod + def LoadLabConfigurationFile( + self, LabConfigurationFileName: str, *, metadata: MetadataDict + ) -> LoadLabConfigurationFile_Responses: + """ + Load a Lab Configuration from YAML file + + + :param LabConfigurationFileName: Filename of Lab Configuration to be loaded - in YAML format. + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass + + @abstractmethod + def StartProcess(self, ProcessName: str, *, metadata: MetadataDict) -> StartProcess_Responses: + """ + Starts a given process + + + :param ProcessName: Name of the process to start + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass + + @abstractmethod + def StopProcess(self, ProcessName: str, *, metadata: MetadataDict) -> StopProcess_Responses: + """ + Stops a given process + + + :param ProcessName: Name of the process to stop + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass + + @abstractmethod + def GetStatus(self, ProcessName: str, *, metadata: MetadataDict) -> GetStatus_Responses: + """ + Returns the status of the specified process + + + :param ProcessName: Name of the process + + :param metadata: The SiLA Client Metadata attached to the call + + :return: + + - ProcessStatus: Status of the process + + + """ + pass diff --git a/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_client.py b/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_client.py new file mode 100755 index 0000000000000000000000000000000000000000..b74556bb4d26e393281cb176957a8ab093f0e184 --- /dev/null +++ b/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_client.py @@ -0,0 +1,88 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +# ----- +# This class does not do anything useful at runtime. Its only purpose is to provide type annotations. +# Since sphinx does not support .pyi files (yet?), this is a .py file. 
+# ----- + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + + from typing import Iterable, Optional + + from laborchestratorservice_types import ( + GetStatus_Responses, + LoadLabConfiguration_Responses, + LoadLabConfigurationFile_Responses, + LoadProcess_Responses, + LoadProcessFile_Responses, + StartProcess_Responses, + StopProcess_Responses, + ) + from sila2.client import ClientMetadataInstance + + +class LabOrchestratorServiceClient: + """ + + This feature controls the Lab Orchestrator + + """ + + def LoadProcess( + self, LabProcessName: str, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> LoadProcess_Responses: + """ + Load a pythonLab Process + """ + ... + + def LoadProcessFile( + self, LabProcessFileName: str, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> LoadProcessFile_Responses: + """ + Load a pythonLab Process file + """ + ... + + def LoadLabConfiguration( + self, LabConfigurationName: str, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> LoadLabConfiguration_Responses: + """ + Load a Lab Configuration from Database + """ + ... + + def LoadLabConfigurationFile( + self, LabConfigurationFileName: str, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> LoadLabConfigurationFile_Responses: + """ + Load a Lab Configuration from YAML file + """ + ... + + def StartProcess( + self, ProcessName: str, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> StartProcess_Responses: + """ + Starts a given process + """ + ... + + def StopProcess( + self, ProcessName: str, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> StopProcess_Responses: + """ + Stops a given process + """ + ... + + def GetStatus( + self, ProcessName: str, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> GetStatus_Responses: + """ + Returns the status of the specified process + """ + ... 
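The LabOrchestratorServiceClient above is only a typing stub; at runtime the feature is reached through the aggregated SiLA 2 client. A minimal usage sketch, assuming the usual sila2 layout with a top-level Client class in generated/client.py (not shown in this diff) and the same discovery pattern the scheduler client uses in start_script.py; host, timeout and the process file name are illustrative placeholders:

    from laborchestrator.sila_server.generated.client import Client  # assumed aggregate client

    client = Client.discover(insecure=True, timeout=10)  # find a running orchestrator server on the network
    # load a process file, then start it under the name the server assigns to it
    response = client.LabOrchestratorService.LoadProcessFile("my_process.py")  # placeholder file name
    client.LabOrchestratorService.StartProcess(response.Name)
    status = client.LabOrchestratorService.GetStatus(response.Name)
    print(status.ProcessStatus)  # one of: Idle, Running, Finished, Paused, Error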
diff --git a/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_errors.py b/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_errors.py new file mode 100755 index 0000000000000000000000000000000000000000..89d551827b09b50249a0746d365df5d26ab2ab50 --- /dev/null +++ b/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_errors.py @@ -0,0 +1,24 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import Optional + +from sila2.framework.errors.defined_execution_error import DefinedExecutionError + +from .laborchestratorservice_feature import LabOrchestratorServiceFeature + + +class InvalidProcess(DefinedExecutionError): + def __init__(self, message: Optional[str] = None): + if message is None: + message = "Invalid Python Lab Process" + super().__init__(LabOrchestratorServiceFeature.defined_execution_errors["InvalidProcess"], message=message) + + +class InvalidLabConfiguration(DefinedExecutionError): + def __init__(self, message: Optional[str] = None): + if message is None: + message = "Invalid Lab Configuration" + super().__init__( + LabOrchestratorServiceFeature.defined_execution_errors["InvalidLabConfiguration"], message=message + ) diff --git a/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_feature.py b/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_feature.py new file mode 100755 index 0000000000000000000000000000000000000000..5dabb0e33109338d49b1696eaadfada8dd35e24b --- /dev/null +++ b/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_feature.py @@ -0,0 +1,6 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from os.path import dirname, join + +from sila2.framework import Feature + +LabOrchestratorServiceFeature = Feature(join(dirname(__file__), "LabOrchestratorService.sila.xml")) diff --git a/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_types.py b/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_types.py new file mode 100755 index 0000000000000000000000000000000000000000..8a2df76b558b4d3d2169b63ed41ae9c6acf7f9fc --- /dev/null +++ b/laborchestrator/sila_server/generated/laborchestratorservice/laborchestratorservice_types.py @@ -0,0 +1,48 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import NamedTuple + + +class LoadProcess_Responses(NamedTuple): + + pass + + +class LoadProcessFile_Responses(NamedTuple): + + Name: str + """ + Unique name given to the process. Can be used to reference this process. 
+ """ + + +class LoadLabConfiguration_Responses(NamedTuple): + + pass + + +class LoadLabConfigurationFile_Responses(NamedTuple): + + pass + + +class StartProcess_Responses(NamedTuple): + + pass + + +class StopProcess_Responses(NamedTuple): + + pass + + +class GetStatus_Responses(NamedTuple): + + ProcessStatus: ProcessStatus + """ + Status of the process + """ + + +ProcessStatus = str diff --git a/laborchestrator/sila_server/generated/pausecontroller/PauseController.proto b/laborchestrator/sila_server/generated/pausecontroller/PauseController.proto new file mode 100755 index 0000000000000000000000000000000000000000..a264a765c73a56849b96287ed51789068f305c3b --- /dev/null +++ b/laborchestrator/sila_server/generated/pausecontroller/PauseController.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +import "SiLAFramework.proto"; + +package sila2.org.silastandard.core.commands.pausecontroller.v2; + +/* Allows to pause or resume a currently running Observable Command. Pausing is the act of stopping the progress of the desired intent of a Command with the option of continuing the execution when resuming. A SiLA Client SHOULD be able to pause or resume the Observable Commands at any time. Not every Observable Command might support this Feature. If not, an "OperationNotSupported" Execution Error MUST be thrown. */ +service PauseController { + /* Pause the Command execution. The Command can then be resumed again. The Command Execution Status of the Observable Command MUST not be affected. */ + rpc Pause (sila2.org.silastandard.core.commands.pausecontroller.v2.Pause_Parameters) returns (sila2.org.silastandard.core.commands.pausecontroller.v2.Pause_Responses) {} + /* Resume the Command after it has been paused. */ + rpc Resume (sila2.org.silastandard.core.commands.pausecontroller.v2.Resume_Parameters) returns (sila2.org.silastandard.core.commands.pausecontroller.v2.Resume_Responses) {} + /* A List of Command Execution UUID that are in a paused state. */ + rpc Subscribe_PausedCommands (sila2.org.silastandard.core.commands.pausecontroller.v2.Subscribe_PausedCommands_Parameters) returns (stream sila2.org.silastandard.core.commands.pausecontroller.v2.Subscribe_PausedCommands_Responses) {} +} + +/* A Universally Unique Identifier (UUID) referring to observable command executions. */ +message DataType_UUID { + sila2.org.silastandard.String UUID = 1; /* A Universally Unique Identifier (UUID) referring to observable command executions. */ +} + +/* Parameters for Pause */ +message Pause_Parameters { + sila2.org.silastandard.core.commands.pausecontroller.v2.DataType_UUID CommandExecutionUUID = 1; /* The Command Execution UUID according to the SiLA Standard. */ +} + +/* Responses of Pause */ +message Pause_Responses { +} + +/* Parameters for Resume */ +message Resume_Parameters { + sila2.org.silastandard.core.commands.pausecontroller.v2.DataType_UUID CommandExecutionUUID = 1; /* The Command Execution UUID according to the SiLA Standard. */ +} + +/* Responses of Resume */ +message Resume_Responses { +} + +/* Parameters for PausedCommands */ +message Subscribe_PausedCommands_Parameters { +} + +/* Responses of PausedCommands */ +message Subscribe_PausedCommands_Responses { + repeated sila2.org.silastandard.core.commands.pausecontroller.v2.DataType_UUID PausedCommands = 1; /* A List of Command Execution UUID that are in a paused state. 
*/ +} diff --git a/laborchestrator/sila_server/generated/pausecontroller/PauseController.sila.xml b/laborchestrator/sila_server/generated/pausecontroller/PauseController.sila.xml new file mode 100755 index 0000000000000000000000000000000000000000..2c785f46a69fc98ff37680fdd20ae287f41fd2a7 --- /dev/null +++ b/laborchestrator/sila_server/generated/pausecontroller/PauseController.sila.xml @@ -0,0 +1,93 @@ + + PauseController + Pause Controller + Allows to pause or resume a currently running Observable Command. Pausing is the act of stopping the + progress of the desired intent of a Command with the option of continuing the execution when resuming. + + A SiLA Client SHOULD be able to pause or resume the Observable Commands at any time. Not every Observable Command + might support this Feature. If not, an "OperationNotSupported" Execution Error MUST be thrown. + + Pause + Pause + Pause the Command execution. The Command can then be resumed again. The Command Execution Status of the + Observable Command MUST not be affected. + No + + CommandExecutionUUID + Command Execution UUID + The Command Execution UUID according to the SiLA Standard. + + UUID + + + + InvalidCommandExecutionUUID + InvalidCommandState + OperationNotSupported + + + + Resume + Resume + Resume the Command after it has been paused. + No + + CommandExecutionUUID + Command Execution UUID + The Command Execution UUID according to the SiLA Standard. + + UUID + + + + InvalidCommandExecutionUUID + InvalidCommandState + OperationNotSupported + + + + PausedCommands + Paused Commands + A List of Command Execution UUID that are in a paused state. + Yes + + + + UUID + + + + + + InvalidCommandExecutionUUID + Invalid Command Execution UUID + The given Command Execution UUID does not specify a command that is currently being executed. + + + InvalidCommandState + Invalid Command State + The specified command is not in a valid state to perform the operation (Pause or Resume). + + + OperationNotSupported + Operation Not Supported + The operation (Pause or Resume) is not supported for the SiLA 2 command which the + specified Command Execution UUID belongs to. + + + UUID + UUID + A Universally Unique Identifier (UUID) referring to observable command executions. 
+ + + + String + + + 36 + [0-9a-f]{8}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{12} + + + + + diff --git a/laborchestrator/sila_server/generated/pausecontroller/__init__.py b/laborchestrator/sila_server/generated/pausecontroller/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..ce853e838447d6213b1a0f494237b94497a51902 --- /dev/null +++ b/laborchestrator/sila_server/generated/pausecontroller/__init__.py @@ -0,0 +1,18 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from .pausecontroller_base import PauseControllerBase +from .pausecontroller_client import PauseControllerClient +from .pausecontroller_errors import InvalidCommandExecutionUUID, InvalidCommandState, OperationNotSupported +from .pausecontroller_feature import PauseControllerFeature +from .pausecontroller_types import UUID, Pause_Responses, Resume_Responses + +__all__ = [ + "PauseControllerBase", + "PauseControllerFeature", + "PauseControllerClient", + "Pause_Responses", + "Resume_Responses", + "InvalidCommandExecutionUUID", + "InvalidCommandState", + "OperationNotSupported", + "UUID", +] diff --git a/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_base.py b/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_base.py new file mode 100755 index 0000000000000000000000000000000000000000..feace0fb34a52029acfde003b2aab4b7e778c270 --- /dev/null +++ b/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_base.py @@ -0,0 +1,106 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from abc import ABC, abstractmethod +from queue import Queue +from typing import TYPE_CHECKING, List, Optional, Union + +from sila2.server import FeatureImplementationBase, MetadataDict + +from .pausecontroller_types import UUID, Pause_Responses, Resume_Responses + +if TYPE_CHECKING: + + from ...server import Server + + +class PauseControllerBase(FeatureImplementationBase, ABC): + parent_server: Server + + _PausedCommands_producer_queue: Queue[Union[List[UUID], Exception]] + _PausedCommands_current_value: List[UUID] + + def __init__(self, parent_server: Server): + """ + Allows to pause or resume a currently running Observable Command. Pausing is the act of stopping the + progress of the desired intent of a Command with the option of continuing the execution when resuming. + + A SiLA Client SHOULD be able to pause or resume the Observable Commands at any time. Not every Observable Command + might support this Feature. If not, an "OperationNotSupported" Execution Error MUST be thrown. + """ + super().__init__(parent_server=parent_server) + + self._PausedCommands_producer_queue = Queue() + + def update_PausedCommands(self, PausedCommands: List[UUID], queue: Optional[Queue[List[UUID]]] = None) -> None: + """ + A List of Command Execution UUID that are in a paused state. + + This method updates the observable property 'PausedCommands'. + + :param queue: The queue to send updates to. If None, the default Queue will be used. + """ + if queue is None: + queue = self._PausedCommands_producer_queue + self._PausedCommands_current_value = PausedCommands + queue.put(PausedCommands) + + def PausedCommands_on_subscription(self, *, metadata: MetadataDict) -> Optional[Queue[List[UUID]]]: + """ + A List of Command Execution UUID that are in a paused state. 
+ + This method is called when a client subscribes to the observable property 'PausedCommands' + + :param metadata: The SiLA Client Metadata attached to the call + :return: Optional `Queue` that should be used for updating this property. + If None, the default Queue will be used. + """ + pass + + def abort_PausedCommands_subscriptions(self, error: Exception, queue: Optional[Queue[List[UUID]]] = None) -> None: + """ + A List of Command Execution UUID that are in a paused state. + + This method aborts subscriptions to the observable property 'PausedCommands'. + + :param error: The Exception to be sent to the subscribing client. + If it is no DefinedExecutionError or UndefinedExecutionError, it will be wrapped in an UndefinedExecutionError. + :param queue: The queue to abort. If None, the default Queue will be used. + """ + if queue is None: + queue = self._PausedCommands_producer_queue + queue.put(error) + + @property + def current_PausedCommands(self) -> List[UUID]: + try: + return self._PausedCommands_current_value + except AttributeError: + raise AttributeError("Observable property PausedCommands has never been set") + + @abstractmethod + def Pause(self, CommandExecutionUUID: UUID, *, metadata: MetadataDict) -> Pause_Responses: + """ + Pause the Command execution. The Command can then be resumed again. The Command Execution Status of the + Observable Command MUST not be affected. + + + :param CommandExecutionUUID: The Command Execution UUID according to the SiLA Standard. + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass + + @abstractmethod + def Resume(self, CommandExecutionUUID: UUID, *, metadata: MetadataDict) -> Resume_Responses: + """ + Resume the Command after it has been paused. + + + :param CommandExecutionUUID: The Command Execution UUID according to the SiLA Standard. + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass diff --git a/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_client.py b/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_client.py new file mode 100755 index 0000000000000000000000000000000000000000..e151a31c8e90998a59cd3ece4380d56b530ef186 --- /dev/null +++ b/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_client.py @@ -0,0 +1,50 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +# ----- +# This class does not do anything useful at runtime. Its only purpose is to provide type annotations. +# Since sphinx does not support .pyi files (yet?), this is a .py file. +# ----- + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + + from typing import Iterable, List, Optional + + from pausecontroller_types import Pause_Responses, Resume_Responses + from sila2.client import ClientMetadataInstance, ClientObservableProperty + + from .pausecontroller_types import UUID + + +class PauseControllerClient: + """ + Allows to pause or resume a currently running Observable Command. Pausing is the act of stopping the + progress of the desired intent of a Command with the option of continuing the execution when resuming. + + A SiLA Client SHOULD be able to pause or resume the Observable Commands at any time. Not every Observable Command + might support this Feature. If not, an "OperationNotSupported" Execution Error MUST be thrown. + """ + + PausedCommands: ClientObservableProperty[List[UUID]] + """ + A List of Command Execution UUID that are in a paused state. 
+ """ + + def Pause( + self, CommandExecutionUUID: UUID, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> Pause_Responses: + """ + Pause the Command execution. The Command can then be resumed again. The Command Execution Status of the + Observable Command MUST not be affected. + """ + ... + + def Resume( + self, CommandExecutionUUID: UUID, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> Resume_Responses: + """ + Resume the Command after it has been paused. + """ + ... diff --git a/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_errors.py b/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_errors.py new file mode 100755 index 0000000000000000000000000000000000000000..32e475e8470d712845f34f3c75a61bc8cadc06a5 --- /dev/null +++ b/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_errors.py @@ -0,0 +1,31 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import Optional + +from sila2.framework.errors.defined_execution_error import DefinedExecutionError + +from .pausecontroller_feature import PauseControllerFeature + + +class InvalidCommandExecutionUUID(DefinedExecutionError): + def __init__(self, message: Optional[str] = None): + if message is None: + message = "The given Command Execution UUID does not specify a command that is currently being executed." + super().__init__( + PauseControllerFeature.defined_execution_errors["InvalidCommandExecutionUUID"], message=message + ) + + +class InvalidCommandState(DefinedExecutionError): + def __init__(self, message: Optional[str] = None): + if message is None: + message = "The specified command is not in a valid state to perform the operation (Pause or Resume)." + super().__init__(PauseControllerFeature.defined_execution_errors["InvalidCommandState"], message=message) + + +class OperationNotSupported(DefinedExecutionError): + def __init__(self, message: Optional[str] = None): + if message is None: + message = "The operation (Pause or Resume) is not supported for the SiLA 2 command which the\n specified Command Execution UUID belongs to." 
+ super().__init__(PauseControllerFeature.defined_execution_errors["OperationNotSupported"], message=message) diff --git a/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_feature.py b/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_feature.py new file mode 100755 index 0000000000000000000000000000000000000000..a8bc9a922e2bfc1265bc9edeed66202eab77dfa0 --- /dev/null +++ b/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_feature.py @@ -0,0 +1,6 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from os.path import dirname, join + +from sila2.framework import Feature + +PauseControllerFeature = Feature(join(dirname(__file__), "PauseController.sila.xml")) diff --git a/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_types.py b/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_types.py new file mode 100755 index 0000000000000000000000000000000000000000..b8a6c465684ffbd6a919a487fa4154dc79e6433d --- /dev/null +++ b/laborchestrator/sila_server/generated/pausecontroller/pausecontroller_types.py @@ -0,0 +1,17 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import NamedTuple + + +class Pause_Responses(NamedTuple): + + pass + + +class Resume_Responses(NamedTuple): + + pass + + +UUID = str diff --git a/laborchestrator/sila_server/generated/simulationcontroller/SimulationController.proto b/laborchestrator/sila_server/generated/simulationcontroller/SimulationController.proto new file mode 100755 index 0000000000000000000000000000000000000000..c5ba1a03082d06e8e0fd47832b16f573bb8f8bee --- /dev/null +++ b/laborchestrator/sila_server/generated/simulationcontroller/SimulationController.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; + +import "SiLAFramework.proto"; + +package sila2.org.silastandard.core.simulationcontroller.v1; + +/* This Feature provides control over the simulation behaviour of a SiLA Server. A SiLA Server can run in two modes: (a) Real Mode - with real activities, e.g. addressing or controlling real hardware, e.g. through serial/CANBus commands, writing to real databases, moving real objects etc. (b) Simulation Mode - where every command is only simulated and responses are just example returns. Note that certain commands and properties might not be affected by this feature if they do not interact with the real world. */ +service SimulationController { + /* Sets the SiLA Server to run in Simulation Mode, i.e. all following commands are executed in simulation mode. The Simulation Mode can only be entered, if all hardware operations have been safely terminated or are in a controlled, safe state. The simulation mode can be stopped by issuing the 'Start Real Mode' command. */ + rpc StartSimulationMode (sila2.org.silastandard.core.simulationcontroller.v1.StartSimulationMode_Parameters) returns (sila2.org.silastandard.core.simulationcontroller.v1.StartSimulationMode_Responses) {} + /* Sets the SiLA Server to run in real mode, i.e. all following commands are executed with real-world interactions, like serial port/CAN communication, motor actions etc. If the server is in Simulation Mode it can be interrupted at any time. A re-initialization of the hardware might be required. The Real Mode can be stopped by issuing the 'Start Simulation Mode' command. 
*/ + rpc StartRealMode (sila2.org.silastandard.core.simulationcontroller.v1.StartRealMode_Parameters) returns (sila2.org.silastandard.core.simulationcontroller.v1.StartRealMode_Responses) {} + /* Indication whether SiLA Server is in Simulation Mode or not. */ + rpc Get_SimulationMode (sila2.org.silastandard.core.simulationcontroller.v1.Get_SimulationMode_Parameters) returns (sila2.org.silastandard.core.simulationcontroller.v1.Get_SimulationMode_Responses) {} +} + +/* Parameters for StartSimulationMode */ +message StartSimulationMode_Parameters { +} + +/* Responses of StartSimulationMode */ +message StartSimulationMode_Responses { +} + +/* Parameters for StartRealMode */ +message StartRealMode_Parameters { +} + +/* Responses of StartRealMode */ +message StartRealMode_Responses { +} + +/* Parameters for SimulationMode */ +message Get_SimulationMode_Parameters { +} + +/* Responses of SimulationMode */ +message Get_SimulationMode_Responses { + sila2.org.silastandard.Boolean SimulationMode = 1; /* Indication whether SiLA Server is in Simulation Mode or not. */ +} diff --git a/laborchestrator/sila_server/generated/simulationcontroller/SimulationController.sila.xml b/laborchestrator/sila_server/generated/simulationcontroller/SimulationController.sila.xml new file mode 100755 index 0000000000000000000000000000000000000000..e77378247f872b937b90b7e99bed92fde5b8ba50 --- /dev/null +++ b/laborchestrator/sila_server/generated/simulationcontroller/SimulationController.sila.xml @@ -0,0 +1,62 @@ + + SimulationController + Simulation Controller + This Feature provides control over the simulation behaviour of a SiLA Server. + + A SiLA Server can run in two modes: + (a) Real Mode - with real activities, e.g. addressing or controlling real hardware, e.g. through serial/CANBus commands, + writing to real databases, moving real objects etc. + (b) Simulation Mode - where every command is only simulated and responses are just example returns. + + Note that certain commands and properties might not be affected by this feature if they + do not interact with the real world. + + StartSimulationMode + Start Simulation Mode + Sets the SiLA Server to run in Simulation Mode, i.e. all following commands are executed in simulation mode. + + The Simulation Mode can only be entered, if all hardware operations have been safely terminated + or are in a controlled, safe state. + + The simulation mode can be stopped by issuing the 'Start Real Mode' command. + No + + StartSimulationModeFailed + + + + StartRealMode + Start Real Mode + Sets the SiLA Server to run in real mode, i.e. all following commands are executed with real-world + interactions, like serial port/CAN communication, motor actions etc. + + If the server is in Simulation Mode it can be interrupted at any time. A re-initialization of + the hardware might be required. The Real Mode can be stopped by issuing the 'Start Simulation Mode' command. + No + + StartRealModeFailed + + + + SimulationMode + SimulationMode + Indication whether SiLA Server is in Simulation Mode or not. + No + + Boolean + + + + StartSimulationModeFailed + The start of Simulation Mode failed. + The server cannot change to Simulation Mode. + This error can, e.g., be thrown, if a real-world process needs to be ended before switching to simulation + mode. + + + StartRealModeFailed + The start of Real Mode failed. + The server cannot change to Real Mode. + This error can, e.g., be thrown, if a device is not ready to change into Real Mode. 
+ + diff --git a/laborchestrator/sila_server/generated/simulationcontroller/__init__.py b/laborchestrator/sila_server/generated/simulationcontroller/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..52c8ded610d92daa205d0a36532b35fd1fc0a790 --- /dev/null +++ b/laborchestrator/sila_server/generated/simulationcontroller/__init__.py @@ -0,0 +1,16 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from .simulationcontroller_base import SimulationControllerBase +from .simulationcontroller_client import SimulationControllerClient +from .simulationcontroller_errors import StartRealModeFailed, StartSimulationModeFailed +from .simulationcontroller_feature import SimulationControllerFeature +from .simulationcontroller_types import StartRealMode_Responses, StartSimulationMode_Responses + +__all__ = [ + "SimulationControllerBase", + "SimulationControllerFeature", + "SimulationControllerClient", + "StartSimulationMode_Responses", + "StartRealMode_Responses", + "StartSimulationModeFailed", + "StartRealModeFailed", +] diff --git a/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_base.py b/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_base.py new file mode 100755 index 0000000000000000000000000000000000000000..35cd82375a6a6569fa3049e8608879b9d60613eb --- /dev/null +++ b/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_base.py @@ -0,0 +1,72 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING + +from sila2.server import FeatureImplementationBase, MetadataDict + +from .simulationcontroller_types import StartRealMode_Responses, StartSimulationMode_Responses + +if TYPE_CHECKING: + + from ...server import Server + + +class SimulationControllerBase(FeatureImplementationBase, ABC): + parent_server: Server + + def __init__(self, parent_server: Server): + """ + This Feature provides control over the simulation behaviour of a SiLA Server. + + A SiLA Server can run in two modes: + (a) Real Mode - with real activities, e.g. addressing or controlling real hardware, e.g. through serial/CANBus commands, + writing to real databases, moving real objects etc. + (b) Simulation Mode - where every command is only simulated and responses are just example returns. + + Note that certain commands and properties might not be affected by this feature if they + do not interact with the real world. + """ + super().__init__(parent_server=parent_server) + + @abstractmethod + def get_SimulationMode(self, *, metadata: MetadataDict) -> bool: + """ + Indication whether SiLA Server is in Simulation Mode or not. + + :param metadata: The SiLA Client Metadata attached to the call + :return: Indication whether SiLA Server is in Simulation Mode or not. + """ + pass + + @abstractmethod + def StartSimulationMode(self, *, metadata: MetadataDict) -> StartSimulationMode_Responses: + """ + Sets the SiLA Server to run in Simulation Mode, i.e. all following commands are executed in simulation mode. + + The Simulation Mode can only be entered, if all hardware operations have been safely terminated + or are in a controlled, safe state. + + The simulation mode can be stopped by issuing the 'Start Real Mode' command. 
+ + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass + + @abstractmethod + def StartRealMode(self, *, metadata: MetadataDict) -> StartRealMode_Responses: + """ + Sets the SiLA Server to run in real mode, i.e. all following commands are executed with real-world + interactions, like serial port/CAN communication, motor actions etc. + + If the server is in Simulation Mode it can be interrupted at any time. A re-initialization of + the hardware might be required. The Real Mode can be stopped by issuing the 'Start Simulation Mode' command. + + + :param metadata: The SiLA Client Metadata attached to the call + + """ + pass diff --git a/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_client.py b/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_client.py new file mode 100755 index 0000000000000000000000000000000000000000..03f5af46693b4723da733c4a8107d87d26774eaf --- /dev/null +++ b/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_client.py @@ -0,0 +1,58 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +# ----- +# This class does not do anything useful at runtime. Its only purpose is to provide type annotations. +# Since sphinx does not support .pyi files (yet?), this is a .py file. +# ----- + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + + from typing import Iterable, Optional + + from sila2.client import ClientMetadataInstance, ClientUnobservableProperty + from simulationcontroller_types import StartRealMode_Responses, StartSimulationMode_Responses + + +class SimulationControllerClient: + """ + This Feature provides control over the simulation behaviour of a SiLA Server. + + A SiLA Server can run in two modes: + (a) Real Mode - with real activities, e.g. addressing or controlling real hardware, e.g. through serial/CANBus commands, + writing to real databases, moving real objects etc. + (b) Simulation Mode - where every command is only simulated and responses are just example returns. + + Note that certain commands and properties might not be affected by this feature if they + do not interact with the real world. + """ + + SimulationMode: ClientUnobservableProperty[bool] + """ + Indication whether SiLA Server is in Simulation Mode or not. + """ + + def StartSimulationMode( + self, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None + ) -> StartSimulationMode_Responses: + """ + Sets the SiLA Server to run in Simulation Mode, i.e. all following commands are executed in simulation mode. + + The Simulation Mode can only be entered, if all hardware operations have been safely terminated + or are in a controlled, safe state. + + The simulation mode can be stopped by issuing the 'Start Real Mode' command. + """ + ... + + def StartRealMode(self, *, metadata: Optional[Iterable[ClientMetadataInstance]] = None) -> StartRealMode_Responses: + """ + Sets the SiLA Server to run in real mode, i.e. all following commands are executed with real-world + interactions, like serial port/CAN communication, motor actions etc. + + If the server is in Simulation Mode it can be interrupted at any time. A re-initialization of + the hardware might be required. The Real Mode can be stopped by issuing the 'Start Simulation Mode' command. + """ + ... 
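The same aggregated client can drive the standard SiLA core features registered on the server (PauseController and SimulationController above). A hedged sketch only: the property accessors (.get() for unobservable properties, .subscribe() for observable ones) follow common sila2 client conventions and are an assumption here, as is the Client class itself:

    from laborchestrator.sila_server.generated.client import Client  # assumed aggregate client

    client = Client.discover(insecure=True, timeout=10)
    # switch the orchestrator into simulation mode unless it is already simulating
    if not client.SimulationController.SimulationMode.get():
        client.SimulationController.StartSimulationMode()
    # observe which command executions are currently paused
    subscription = client.PauseController.PausedCommands.subscribe()
    print(next(iter(subscription)))  # first update: list of paused command execution UUIDs
    subscription.cancel()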
diff --git a/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_errors.py b/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_errors.py new file mode 100755 index 0000000000000000000000000000000000000000..062e98ed1e7b5d51e21b1232aa99e994f5281cc5 --- /dev/null +++ b/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_errors.py @@ -0,0 +1,24 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import Optional + +from sila2.framework.errors.defined_execution_error import DefinedExecutionError + +from .simulationcontroller_feature import SimulationControllerFeature + + +class StartSimulationModeFailed(DefinedExecutionError): + def __init__(self, message: Optional[str] = None): + if message is None: + message = "The server cannot change to Simulation Mode.\n This error can, e.g., be thrown, if a real-world process needs to be ended before switching to simulation\n mode." + super().__init__( + SimulationControllerFeature.defined_execution_errors["StartSimulationModeFailed"], message=message + ) + + +class StartRealModeFailed(DefinedExecutionError): + def __init__(self, message: Optional[str] = None): + if message is None: + message = "The server cannot change to Real Mode.\n This error can, e.g., be thrown, if a device is not ready to change into Real Mode." + super().__init__(SimulationControllerFeature.defined_execution_errors["StartRealModeFailed"], message=message) diff --git a/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_feature.py b/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_feature.py new file mode 100755 index 0000000000000000000000000000000000000000..12205153c19a462b867640976c5e5ab8d5d35b35 --- /dev/null +++ b/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_feature.py @@ -0,0 +1,6 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from os.path import dirname, join + +from sila2.framework import Feature + +SimulationControllerFeature = Feature(join(dirname(__file__), "SimulationController.sila.xml")) diff --git a/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_types.py b/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_types.py new file mode 100755 index 0000000000000000000000000000000000000000..f77c0e40fe8e6461b18882bc6749509ae4b7f172 --- /dev/null +++ b/laborchestrator/sila_server/generated/simulationcontroller/simulationcontroller_types.py @@ -0,0 +1,14 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 +from __future__ import annotations + +from typing import NamedTuple + + +class StartSimulationMode_Responses(NamedTuple): + + pass + + +class StartRealMode_Responses(NamedTuple): + + pass diff --git a/laborchestrator/sila_server/py.typed b/laborchestrator/sila_server/py.typed new file mode 100755 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/laborchestrator/sila_server/server.py b/laborchestrator/sila_server/server.py new file mode 100755 index 0000000000000000000000000000000000000000..8b2feb988fa64a30643d5cd76d22eda68c8195d4 --- /dev/null +++ b/laborchestrator/sila_server/server.py @@ -0,0 +1,41 @@ +# Generated by sila2.code_generator; sila2.__version__: 0.10.3 + +from typing import Optional +from uuid import UUID + +from sila2.server import SilaServer + +from 
.feature_implementations.cancelcontroller_impl import CancelControllerImpl +from .feature_implementations.laborchestratorservice_impl import LabOrchestratorServiceImpl +from .feature_implementations.pausecontroller_impl import PauseControllerImpl +from .feature_implementations.simulationcontroller_impl import SimulationControllerImpl +from .generated.cancelcontroller import CancelControllerFeature +from .generated.laborchestratorservice import LabOrchestratorServiceFeature +from .generated.pausecontroller import PauseControllerFeature +from .generated.simulationcontroller import SimulationControllerFeature +from laborchestrator.orchestrator_interface import OrchestratorInterface + + +class Server(SilaServer): + def __init__(self, orchestrator: OrchestratorInterface, server_uuid: Optional[UUID] = None): + super().__init__( + server_name="Orchestrator", + server_type="PythonLabOrchestratorServer", + server_version="0.1", + server_description="Use this to control a running pythonlaborchestrator", + server_vendor_url="https://gitlab.com/SiLA2/sila_python", + server_uuid=server_uuid, + ) + self.orchestrator = orchestrator + + self.cancelcontroller = CancelControllerImpl(self) + self.set_feature_implementation(CancelControllerFeature, self.cancelcontroller) + + self.laborchestratorservice = LabOrchestratorServiceImpl(self) + self.set_feature_implementation(LabOrchestratorServiceFeature, self.laborchestratorservice) + + self.pausecontroller = PauseControllerImpl(self) + self.set_feature_implementation(PauseControllerFeature, self.pausecontroller) + + self.simulationcontroller = SimulationControllerImpl(self) + self.set_feature_implementation(SimulationControllerFeature, self.simulationcontroller) diff --git a/laborchestrator/start_script.py b/laborchestrator/start_script.py new file mode 100755 index 0000000000000000000000000000000000000000..a94ebeea9504e1aeec8a9f09ca342450651288b3 --- /dev/null +++ b/laborchestrator/start_script.py @@ -0,0 +1,27 @@ +from laborchestrator.old_dash_app import SMDashApp +from laborchestrator.orchestrator_implementation import Orchestrator +import time +from os import path +from laborchestrator.logging_manager import StandardLogger as Logger + + +# create orchestrator +orchestrator = Orchestrator() +# try to find a running scheduler server and set its lab configuration to the default +try: + from pythonlabscheduler.sila_server.generated.client import Client as SchedulerClient + scheduler = SchedulerClient.discover(insecure=True, timeout=5) + # get the absolute filepath + here = path.abspath(__file__) + config_file = path.join('..', "tests", "test_data", "sila_server_config_changed.yml") + #scheduler.LabConfigurationController.LoadJobShopFromFile(config_file) + Logger.info("Configured the lab of the scheduling service") +except: + Logger.warning("Could not find a running scheduler server. You will have to configure the lab manually.") + +# start the dash app +dash_app = SMDashApp(orchestrator) +dash_app.run() + +while True: + time.sleep(1) diff --git a/laborchestrator/structures.py b/laborchestrator/structures.py new file mode 100755 index 0000000000000000000000000000000000000000..9828f3c537391a142f66e37145cd9e95c6ad941a --- /dev/null +++ b/laborchestrator/structures.py @@ -0,0 +1,584 @@ +""" +Here we define the structures of a process, a scheduling instance etc. 
+"""
+import traceback
+from dataclasses import dataclass, field
+from typing import List, Dict, Any, Union, Callable, Optional
+from enum import IntEnum
+from copy import deepcopy
+from datetime import datetime, timedelta
+from laborchestrator.orchestrator_interface import ProcessExecutionState
+from laborchestrator.logging_manager import StandardLogger as Logger
+import networkx as nx
+import pandas as pd
+import plotly.express as px
+from graphviz import Digraph
+
+
+class StepStatus(IntEnum):
+    WAITING = 0
+    RUNNING = 1
+    FINISHED = 2
+    ERROR = 3
+
+
+@dataclass
+class UsedDevice:
+    device_type: Any
+    name: Optional[str] = None  # is set when a specific device is assigned
+    preferred: Optional[str] = None  # optional wish for a specific device
+    tag: str = ""  # convenient field to add information like main, source, target, additional, etc.
+
+    def __str__(self):
+        return f"{self.tag}|{self.device_type}|{self.name}|{self.preferred}"
+
+
+@dataclass
+class ContainerInfo:
+    """Editable data collection for containers going through a lab process."""
+    name: str
+    current_device: str
+    current_pos: int
+    start_device: UsedDevice
+    filled: bool = True  # this is safer, since filled containers are handled more carefully
+    content: str = ""
+    barcode: Optional[str] = None
+    finished: bool = False
+    lidded: bool = False
+    lid_site: Any = None
+    in_error_state: bool = False
+    is_reagent: bool = False
+
+
+@dataclass
+class ProcessStep:
+    """Editable data collection for a general operation in a lab process."""
+    name: str  # important for referencing; should be the same in the workflow graph
+    cont_names: List[str]  # the first one should be the main container (if there is such)
+    function: str  # todo: change this to some kind of enum
+    duration: float  # duration of the operation in seconds
+    process_name: str = ""  # unique (internal, human-readable) name of the experiment this job belongs to
+    status: StepStatus = StepStatus.WAITING
+    start: Union[datetime, None] = None
+    finish: Union[datetime, None] = None
+    prior: List[str] = field(default_factory=list)  # list of the names of operations prior to this one
+    used_devices: List[UsedDevice] = field(default_factory=list)  # holds information on the devices used
+    label: str = 'LABEL'  # used only for visualization
+    wait_cost: Dict[str, float] = field(default_factory=dict)  # waiting costs after prior operations (linked by name)
+    max_wait: Dict[str, int] = field(default_factory=dict)  # maximum waiting times after prior operations
+    min_wait: Dict[str, int] = field(default_factory=dict)  # minimum waiting times after prior operations
+    wait_to_start_costs: float = 0
+    is_start: bool = False  # flag whether this job has no other jobs that have to be finished prior
+    opacity: float = 1  # this is reduced if the job is behind an if-statement
+    result: Any = None  # todo: should become a UUID referencing the database
+    data: Dict = field(default_factory=dict)  # custom data like duration, speed, etc.
+ + @property + def cont(self): + if not self.cont_names: + return None + return self.cont_names[0] + + def priors_done(self, job_by_name: Dict): + """Checks whether all prerequisite operations are finished""" + return all([job_by_name[name].status == StepStatus.FINISHED for name in self.prior]) + + @property + def main_device(self) -> Optional[UsedDevice]: + for used_device in self.used_devices: + if used_device.tag == "main": + return used_device + return None + + +@dataclass +class MoveStep(ProcessStep): + """Operation resembling a movement of a container""" + pref_dest_pos: Optional[int] = None # optional preferences for the destination slot number + destination_pos: int = 0 # actual destination slot number (set at execution runtime) + origin_pos: int = 0 # this should not be relevant for the execution but might be nice for logging + + @property + def origin_device(self) -> Optional[UsedDevice]: + for used_device in self.used_devices: + if used_device.tag == "origin": + return used_device + return None + + @property + def target_device(self) -> Optional[UsedDevice]: + for used_device in self.used_devices: + if used_device.tag == "target": + return used_device + return None + + +@dataclass +class Variable: + """Representing a variable node of the workflow graph""" + name: str # to index the node in the implicit workflow graph + var_name: str + result: Any = None + prior: List[str] = field(default_factory=list) + status: StepStatus = StepStatus.WAITING + opacity: float = 1 # the reduces if this variable is behind an if-statement + + +@dataclass +class IfNode: + """Represents a constraint in the workflow graph""" + name: str + evaluation: Callable[[Dict[str, Any]], bool] # takes **kwargs and outputs the decision of the node + decision: Union[bool, None] = None + prior: List[str] = field(default_factory=list) # indices of the variable nodes needed for a decision + true_tree: List[str] = field(default_factory=list) # indices of nodes to be executed (directly) after positive decision + false_tree: List[str] = field(default_factory=list) # indices of nodes not to be executed (directly) after positive decision + status: StepStatus = StepStatus.WAITING + opacity: float = 1 # the reduces if this node is behind an if-statement + + +@dataclass +class Computation: + name: str + var_name: str + evaluation: Callable[[Dict[str, Any]], Any] # takes **kwargs and outputs the computation result + prior: List[str] = field(default_factory=list) # indices of the variable nodes needed for computation + result: Any = None + status: StepStatus = StepStatus.WAITING + opacity: float = 1 # the reduces if this computation is behind an if-statement + + +@dataclass +class ScheduledAssignment: + start: datetime # scheduled start of the job + # List of jobs scheduled on the same machine, that have to finish prior + machine_prior: List[str] = field(default_factory=list) + # other participating are assigned by their tag in used_devices. 
e.g.: 'target'->'Carousel' + participating: Dict[str, str] = field(default_factory=dict) + + @property + def device(self) -> Optional[str]: + """ + :return: name of the device this job is scheduled on + """ + if 'main' not in self.participating: + return None + return self.participating['main'] + + +# a schedule is an assignment of start and device to each job +class Schedule(Dict[str, ScheduledAssignment]): + def __str__(self): + return "\n".join(f"{idx} --> {assign}" for idx, assign in self.items()) + + +# all kinds of nodes except for container nodes +Operable = Union[ProcessStep, Variable, IfNode, Computation] + +node_col = dict(container='grey', variable='blue', operation='red', if_node='orange', computation="lightgrey", dummy="cyan") +node_col2 = {ContainerInfo: 'grey', Variable: 'blue', IfNode: 'orange', Computation: "lightgrey", + ProcessStep: 'red', MoveStep: 'red'} +job_col = {StepStatus.WAITING: 'red', StepStatus.RUNNING: 'yellow', StepStatus.FINISHED: 'green', StepStatus.ERROR: 'pink'} + + +ProcessInfo = str + + +class SMProcess: + """ + Encapsulates the information about an experimental process (as workflow graph) and provides utility functions. + This structure is independent of the process description language + """ + name: str + experiment_uuid: str + # earliest possible starting time. Allows to schedule processes for a delayed start + min_start: datetime | None + _status: ProcessExecutionState + steps: List[ProcessStep] + containers: List[ContainerInfo] + variables: List[Variable] + if_nodes: List[IfNode] + computations: List[Computation] + + def __init__(self, name: str, priority: int = 0): + self.steps = [] + self.containers = [] + self.variables = [] + self.if_nodes = [] + self.computations = [] + self.priority = priority + self.name = name + self.experiment_uuid = name + self._status = ProcessExecutionState.IDLE + self.min_start = None + + def update_reagent_opacity(self): + """ + This method sets the opacity of all operations of reagents + :return: + """ + g = self.wfg + step_by_id = {j.name: j for j in self.steps} + for cont in self.containers: + if cont.is_reagent: + name = cont.name + for buff, start_job in g.out_edges(name): + # get all descendants + reachable = nx.descendants(g, start_job) + # filter the ones, that use this reagent but do also involve other containers + needed_for = [step_by_id[idx] for idx in reachable + if idx in step_by_id and + name in step_by_id[idx].cont_names and + len(step_by_id[idx].cont_names) > 1] + max_opacity = .5 if len(needed_for) == 0 else max(job.opacity for job in needed_for) + reachable.add(start_job) + for reagent_usage in reachable: + if reagent_usage in step_by_id: + step = step_by_id[reagent_usage] + # we iterate over all jobs, that only use this reagent + if step.cont == name: + step.opacity = max_opacity + if not step.opacity == max_opacity: + Logger.warning(f"setting opacity of {step.name} to {max_opacity}") + + def get_info(self) -> ProcessInfo: + return self.name + + @property + def status(self): + """ + This automatically sets the status to FINISHED if all steps are finished + :return: + """ + if all(step.status == StepStatus.FINISHED for step in self.steps): + self._status = ProcessExecutionState.FINISHED + return self._status + + @status.setter + def status(self, status): + self._status = status + + @property + def wfg(self) -> nx.DiGraph: + """ + Having the workflow as nx.DiGraph is very useful, but saving it in memory means a second 'source of truth'. + So, we create it on the fly. 
:return: the whole workflow as nx.DiGraph
+        """
+        g = nx.DiGraph()
+        operable = self.steps + self.if_nodes + self.variables + self.computations
+        # sort out those with opacity 0
+        all_nodes = self.containers + [n for n in operable if n.opacity > 0]
+        type_tag = {ProcessStep: 'operation', Computation: 'computation', IfNode: 'if_node', Variable: 'variable',
+                    ContainerInfo: 'container', MoveStep: 'operation'}
+        node_data = [(n.name, dict(type=type_tag[type(n)],
+                                   opacity=n.opacity if hasattr(n, 'opacity') else 1))
+                     for n in all_nodes]
+        g.add_nodes_from(node_data)
+        edge_data = []
+        for n in all_nodes:
+            if hasattr(n, 'prior'):
+                for prior in n.prior:
+                    edge_data.append((prior, n.name, dict()))
+        for job in self.steps:
+            if job.is_start:
+                for cont in job.cont_names:
+                    edge_data.append((cont, job.name, dict()))
+        g.add_edges_from(edge_data)
+        return g
+
+    @property
+    def starting_nodes(self) -> list[ProcessStep]:
+        starts = [step for step in self.steps if step.is_start]
+        return starts
+
+
+class SchedulingInstance:
+    step_by_id: Dict[str, ProcessStep]
+    container_info_by_name: Dict[str, ContainerInfo]
+    if_node_by_id: Dict[str, IfNode]
+    var_by_id: Dict[str, Variable]
+    computation_by_id: Dict[str, Computation]
+    # operations that were removed, e.g. due to runtime decisions, are stored here
+    deleted_operable: Dict[str, Operable]
+
+    process_by_name: Dict[str, SMProcess]
+    # processes can be stopped and continued
+    schedule: Schedule
+
+    def __init__(self):
+        self.container_info_by_name = {}
+        self.process_by_name = {}
+        self.step_by_id = {}
+        self.if_node_by_id = {}
+        self.var_by_id = {}
+        self.computation_by_id = {}
+        self.deleted_operable = {}
+        self.schedule = Schedule()
+        self.future = []
+
+    def add_process(self, process: SMProcess):
+        if process.name in self.process_by_name:
+            Logger.error(f"Error: process name {process.name} already exists")
+        if not self.unique_job_names(process):
+            Logger.error("Error: some operation name occurs twice")
+        # link process, operations and containers
+        self.process_by_name[process.name] = process
+        for job in process.steps:
+            self.step_by_id[job.name] = job
+            job.process_name = process.name
+        for cont in process.containers:
+            self.container_info_by_name[cont.name] = cont
+        for var in process.variables:
+            self.var_by_id[var.name] = var
+        for comp in process.computations:
+            self.computation_by_id[comp.name] = comp
+        for if_node in process.if_nodes:
+            self.if_node_by_id[if_node.name] = if_node
+
+    def remove_process(self, process_id):
+        """
+        Purges the process and its content from all dictionaries
+        :param process_id:
+        :return:
+        """
+        Logger.info(f"removing process {process_id}")
+        if process_id not in self.process_by_name:
+            Logger.error(f"There is no process with id {process_id}")
+            return
+        p = self.process_by_name[process_id]
+        for job in p.steps:
+            self.step_by_id.pop(job.name)
+        for cont in p.containers:
+            self.container_info_by_name.pop(cont.name)
+        for var in p.variables:
+            self.var_by_id.pop(var.name)
+        for comp in p.computations:
+            self.computation_by_id.pop(comp.name)
+        for if_node in p.if_nodes:
+            self.if_node_by_id.pop(if_node.name)
+        self.process_by_name.pop(process_id)
+
+    def set_schedule(self, schedule: Schedule):
+        """
+        Saves the schedule and writes its information into all affected job infos
+        :param schedule:
+        :return:
+        """
+        J = self.step_by_id
+        self.schedule = schedule
+        for idx, assign in schedule.items():
+            J[idx].main_device.name = assign.device
+            # set the names of other participating devices
+            for used_device in
J[idx].used_devices: + if used_device.tag in assign.participating: + used_device.name = assign.participating[used_device.tag] + + def start_process(self, process_id): + if process_id not in self.process_by_name: + Logger.error(f"There is no process named {process_id}") + else: + self.process_by_name[process_id].status = ProcessExecutionState.RUNNING + + def stop_process(self, process_id): + if process_id not in self.process_by_name: + Logger.error(f"There is no process named {process_id}") + else: + process = self.process_by_name[process_id] + if process.status == ProcessExecutionState.RUNNING: + process.status = ProcessExecutionState.PAUSED + + def unique_job_names(self, process: SMProcess): + """Checks whether none of the processes operations names is already given to an operation.""" + return True + + def naming_consistent(self, process: SMProcess): + """Check whether the names of the jobs correspond to the indices in the workflow graph.""" + return True + + def update_reagent_opacity(self): + """ + This method sets the opacity of all operations of reagent + :return: + """ + for p in self.process_by_name.values(): + p.update_reagent_opacity() + + def visualize_wfg(self): + dot = Digraph(comment="Workflow") + dot.attr(rankdir='LR') + g = self.combined_wfg + try: + # some default_values + for n, data in g.nodes(data=True): + data['name'] = n + data['color'] = 'cyan' + data['style'] = 'filled' + for u, v, data in g.edges(data=True): + data['label'] = '' + data['c'] = 0 + data['w'] = 'inf' + # customize the labels and colors + for idx, op in self.operable.items(): + #g.nodes[idx]['style'] = 'filled'if op.opacity == 1 else 'striped' + g.nodes[idx]['style'] = 'filled' if idx in self.future else 'striped' + g.nodes[idx]['color'] = node_col2[type(op)] + for idx, job in self.step_by_id.items(): + g.nodes[idx]['color'] = job_col[job.status] + g.nodes[idx]['name'] = job.function + for idx_o in job.prior: + g.edges[idx_o, idx]['c'] = round(job.wait_cost[idx_o]) + g.edges[idx_o, idx]['w'] = job.max_wait[idx_o] + for idx, if_node in self.if_node_by_id.items(): + if if_node.status == StepStatus.FINISHED: + g.nodes[idx]['name'] += f"\nis {if_node.decision}" + else: + for if_true in if_node.true_tree: + g.edges[idx, if_true]['label'] = "true " + for if_true in if_node.false_tree: + g.edges[idx, if_true]['label'] = "false " + for idx, var in self.var_by_id.items(): + if var.status == StepStatus.FINISHED: + g.nodes[idx]['name'] += f"\n= {var.result}"[:15] + for idx, cont in self.container_info_by_name.items(): + g.nodes[idx]['color'] = node_col2[ContainerInfo] + g.nodes[idx]['name'] += f"\nBC={cont.barcode}" + for idx, comp in self.computation_by_id.items(): + if comp.status == StepStatus.FINISHED: + g.nodes[idx]['name'] += f"\n= {comp.result}"[:15] + # create the dot graph + for n, data in g.nodes(data=True): + dot.node(str(n), data['name'], color=data['color'], style=data['style']) + for u, v, data in g.edges(data=True): + dot.edge(str(u), str(v), f"{data['label']}c={data['c']}, w={data['w']}") + except Exception as ex: + Logger.debug(ex, traceback.print_exc()) + return None + dot.format = "png" + return dot + + def schedule_violated(self, tolerance: float = 10) -> bool: + tolerance = timedelta(seconds=tolerance) + for job_id, scheduled_start in self.schedule.items(): + # ignore removed jobs + if job_id in self.step_by_id: + job = self.step_by_id[job_id] + if job.status == StepStatus.WAITING and scheduled_start.start+tolerance < datetime.today(): + return True + if job.status == StepStatus.RUNNING and 
scheduled_start.start+tolerance < job.start: + return True + return False + + def gannt_chart(self): + data = [] + color_discrete_map = dict(Cytomat1550_1='red', Cytomat1550_2='crimson', VarioskanLUX="green", + F5="blue", Rotanta_Transfer="violet", NOW="black") + # collect containers with scheduled jobs + relevant_cont_names = set() + for idx, start in self.schedule.items(): + job = self.step_by_id[idx] + if job.main_device.name is None: + job.main_device.name = "F5" + for cont_name in job.cont_names: + cont_info = self.container_info_by_name[cont_name] + if cont_info.barcode: + name = f"BC_({cont_info.barcode})" + else: + name = f"{cont_name}(BC unknown)" + relevant_cont_names.add(name) + data.append(dict( + name=name, + start=start.start, + finish=start.start+timedelta(seconds=job.duration), + device=job.main_device.name, + hover_name=idx, + )) + # there might be jobs without a container involved + if not job.cont_names: + relevant_cont_names.add("No Container") + data.append(dict( + name=f"No Container", + start=start.start, + finish=start.start+timedelta(seconds=job.duration), + device=job.main_device.name, + hover_name=idx, + )) + + if self.running_processes_names or len(self.schedule) == 0: + for name in relevant_cont_names: + now = datetime.today() + width = timedelta(seconds=1) + data.append(dict(name=name, start=now, finish=now+width, device="NOW")) + df = pd.DataFrame(data) + fig = px.timeline(df, x_start="start", x_end="finish", y="name", color="device", + color_discrete_map=color_discrete_map, hover_name='hover_name') + fig.update_yaxes(autorange="reversed") # otherwise tasks are listed from the bottom up + return fig + + def remove_operable(self, idx: str): + """ + Deletes a process step, variable, computation or decision from the structure including all references. 
+ """ + if idx not in self.operable: + Logger.error(f"Node {idx} is not active node in this JSSP") + op = self.operable[idx] + self.deleted_operable[idx] = op + # delete the reference from wherever it belonged to + for d in [self.step_by_id, self.var_by_id, self.computation_by_id, self.if_node_by_id]: + if idx in d: + d.pop(idx) + # remove it from the process as well + for p in self.process_by_name.values(): + for l in [p.containers, p.if_nodes, p.steps, p.variables, p.computations]: + if op in l: + l.remove(op) + + @property + def running_processes_names(self) -> List[str]: + """ :return a list of the names of all running processes""" + running = [name for name, p in self.process_by_name.items() if + p.status == ProcessExecutionState.RUNNING] + return running + + @property + def process_stati_by_name(self) -> Dict[str, ProcessExecutionState]: + return {name: p.status for name, p in self.process_by_name.items()} + + @property + def combined_wfg(self) -> nx.DiGraph: + g = nx.DiGraph() + for p in self.process_by_name.values(): + g = nx.compose(g, p.wfg) + return g + + @property + def definite_step_by_id(self) -> Dict[str, ProcessStep]: + # mark all jobs, that shall be excluded due to errors + conflict = set() + for cont_name, cont in self.container_info_by_name.items(): + if cont.in_error_state: + for idx, step in self.step_by_id.items(): + if cont_name in step.cont_names: + # adds any step that includes a container in error-state + conflict.add(idx) + # todo make this a consistent wfg + return {idx: job for idx, job in self.step_by_id.items() if job.opacity == 1 and idx not in conflict} + + @property + def definite_if_node_by_id(self): + return {idx: node for idx, node in self.if_node_by_id.items() if node.opacity == 1} + + @property + def definite_var_by_id(self): + return {idx: var for idx, var in self.var_by_id.items() if var.opacity == 1} + + @property + def definite_computation_by_id(self): + return {idx: computation for idx, computation in self.computation_by_id.items() if computation.opacity == 1} + + @property + def operable(self) -> Dict[str, Operable]: + res = {} + for d in [self.if_node_by_id, self.var_by_id, self.computation_by_id, self.step_by_id]: + res.update(d) + return res diff --git a/laborchestrator/traffic_light.py b/laborchestrator/traffic_light.py new file mode 100644 index 0000000000000000000000000000000000000000..d1e0723f3105184008a9c278b8a6ff461db54b92 --- /dev/null +++ b/laborchestrator/traffic_light.py @@ -0,0 +1,162 @@ +from dash_extensions.enrich import html, dcc, Output, Input, State + + +def TrafficLight(component_id, size=60): + """ + Creates a reusable traffic light component with adjustable size. + + Args: + component_id (str): The unique identifier for the component. + size (int): The size of the traffic lights (width and height in pixels). + + Returns: + html.Div: A Dash component representing the traffic light. 
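+    The active light is selected via the component's dcc.Store ("<component_id>-active"): 0=green, 1=yellow, 2=orange, 3=red.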
+ """ + size_px = f"{size}px" + font_size = round(16*size/60) + label_style = dict(color="white", whiteSpace="nowrap", fontSize=font_size) + + return html.Div( + id=component_id, + style={ + "width": f"{size * 4}px", # Adjust container width based on size + "backgroundColor": "black", + "borderRadius": "10px", + "padding": "10px", + "display": "flex", + "flexDirection": "column", + "alignItems": "flex-start", + }, + children=[ + dcc.Store(id=f"{component_id}-active", data=0), # Store to track active state + # Green Light with Label + html.Div( + style={"display": "flex", "alignItems": "center", "marginBottom": "10px"}, + children=[ + html.Div( + id=f"{component_id}-green-light", + style={ + "width": size_px, + "height": size_px, + "backgroundColor": "gray", + "borderRadius": "50%", + "marginRight": "10px", + }, + ), + html.Div("Schedule Optimal", style=label_style), + ], + ), + # Yellow Light with Label + html.Div( + style={"display": "flex", "alignItems": "center", "marginBottom": "10px"}, + children=[ + html.Div( + id=f"{component_id}-yellow-light", + style={ + "width": size_px, + "height": size_px, + "backgroundColor": "gray", + "borderRadius": "50%", + "marginRight": "10px", + }, + ), + html.Div("Schedule Feasible", style=label_style), + ], + ), + # Orange Light with Label + html.Div( + style={"display": "flex", "alignItems": "center", "marginBottom": "10px"}, + children=[ + html.Div( + id=f"{component_id}-orange-light", + style={ + "width": size_px, + "height": size_px, + "backgroundColor": "gray", + "borderRadius": "50%", + "marginRight": "10px", + }, + ), + html.Div( + ["Schedule Executable ", html.Br(), "(but Infeasible)"], + style=label_style, + ), + ], + ), + # Red Light with Label + html.Div( + style={"display": "flex", "alignItems": "center"}, + children=[ + html.Div( + id=f"{component_id}-red-light", + style={ + "width": size_px, + "height": size_px, + "backgroundColor": "gray", + "borderRadius": "50%", + "marginRight": "10px", + }, + ), + html.Div("No Schedule Found", style=label_style), + ], + ), + ], + ) + + + +# Callback to update traffic lights +def register_traffic_light_callbacks(app, component_id): + """ + Registers the callback for the traffic light component. + + Args: + app (Dash): The Dash app instance. + component_id (str): The unique identifier of the traffic light. + """ + + @app.callback( + [ + Output(f"{component_id}-green-light", "style"), + Output(f"{component_id}-yellow-light", "style"), + Output(f"{component_id}-orange-light", "style"), + Output(f"{component_id}-red-light", "style"), + ], + [ + Input(f"{component_id}-active", "data") + ], + [ + State(f"{component_id}-green-light", "style"), + State(f"{component_id}-yellow-light", "style"), + State(f"{component_id}-orange-light", "style"), + State(f"{component_id}-red-light", "style"), + ] + ) + def update_traffic_light(active, green_style, yellow_style, orange_style, red_style): + """ + Updates the traffic light's active light based on the `active` property, + while preserving existing style entries. + + Args: + active (int): The index of the active light (0 for green, 1 for yellow, 2 for orange, 3 for red). + green_style, yellow_style, orange_style, red_style (dict): Current styles of each light. + + Returns: + tuple: Updated styles for each light. 
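+        Inactive lights are reset to gray with no boxShadow glow; all other style keys are preserved via a copy of the incoming style.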
+ """ + + def update_style(current_style, color, is_active): + # Create a new dictionary to preserve existing styles while updating relevant keys + updated_style = current_style.copy() # Copy the current style + updated_style.update({ + "backgroundColor": color if is_active else "gray", + "boxShadow": f"0 0 20px {color}" if is_active else "", + }) + return updated_style + + return ( + update_style(green_style or {}, "green", active == 0), + update_style(yellow_style or {}, "yellow", active == 1), + update_style(orange_style or {}, "orange", active == 2), + update_style(red_style or {}, "red", active == 3), + ) diff --git a/laborchestrator/workflowgraph.py b/laborchestrator/workflowgraph.py new file mode 100755 index 0000000000000000000000000000000000000000..4a91c5c54d23aef9b7b55fe702c1f26990af616d --- /dev/null +++ b/laborchestrator/workflowgraph.py @@ -0,0 +1,125 @@ +""" +This is a wrapper for a networkx workflow graph. +It adds some convenient functionalities +""" +import traceback +import networkx as nx +from laborchestrator.logging_manager import StandardLogger as Logger +from laborchestrator.structures import Schedule, ScheduledAssignment, ProcessStep, SMProcess +from datetime import datetime +from typing import Optional, List, Dict, NamedTuple, Iterable + + +class Resource(NamedTuple): + Type: str + Tag: str + Preferred: str + + +class Node(NamedTuple): + Idx: str + Duration: float + RequiredResources: List[Resource] # f.e. [('StorageResource', 'origin', 'Carousel'), + # ('MoverResource', 'main', 'Mover'), + # ('IncubationResource', 'target', 'Incubator2')] + StartTime: str + Finish: str + WaitToStartCost: Optional[float] = None + + +class Edge(NamedTuple): + Head: str + Tail: str + WaitCost: Optional[float] = None + MaxWaitingTime: Optional[float] = None + MinWaitingTime: Optional[float] = None + + +class Graph(NamedTuple): + Nodes: List[Node] + Edges: List[Edge] + + +class Assignment: + start: datetime + machine_assignments: Dict[str, str] # f.e. 
{origin: Carousel, main: Mover, target: Incubator1} + machine_prior: List[str] # additional precedence constraints + + +class WorkFlowGraph: + @staticmethod + def create_sila_structure_from_jobs(jobs: Iterable[ProcessStep], wfg: nx.DiGraph): + g = Graph([], []) + job_by_name = {j.name: j for j in jobs} + for j in jobs: + requirements = [] + for d in j.used_devices: + try: + type_str = d.device_type.__name__ + except Exception as ex: + type_str = str(d.device_type) + # if the step is finished no other device should be scheduled on it + if j.start: + preference = d.name + else: + preference = d.preferred + requirements.append( + Resource(Type=type_str, Tag=d.tag, Preferred=str(preference)) + ) + g.Nodes.append(Node(j.name, j.duration, requirements, str(j.start), str(j.finish), j.wait_to_start_costs)) + + connected = set() # avoid double edges + for prior in j.prior: + priors = [prior] + # we also add all edges of precedences that are induced by non-step-nodes + # therefore search for all steps that are connect that way + while priors: + p = priors.pop() + if p in job_by_name: + if p not in connected: + min_wait = j.min_wait[prior] if prior in j.min_wait else None + g.Edges.append(Edge(j.name, p, j.wait_cost[prior], j.max_wait[prior], min_wait)) + connected.add(p) + else: + priors.extend([idx for idx in wfg.predecessors(p) + # being connected through steps that might never happen does not count + # if not (idx in job_by_name and job_by_name[idx].opacity < 1)]) + if not wfg.nodes[idx]['opacity'] < 1]) + return g + + @staticmethod + def add_waiting_dummies(g: Graph, processes: list[SMProcess]): + dummy_machine = Resource("Dummy", "main", "DummyDump") + for process in processes: + if not process.min_start or process.min_start < datetime.now(): + continue + waiting_left = process.min_start - datetime.now() + # create a dummy_task + dummy = Node(Idx=f"dummy_{process.name}", Duration=10, RequiredResources=[dummy_machine], + StartTime="None", Finish="None") + g.Nodes.append(dummy) + # add waiting constraints to this dummy task + for start_step in process.starting_nodes: + dummy_edge = Edge(Head=start_step.name, Tail=dummy.Idx, MinWaitingTime=waiting_left.total_seconds(), + MaxWaitingTime=float('inf')) + g.Edges.append(dummy_edge) + Logger.info(f"Process {process.name} has {waiting_left} to wait") + + @staticmethod + def create_schedule_from_sila_struct(schedule_response) -> Schedule: + try: + schedule = Schedule() + for elem in schedule_response: + assign = ScheduledAssignment( + # remove the timezone info. 
it is not needed for anything, except for the sila standard + start=elem.StartTime.replace(tzinfo=None), + participating={m.Tag: m.MachineName for m in elem.AssignedMachines}, + machine_prior=elem.MachinePrecedences, + ) + if "dummy" in elem.ProcessStepId: + Logger.debug(f"removing {elem.ProcessStepId} from schedule") + continue + schedule[elem.ProcessStepId] = assign + return schedule + except Exception as ex: + Logger.error(f"Could not retrieve schedule from response: {ex}", traceback.print_exc()) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..63ce576a728464f009dc4e40f0f13068c203dc6d --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,162 @@ +#--------------------------- setuptools ----------------------------- + +[build-system] +requires = ["setuptools>=42", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "laborchestrator" +version = "0.2.2" +readme = "README.md" +authors =[{name = "mark doerr", email = "mark.doerr@uni-greifswald.de"}, + {name = "Stefan Maak", email = "stefan.maak@uni-greifswald.de"}] +description="General Purpose Orchestrator for Scientific Laboratories. It collaborates nicely with SiLA servers and pythonLab as process description language." +license = {text = "MIT"} +classifiers=[ + 'Development Status :: 2 - Pre-Alpha', + 'Intended Audience :: Developers', + 'Intended Audience :: Science/Research', + 'Intended Audience :: Education', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Natural Language :: English', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Information Analysis', + 'Topic :: Scientific/Engineering :: Visualization', + 'Topic :: Scientific/Engineering :: Bio-Informatics', + 'Topic :: Scientific/Engineering :: Chemistry' +] + +dependencies = [ +"pyYAML>=6.0", +"dash>=2.6.1", +"dash-bootstrap-components>=1.2.1", +"dash-interactive-graphviz", +"dash-extensions>=0.1.11", +"numpy", +"pandas", +"sila2", +"graphviz", +"pydantic", +"networkx", +#"rdflib", +#Pillow +#pyyaml +] + +# use MANIFEST.in to include non-python files ! 
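+# (illustrative MANIFEST.in entry, path is hypothetical:  include laborchestrator/data/*.yml)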
+#packages = [ +# { include = "laborchestrator" }, +#] + +# uncomment to enable commandline access of the module via its name from anywhere +#[project.scripts] +#laborchestrator = "laborchestrator.__main__:main" + +[project.optional-dependencies] +dev = [ + "pytest>=7.3", + "pytest-cov>=2.12", + "pytest-xdist>=2.0", + "coverage>=7.2", + "tox>=4.5", + "safety>=1.0", + "bandit>=1.0", + #"flake8>=3.0", + "pytest-cov", + "pyproject-flake8", + #"flake8-bugbear>=20.0", + "black>=20.0", + "isort>=5.0", + "mypy>=0.0", + "pylint>=2.0", + "invoke>=2.1", + "bumpversion>=0.6", + #"types-requests>=0.0", + #"pre-commit>=2.0", # https://pre-commit.com/ +] + +test = [ + "pytest>=7.3", + "pytest-cov>=2.12", + "pytest-xdist>=2.0", + "coverage>=7.2", + "tox>=4.5", + "safety>=1.0", + "bandit>=1.0", + "flake8>=3.0", + #"flake8-bugbear>=20.0", + "black>=20.0", + "isort>=5.0", + "mypy>=0.0", + "pylint>=2.0", + "pythonlab" + #"types-requests>=0.0", +] + +docs = [ + "sphinx>=7.0", + "python-docs-theme>=2023.3", + "myst-parser>=1.0", + #"types-requests>=0.0", +] + +[project.urls] +"Homepage" = "https://gitlab.com/opensourcelab/laborchestrator" + +[tool.pytest.ini_options] +minversion = "6.0" + +addopts = [ + "-v", + #"-n=auto", +] +filterwarnings = [ + # https://github.com/pypa/pip/issues/11975 + "ignore:.*pkg_resources.*:DeprecationWarning", +] + +[tool.black] +line-length = 120 + +[tool.isort] +line_length = 120 +profile = "black" +skip = [ + "venv", +] + +[tool.flake8] +max-line-length = 120 +extend-ignore = "E203,E501,W293" +exclude = [ + "venv", +] + +[tool.bandit] +exclude_dirs = ["tests"] +skips = ["B101"] + +[tool.coverage.report] +exclude_lines = [ + "if __name__ == .__main__.:", + "@abstractmethod", + "@abc.abstractmethod", + "def __repr__(self):", + # re-enable the standard pragma + "pragma: no cover", + "raise NotImplementedError", + "if TYPE_CHECKING", +] +skip_empty = true +omit = [ + "setup.py", + # auto-generated by grpcio_tools.protoc + "*_pb2.py", + # will be copied to the server, so this source code is never executed +] +precision = 2 +# end setuptools diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..609c6b4b6bde435d51cc45c7a9a3a62a86772dda --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[run] +omit = *experiment.py,*process_*.py,*old*.py diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..d7950add20d60a7ca7b9e9f719452e6425cf85a4 --- /dev/null +++ b/setup.py @@ -0,0 +1,30 @@ +import os +import re + +from setuptools import setup, find_packages + + +REGEX_COMMENT = re.compile(r"[\s^]#(.*)") + +# allow setup.py to be run from any path +os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) + +dir_path = os.path.dirname(os.path.realpath(__file__)) + +with open(os.path.join(dir_path, "VERSION"), "r") as version_file: + version = str(version_file.readline()).strip() + + +def parse_requirements(filename): + filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename) + with open(filename, "rt") as filehandle: + requirements = filehandle.readlines()[2:] + return tuple(filter(None, (REGEX_COMMENT.sub("", line).strip() for line in requirements))) + + +setup( + name="laborchestrator", + version=version, + packages=find_packages(), + include_package_data=True, +) diff --git a/tasks.py b/tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..ea9b7ed3cb4761458eebf50ce5ba8d24dbbd7e1b --- /dev/null +++ b/tasks.py @@ -0,0 +1,462 @@ +""" +Tasks for 
maintaining the project. + +Execute 'invoke --list' for guidance on using Invoke +""" +import os +import sys +import platform +import shutil +import webbrowser +from pathlib import Path +from distutils.util import strtobool +import venv + +import pytest +from invoke import task, exceptions # type: ignore + +OS_PLATFORM = platform.system() +HOME_DIR = str(Path.home()) +ROOT_DIR = Path(__file__).parent +BIN_DIR = ROOT_DIR.joinpath("bin") +SETUP_FILE = ROOT_DIR.joinpath("setup.py") +TEST_DIR = ROOT_DIR.joinpath("tests") +SOURCE_DIR = ROOT_DIR.joinpath("laborchestrator") +TOX_DIR = ROOT_DIR.joinpath(".tox") +JUNIT_XML_FILE = BIN_DIR.joinpath("report.xml") +COVERAGE_XML_FILE = BIN_DIR.joinpath("coverage.xml") +COVERAGE_HTML_DIR = BIN_DIR.joinpath("coverage_html") +COVERAGE_HTML_FILE = COVERAGE_HTML_DIR.joinpath("index.html") +DOCS_DIR = ROOT_DIR.joinpath("docs") +DOCS_SOURCE_DIR = DOCS_DIR.joinpath("source") +DOCS_BUILD_DIR = DOCS_DIR.joinpath("_build") +DOCS_INDEX = DOCS_BUILD_DIR.joinpath("index.html") +PYTHON_DIRS = [str(d) for d in [SOURCE_DIR, TEST_DIR]] +SAFETY_REQUIREMENTS_FILE = BIN_DIR.joinpath("safety_requirements.txt") +PYPI_URL = "https://pypi.python.org/api/pypi/pypi/simple" +PYTHON_VERSION = 3.9 +CI_PROJECT_NAME = "lab-orchestrator" +CI_REGISTRY_IMAGE = "registry.gitlab.com/https://gitlab.com/opensourcelab/laborchestrator" +DOCKERFILE = "Dockerfile" +DOCKER_BUILD_PLATFORM = "--platform linux/amd64" +VENV_MODULE_NAME = "venv" + + + +def _delete_file(file): + """ + If the file exists, delete it + + :param file: The file to delete + """ + try: + file.unlink(missing_ok=True) + except TypeError: + # missing_ok argument added in 3.8 + try: + file.unlink() + except FileNotFoundError: + pass + + +def _run(_c, command): + """ + It runs a command + + :param _c: The context object that is passed to invoke tasks + :param command: The command to run + """ + return _c.run(command, pty=platform.system() != 'Windows') + + +def _get_registry_path_str(python_version): + """ + It takes a build tag and a Python version, and returns a string that is the path to the image in the registry + + :param python_version: The version of Python to use + :return: The registry path for the image. 
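+    (for example f"{CI_REGISTRY_IMAGE}/lab-orchestrator:py3.9-main" when the current branch is "main" and python_version is 3.9)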
+ """ + ci_commit_ref_name = os.popen("git symbolic-ref --short -q HEAD").read().strip() + build_tag = ci_commit_ref_name if ci_commit_ref_name else "latest" + image_name = f"{CI_PROJECT_NAME}:py{python_version}-{build_tag}" + registry_path = f"{CI_REGISTRY_IMAGE}/{image_name}" + return registry_path + + +@task(help={'check': "Checks if source is formatted without applying changes"}) +def format(_c, check=False): + """ + It runs the `black` and `isort` tools on the Python code in the `PYTHON_DIRS` directories + + :param _c: The context object that is passed to invoke tasks + :param check: If True, the code will be checked for formatting, but not changed, defaults to False (optional) + """ + python_dirs_string = " ".join(PYTHON_DIRS) + # Run black + black_options = "--check" if check else "" + _run(_c, f"black {black_options} {python_dirs_string}") + # Run isort + isort_options = "--check-only --diff" if check else "" + _run(_c, f"isort {isort_options} {python_dirs_string}") + + +@task +def lint_flake8(_c): + """ + It runs the flake8 linter on all Python files in the project + + :param _c: The context object that is passed to invoke tasks + """ + _run(_c, f"flake8 {' '.join(PYTHON_DIRS)}") + + +@task(lint_flake8) +def lint(_): + """ + It runs all linting tools on all Python files in the project + """ + + +@task +def security_bandit(_c): + """ + It runs bandit security checks on the source directory + + :param _c: The command to run + """ + _run(_c, f"bandit -c pyproject.toml -r {SOURCE_DIR}") + + +@task +def security_safety(_c): + """ + It runs security checks on package dependencies + + :param _c: The context object that is passed to the task + """ + Path(BIN_DIR).mkdir(parents=True, exist_ok=True) + _run(_c, f"poetry export --dev --format=requirements.txt --without-hashes --output={SAFETY_REQUIREMENTS_FILE}") + _run(_c, f"safety check --file={SAFETY_REQUIREMENTS_FILE} --full-report") + + +@task(security_bandit, security_safety) +def security(_): + """ + It runs all security checks + """ + + +@task( + optional=["coverage"], + help={ + "coverage": 'Add coverage, ="html" for html output or ="xml" for xml output', + "junit": "Output a junit xml report", + }, +) +def test(_, coverage=None, junit=False): + """ + It runs the tests in the current directory + + :param _: The context object that is passed to invoke tasks + :param coverage: Generates coverage report, "html" for html output or "xml" for xml output (optional) + :param junit: If True, the test results will be written to a JUnit XML file, defaults to False (optional) + """ + pytest_args = ["-v"] + + if junit: + pytest_args.append(f"--junitxml={JUNIT_XML_FILE}") + + if coverage is not None: + pytest_args.append(f"--cov={SOURCE_DIR}") + + if coverage == "html": + pytest_args.append(f"--cov-report=html:{COVERAGE_HTML_DIR}") + elif coverage == "xml": + pytest_args.append(f"--cov-report=xml:{COVERAGE_XML_FILE}") + + pytest_args.append(str(TEST_DIR)) + return_code = pytest.main(pytest_args) + + if coverage == "html": + webbrowser.open(COVERAGE_HTML_FILE.as_uri()) + + if return_code: + raise exceptions.Exit("Tests failed", code=return_code) + + +@task +def clean_docs(_c): + """ + It takes a list of strings and returns a list of strings + + :param _c: The context object that is passed to invoke tasks + """ + _run(_c, f"rm -fr {DOCS_BUILD_DIR}") + _run(_c, f"rm -fr {DOCS_SOURCE_DIR}") + + +@task(pre=[clean_docs], help={"launch": "Launch documentation in the web browser"}) +def docs(_c, launch=True): + """ + It generates and opens the 
documentation for the project + + :param _c: The context object that is passed to invoke tasks + :param launch: If True, the docs will be opened in a browser. defaults to True (optional) + """ + # Generate autodoc stub files + _run(_c, f"sphinx-apidoc -e -P -o {DOCS_SOURCE_DIR} {SOURCE_DIR}") + # Generate docs + _run(_c, f"sphinx-build -b html {DOCS_DIR} {DOCS_BUILD_DIR}") + if launch: + webbrowser.open(DOCS_INDEX.as_uri()) + + +@task +def clean_build(_c): + """ + It cleans all the Python build and distribution artifacts + + :param _c: The context object that is passed to invoke tasks + """ + _run(_c, "rm -fr build/") + _run(_c, "rm -fr dist/") + _run(_c, "rm -fr .eggs/") + _run(_c, "find . -name '*.egg-info' -exec rm -fr {} +") + _run(_c, "find . -name '*.egg' -exec rm -f {} +") + + +@task +def clean_python(_c): + """ + It removes all the Python artifacts + + :param _c: The context object that is passed to invoke tasks + """ + _run(_c, "find . -name '*.pyc' -exec rm -f {} +") + _run(_c, "find . -name '*.pyo' -exec rm -f {} +") + _run(_c, "find . -name '*~' -exec rm -f {} +") + _run(_c, "find . -name '__pycache__' -exec rm -fr {} +") + + +@task +def clean_tests(_): + """ + It deletes all the test artifacts + + :param _: The context object that is passed to invoke tasks + """ + _delete_file(JUNIT_XML_FILE) + _delete_file(COVERAGE_XML_FILE) + shutil.rmtree(COVERAGE_HTML_DIR, ignore_errors=True) + shutil.rmtree(BIN_DIR, ignore_errors=True) + shutil.rmtree(TOX_DIR, ignore_errors=True) + + +@task(pre=[clean_build, clean_python, clean_tests, clean_docs]) +def clean(_): + """ + It runs all clean sub-tasks + + :param _: The context object that is passed to invoke tasks + """ + pass + + +@task( + pre=[clean_python], + optional=["python_version"], + help={ + "python_version": 'Python version to use, e.g. 
"3.9"', + }, +) +def docker_build(_c, python_version=PYTHON_VERSION, target="test"): + """ + It builds a Docker image with the given tag using the given Python version + + :param _c: The context object that is passed to invoke tasks + :param python_version: The base python version to use + :param target: The target to build ("test", "regression"), defaults to "test" (optional) + """ + build_args = f"--build-arg PYTHON_BASE={python_version} --build-arg PYPI_URL={PYPI_URL}" + registry_path = _get_registry_path_str(python_version) + cache = f"--cache-from {registry_path}" + target_tag = f"--target {target}" + _run( + _c, + f"docker build {build_args} {DOCKER_BUILD_PLATFORM} {cache} -f {DOCKERFILE} -t {registry_path} {target_tag} .", + ) + + +@task +def docker_pull(_c, python_version=PYTHON_VERSION): + """ + It pulls the image from the local registry, or if it doesn't exist, it prints a message + + :param _c: The context object that is passed to invoke tasks + :param python_version: The base python version to use + """ + registry_path = _get_registry_path_str(python_version) + _run(_c, f'docker pull {registry_path} || echo "No pre-made image available"') + + +@task +def docker_push(_c, python_version=PYTHON_VERSION): + """ + It pushes the image to the registry + + :param _c: The context object that is passed to invoke tasks + :param python_version: The base python version to use + """ + registry_path = _get_registry_path_str(python_version) + _run(_c, f"docker push {registry_path}") + + +@task +def docker_test(_c, python_version=PYTHON_VERSION): + """ + It runs the tests in a docker container + + :param _c: The context object that is passed to invoke tasks + :param python_version: The base python version to use + :param target: The target to test ("test", "regression"), defaults to "test" (optional) + """ + volume_mount = ( + f"--volume {BIN_DIR}:/laborchestrator/bin/ --volume {ROOT_DIR}:/laborchestrator:rw" + ) + registry_path = _get_registry_path_str(python_version) + pytest_arg = f"pytest -v --cov-report xml:/laborchestrator/bin/coverage.xml {TEST_DIR}" + Path(BIN_DIR).mkdir(parents=True, exist_ok=True) + _run(_c, f"docker run {DOCKER_BUILD_PLATFORM} {volume_mount} {registry_path} {pytest_arg}") + + +@task +def docker_shell(_c, python_version=PYTHON_VERSION): + """ + It opens shell in the docker container + + :param _c: The context object that is passed to invoke tasks + :param python_version: The base python version to use + """ + volume_mount = ( + f"--volume {BIN_DIR}:/laborchestrator/bin/ --volume {ROOT_DIR}:/laborchestrator:rw" + ) + registry_path = _get_registry_path_str(python_version) + bash_path = "/bin/bash" + _run(_c, f"docker run -it {DOCKER_BUILD_PLATFORM} {volume_mount} {registry_path} {bash_path}") + + +@task +def init_repo(_c): + """Initialise the repository with git-LFS and git flow + + :param _c: The context object that is passed to invoke tasks + :type _c: context object + """ + # check, if it is already a git repo + # otherwise run git init + _run(_c, "git-lfs install") + _run(_c, "git flow init") + +@task(pre=[clean]) +def release_twine( + _c, + tag_name, + pypi_user, + pypi_pass, + pypi_publish_repository="https://artifactory.aws.gel.ac/artifactory/api/pypi/pypi_genomics_dev", + pip_repository_index="https://artifactory.aws.gel.ac/artifactory/api/pypi/pypi/simple", +): + """ + It makes a release of the Python package and publishes to the GEL PyPI Artifactory using setup.py and twine + + :param _c: The context object that is passed to invoke tasks + :param tag_name: The 
name of the tag that triggered the workflow + :param pypi_user: The username of the account that has access to the repository + :param pypi_pass: The password for the pypi user + :param pypi_publish_repository: The URL of the repository to publish to (optional) + :param pip_repository_index: The URL of the pip repository to use for installing twine (optional) + """ + version_str = tag_name.replace("v", "") + _run(_c, f'echo "Build tag - {version_str}."') + _run(_c, f"echo {version_str} > VERSION") + pypirc_str = ( + "[distutils]\n" + "index-servers = gel_pypi\n" + "\n" + "[gel_pypi]\n" + f"repository: {pypi_publish_repository}\n" + f"username: {pypi_user}\n" + f"password: {pypi_pass}\n)" + ) + _run(_c, f'printf "{pypirc_str}" > ~/.pypirc') + _run(_c, f"pip install -i {pip_repository_index} twine") + _run(_c, 'mkdir -p dist && rm -rf dist/* || echo "Nothing found in dist/"; python setup.py sdist;') + _run(_c, f'twine upload --repository-url {pypi_publish_repository} -u "{pypi_user}" -p "{pypi_pass}" dist/*') + + +@task +def generate_reqs(_c): + """ + It generates requirements.txt and requirements_dev.txt using poetry (dependencies from pyproject.toml). + + :param _c: The context object that is passed to invoke tasks + """ + _run(_c, f"poetry export --without dev --without-hashes -f requirements.txt -o {ROOT_DIR}/requirements.txt") + _run(_c, f"poetry export --only dev --without-hashes -f requirements.txt -o {ROOT_DIR}/requirements_dev.txt") +# --------------- installation helper functions, please do not modify ----------------------------- + +def query_yes_no(question, default_answer="yes", help=""): + """Ask user at stdin a yes or no question + + :param question: question text to user + :param default_answer: should be "yes" or "no" + :param help: help text string + :return: :type: bool + """ + if default_answer == "yes": + prompt_txt = "{question} [Y/n] ".format(question=question) + elif default_answer == "no": # explicit no + prompt_txt = "{question} [y/N] ".format(question=question) + else: + raise ValueError("default_answer must be 'yes' or 'no'!") + + while True: + try: + answer = input(prompt_txt) + if answer: + if answer == "?": + print(help) + continue + else: + return strtobool(answer) + else: + return strtobool(default_answer) + except ValueError: + sys.stderr.write("Please respond with 'yes' or 'no' " + "(or 'y' or 'n').\n") + except KeyboardInterrupt: + sys.stderr.write("Query interrupted by user, exiting now ...") + exit(0) + + +def query(question, default_answer="", help=""): + """Ask user a question + + :param question: question text to user + :param default_answer: any default answering text string + :param help: help text string + :return: stripped answer string + """ + prompt_txt = "{question} [{default_answer}] ".format(question=question, default_answer=default_answer) + + while True: + answer = input(prompt_txt).strip() + + if answer: + if answer == "?": + print(help) + continue + else: + return answer + else: + return default_answer \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..df1c8803b8dbe9b44bd94d87ff2b17949139ca82 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1 @@ +"""Unit test package for laborchestrator.""" diff --git a/tests/orchestrator_costruction_test.py b/tests/orchestrator_costruction_test.py new file mode 100755 index 0000000000000000000000000000000000000000..ca57614424d276917ede2ae95691937e4a6b70f5 --- /dev/null +++ 
b/tests/orchestrator_costruction_test.py @@ -0,0 +1,27 @@ +import pytest +from laborchestrator.orchestrator_implementation import Orchestrator +from laborchestrator.orchestrator_interface import ( + FormalOrchestratorConfigInterface, + FormalLoggingInterface, + FormalProcessStepControllerInterface, + FormalProcessControllerInterface, +) + + +def test_construction(): + orchestrator = Orchestrator() + + print(f"orchestrator fulfills requirements of {FormalOrchestratorConfigInterface}: {isinstance(orchestrator, FormalOrchestratorConfigInterface)}") + print(f"orchestrator fulfills requirements of {FormalLoggingInterface}: {isinstance(orchestrator, FormalLoggingInterface)}") + print(f"orchestrator fulfills requirements of {FormalProcessStepControllerInterface}: {isinstance(orchestrator, FormalProcessStepControllerInterface)}") + print(f"orchestrator fulfills requirements of {FormalProcessControllerInterface}: {isinstance(orchestrator, FormalProcessControllerInterface)}") + + assert isinstance(orchestrator, FormalOrchestratorConfigInterface) + assert isinstance(orchestrator, FormalLoggingInterface) + assert isinstance(orchestrator, FormalProcessStepControllerInterface) + assert isinstance(orchestrator, FormalProcessControllerInterface) + print("success") + + +if __name__ == "__main__": + test_construction() diff --git a/tests/orchestrator_interface_test.py b/tests/orchestrator_interface_test.py new file mode 100755 index 0000000000000000000000000000000000000000..5cad43b56b517d3f455be27e6583aff881aca14e --- /dev/null +++ b/tests/orchestrator_interface_test.py @@ -0,0 +1,45 @@ +""" +Creates a **PythonLabOrchestrator** OrchestratorInterface. +Uses the ProcessFinder to add multiple processes and removes one of them. +Tests the list of processes in the orchestrator. +""" + + +import pytest +from laborchestrator.orchestrator_implementation import Orchestrator +from os import path + + +to_add = [ + "dynamic_resource_process.py", + "growth_centrifugation_process.py", + "runtime_decision_process.py", +] + + +def test_process_management(): + # create an orchestrator + orchestrator = Orchestrator() + filenames = [path.join(path.dirname(__file__), 'test_data', file) for file in to_add] + + # find all importable processes and import those, that match the names in to_add + for filename in filenames: + orchestrator.add_process(file_path=filename, name=path.splitext(path.basename(filename))[0]) + + # check whether all processes have been added + loaded_processes = orchestrator.processes + for file in to_add: + name = file.strip('.py') + assert name in [p.name for p in loaded_processes] + + # remove one process + to_remove = to_add[0].strip('.py') + orchestrator.remove_processes([to_remove]) + + # check, that the process got removed + assert to_remove not in [p.name for p in orchestrator.processes] + + +if __name__ == "__main__": + test_process_management() + print("Success :-)") diff --git a/tests/scheduler_connection_test.py b/tests/scheduler_connection_test.py new file mode 100755 index 0000000000000000000000000000000000000000..d49d964ae177f7d9a4ee686ed9df928453ad5bce --- /dev/null +++ b/tests/scheduler_connection_test.py @@ -0,0 +1,73 @@ +""" +Starts a scheduler server and tries to establish a connection. 
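+The pytest fixture below starts the SchedulerServer on 127.0.0.1:50071.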
+Sends a scheduling task to the server and assumes a reasonable solution +""" +import time +import pytest +from sila2.framework import CommandExecutionStatus +from os import path + +from pythonlabscheduler.sila_server import Server as SchedulerServer +from laborchestrator.engine import ScheduleManager, WFGManager +from laborchestrator.structures import SchedulingInstance, ProcessExecutionState +from laborchestrator.pythonlab_reader import PythonLabReader +from laborchestrator.workflowgraph import WorkFlowGraph as wfg +try: + from .test_data.inc_read_process import IncReadProcess +except: + from test_data.inc_read_process import IncReadProcess + + +@pytest.fixture +def server(scope='session'): + server = SchedulerServer() + server.start_insecure('127.0.0.1', 50071) + yield server + server.stop() + + +def test_connection_with_scheduler_server(server): + # create a schedule manager and give it a problem instance with a loaded process + jssp = SchedulingInstance() + schedule_manager = ScheduleManager(jssp) + schedule_manager.hold_rescheduling() + sm_process = PythonLabReader.read_process(IncReadProcess(), 'P1') + sm_process.status = ProcessExecutionState.SCHEDULED + jssp.add_process(sm_process) + + # necessary to set origin information to movements + wfg_manager = WFGManager(jssp, schedule_manager) + wfg_manager.set_origins() + + # try to connect the manager to the server + schedule_manager.try_scheduler_connection() + + yml_filename = path.join(path.abspath(path.dirname(__file__)), 'test_data', 'sila_server_config_changed.yml') + with open(yml_filename, 'r') as in_stream: + schedule_manager.configure_lab(in_stream.read()) + assert schedule_manager.is_connected_to_scheduler() + + # parse a process and get the workflow graph as a string + jobs = schedule_manager.extract_near_future(20) + + scheduler_client = schedule_manager.scheduler_client + + # select the quick heuristic + schedule_manager.scheduler_client.SchedulingService.SelectAlgorithm("BottleneckPD") + + # test the new structure + sila_graph = wfg.create_sila_structure_from_jobs(jobs.values(), jssp.combined_wfg) + cmd = scheduler_client.SchedulingService.ComputeSchedule(WorkflowGraph=sila_graph, MaxComputationTime=5) + + # wait for the command to finish and get the result + while not cmd.done: + time.sleep(.1) + assert cmd.status == CommandExecutionStatus.finishedSuccessfully + wfg.create_schedule_from_sila_struct(cmd.get_responses().Result.Schedule) + + +if __name__ == "__main__": + server = SchedulerServer() + server.start_insecure("127.0.0.1", 50080) + test_connection_with_scheduler_server(server) + print("Success :-)") diff --git a/tests/simulation_test.py b/tests/simulation_test.py new file mode 100755 index 0000000000000000000000000000000000000000..3a11ebc717c8877c971b79a603a32f9f7eecf949 --- /dev/null +++ b/tests/simulation_test.py @@ -0,0 +1,52 @@ +import pytest +from laborchestrator.orchestrator_implementation import Orchestrator +from pythonlabscheduler.sila_server import Server as SchedulerServer +from os import path +import sys +import time + + +# this simulation should not take more than 20 seconds. Otherwise, it will fail. 
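+# (test_simulated_experiment polls orchestrator.process_finished('P1') and asserts this limit is not exceeded)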
+MAX_SIMULATION_TIME = 20 + + +@pytest.fixture +def server(scope='session'): + server = SchedulerServer() + server.start_insecure('127.0.0.1', 50071) + yield server + server.stop() + + +def test_simulated_experiment(server): + # this is necessary for the process parsing via python-lab-string to work + sys.path.append(path.join(path.abspath(__file__), 'test_data')) + orchestrator = Orchestrator() + orchestrator.schedule_manager.try_scheduler_connection() + yml_filename = path.join(path.abspath(path.dirname(__file__)), 'test_data', 'sila_server_config_changed.yml') + with open(yml_filename, 'r') as in_stream: + orchestrator.schedule_manager.configure_lab(in_stream.read()) + orchestrator.schedule_manager.scheduler_client.SchedulingService.SelectAlgorithm("BottleneckPD") + filename = path.join(path.dirname(__file__), 'test_data', 'runtime_decision_process.py') + orchestrator.add_process(file_path=filename, name='P1') + timeout = time.time() + 4 + while not orchestrator.schedule_manager.is_connected_to_scheduler(): + assert time.time() < timeout + time.sleep(.2) + orchestrator.simulate_all_processes(150) + + timeout = time.time() + MAX_SIMULATION_TIME + while not orchestrator.process_finished('P1'): + time.sleep(.1) + assert time.time() < timeout + #print(f"test took {MAX_SIMULATION_TIME - (timeout - time.time())} seconds") + #print("Process successfully finished") + + +if __name__ == "__main__": + server = SchedulerServer() + server.start_insecure("127.0.0.1", 50080) + test_simulated_experiment(server) + + + diff --git a/tests/test_data/__init__.py b/tests/test_data/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tests/test_data/dynamic_resource_process.py b/tests/test_data/dynamic_resource_process.py new file mode 100644 index 0000000000000000000000000000000000000000..ec24c99aa28db496f056aa6bcbfd0b250af89b6c --- /dev/null +++ b/tests/test_data/dynamic_resource_process.py @@ -0,0 +1,70 @@ +from pythonlab.resources.services.moving import MoverServiceResource +from pythonlab.resources.services.centrifugation import CentrifugeServiceResource +from pythonlab.resources.services.analysis import PlateReaderServiceResource +from pythonlab.resources.services.labware_storage import LabwareStorageResource as ContainerStorageResource +from pythonlab.resources.services.incubation import IncubatorServiceResource +from pythonlab.resources.services.barcode import BarcodeReaderServiceResource +from pythonlab.resources.services.liquid_handling import LiquidHandlerServiceResource +from pythonlab.resource import LabwareResource as ContainerResource +from pythonlab.resource import DynamicLabwareResource as ReagentResource +from pythonlab.process import PLProcess + + +class Milestone3Process(PLProcess): + def __init__(self, priority=10): # 0 has highest priority + + self.num_mw_plates = 8 # different number + + super().__init__(priority=priority) + + def create_resources(self): + self.hotel = ContainerStorageResource(proc=self, name="Carousel", capacity=200) + self.incubator1 = IncubatorServiceResource(proc=self, name="Cytomat1550_1") + self.incubator2 = IncubatorServiceResource(proc=self, name="Cytomat1550_2") + self.incubator4 = IncubatorServiceResource(proc=self, name="Cytomat2C") + self.robot_arm = MoverServiceResource(proc=self, name="F5") + self.mover_pool = MoverServiceResource(proc=self, name=None) + self.reader = PlateReaderServiceResource(proc=self, name="VarioskanLUX") + self.barcode_reader = 
BarcodeReaderServiceResource(proc=self, name="BarCodeReaderMS3") + self.centrifuge = CentrifugeServiceResource(proc=self, name="Rotanta") + self.pipetter = LiquidHandlerServiceResource(proc=self, name="Bravo") + self.containers = [ContainerResource(proc=self, name=f"expression_cont_{cont}", lidded=True, filled=True) + for cont in range(self.num_mw_plates)] + self.induction_container = ReagentResource(proc=self, name="InductMedium", filled=True, outside_cost=50, + priority=10) + + def init_service_resources(self): + # setting start position of containers + super().init_service_resources() + for cont in self.containers: + cont.set_start_position( + self.hotel, self.hotel.next_free_position) + self.induction_container.set_start_position(self.incubator4, 22) + + def process(self): + incubation_duration = 3600 + duration_centrifugation = 1200 + induction_protocol = "SMA_Liquid_Transfer_1pl.pro" + est_induction_duration = 70 + + for cont in self.containers: + self.robot_arm.read_barcode(cont, reader=self.barcode_reader) + self.robot_arm.move(cont, target_loc=self.incubator1) + for cont in self.containers[:4]: + self.incubator1.incubate(cont, duration=incubation_duration/2, temperature=310) + self.robot_arm.move(cont, target_loc=self.reader, lidded=False) + self.reader.single_read(cont, method='211001_varioskan_single_well_600') + self.robot_arm.move(cont, target_loc=self.pipetter, lidded=False, position=8) + self.pipetter.executeProtocol(cont, induction_protocol, reagents=[self.induction_container], + reagent_pos=[3], duration=est_induction_duration) + self.robot_arm.move(cont, target_loc=self.incubator1, lidded=True) + self.incubator1.incubate(cont, duration=incubation_duration/2, temperature=310) + for cont in self.containers[4:]: + self.incubator1.incubate(cont, duration=incubation_duration, temperature=310) + + # move to centrifuge + for cont in self.containers: + self.robot_arm.move(cont, target_loc=self.centrifuge) + self.centrifuge.centrifuge([cont], duration=duration_centrifugation, rpm=50) + self.robot_arm.move(cont, target_loc=self.hotel, lidded=True) + print("FIN") diff --git a/tests/test_data/empty_process.py b/tests/test_data/empty_process.py new file mode 100644 index 0000000000000000000000000000000000000000..2da1faaabb87dae875be733075472ab6a27aaca9 --- /dev/null +++ b/tests/test_data/empty_process.py @@ -0,0 +1,42 @@ +from abc import ABC + +from pythonlab.resources.services.moving import MoverServiceResource +from pythonlab.resources.services.centrifugation import CentrifugeServiceResource +from pythonlab.resources.services.analysis import PlateReaderServiceResource +from pythonlab.resources.services.labware_storage import LabwareStorageResource as ContainerStorageResource +from pythonlab.resources.services.incubation import IncubatorServiceResource +from pythonlab.resources.services.liquid_handling import LiquidHandlerServiceResource +from pythonlab.resource import LabwareResource as ContainerResource +from pythonlab.resource import DynamicLabwareResource as ReagentResource +from pythonlab.process import PLProcess + + +class EmptyProcess(PLProcess, ABC): + def __init__(self, process_name: str, num_plates: int = 0, priority=10): # 0 has highest priority + + self.num_mw_plates = num_plates # different number + self.name = process_name + + super().__init__(priority=priority) + + def create_resources(self): + self.hotel = ContainerStorageResource(proc=self, name="Carousel", capacity=200) + self.rotanta_transfer = ContainerStorageResource(proc=self, name="Transfer") + self.incubator1 
= IncubatorServiceResource(proc=self, name="Incubator1") + self.incubator2 = IncubatorServiceResource(proc=self, name="Incubator2") + self.incubator3 = IncubatorServiceResource(proc=self, name="Incubator3") + self.incubator4 = IncubatorServiceResource(proc=self, name="Incubator4") + self.robot_arm = MoverServiceResource(proc=self, name="Mover") + self.reader = PlateReaderServiceResource(proc=self, name="PlateReader1") + self.reader2 = PlateReaderServiceResource(proc=self, name="Plate_Reader2") + self.pipetter = LiquidHandlerServiceResource(proc=self, name="Liquid_Handler") + self.centrifuge = CentrifugeServiceResource(proc=self, name="Centrifuge") + self.reader_pool = PlateReaderServiceResource(proc=self, name=None) + self.mover_pool = MoverServiceResource(proc=self, name=None) + + # only container 0 will be marked filled for testing purposes + self.containers = [ContainerResource(proc=self, name=f"{self.name}_cont_{cont}", lidded=True, filled=False) + for cont in range(self.num_mw_plates)] + + def process(self): + raise NotImplementedError diff --git a/tests/test_data/growth_centrifugation_process.py b/tests/test_data/growth_centrifugation_process.py new file mode 100644 index 0000000000000000000000000000000000000000..f4dc1ad1edbd06b9c4023f5f63b347e199d0f860 --- /dev/null +++ b/tests/test_data/growth_centrifugation_process.py @@ -0,0 +1,95 @@ +from pythonlab.resources.services.moving import MoverServiceResource +from pythonlab.resources.services.centrifugation import CentrifugeServiceResource +from pythonlab.resources.services.analysis import PlateReaderServiceResource +from pythonlab.resources.services.labware_storage import LabwareStorageResource as ContainerStorageResource +from pythonlab.resources.services.incubation import IncubatorServiceResource +from pythonlab.resource import LabwareResource as ContainerResource +from pythonlab.process import PLProcess + + +class Milestone2Process(PLProcess): + def __init__(self, priority=10): # 0 has highest priority + + self.num_mw_plates = 4 # different number + + super().__init__(priority=priority) + + def create_resources(self): + self.hotel = ContainerStorageResource(proc=self, name="Carousel", capacity=200) + self.incubator = IncubatorServiceResource(proc=self, name="Incubator1") + self.robot_arm = MoverServiceResource(proc=self, name="Mover") + self.reader = PlateReaderServiceResource(proc=self, name="Plate_Reader1") + self.centrifuge = CentrifugeServiceResource(proc=self, name="Centrifuge") + + self.containers = [ContainerResource(proc=self, name=f"expression_cont_{cont}", lidded=True, filled=False) + for cont in range(self.num_mw_plates)] + self.containers[0].priority = 2 + + def init_service_resources(self): + # setting start position of containers + super().init_service_resources() + for cont in self.containers: + cont.set_start_position( + self.hotel, self.hotel.next_free_position) + + def process(self): + incubation_duration = 3600 + duration_centrifugation = 1200 + + cont1 = self.containers[0] + cont2 = self.containers[1] + cont3 = self.containers[2] + cont4 = self.containers[3] + # loop construction follows later + # read the barcodes + self.robot_arm.read_barcode(cont1) + self.robot_arm.read_barcode(cont2) + self.robot_arm.read_barcode(cont3) + self.robot_arm.read_barcode(cont4) + # move to incubator + self.robot_arm.move(cont1, target_loc=self.incubator) + self.robot_arm.move(cont2, target_loc=self.incubator) + self.robot_arm.move(cont3, target_loc=self.incubator) + self.robot_arm.move(cont4, target_loc=self.incubator) + # 
incubate first half + self.incubator.incubate(cont1, duration=incubation_duration/2, temperature=310) + self.incubator.incubate(cont2, duration=incubation_duration/2, temperature=310) + # move to reader + self.robot_arm.move(cont1, target_loc=self.reader, lidded=False) + self.robot_arm.move(cont2, target_loc=self.reader, lidded=False) + # make absorbance measurement + self.reader.single_read(cont1, wavelengths=[600, 660], temperature=305, method='211001_varioskan_single_well_600') + self.reader.single_read(cont2, wavelengths=[600, 660], temperature=305, method='211001_varioskan_single_well_600') + # move back to incubator + self.robot_arm.move(cont1, target_loc=self.incubator, lidded=True) + self.robot_arm.move(cont2, target_loc=self.incubator, lidded=True) + # incubate second half + self.incubator.incubate(cont1, duration=incubation_duration/2, temperature=310) + self.incubator.incubate(cont2, duration=incubation_duration/2, temperature=310) + # incubate the others without measurement + self.incubator.incubate(cont3, duration=incubation_duration, temperature=310) + self.incubator.incubate(cont4, duration=incubation_duration, temperature=310) + # move to centrifuge + self.robot_arm.move(cont1, target_loc=self.centrifuge) + self.robot_arm.move(cont2, target_loc=self.centrifuge) + self.robot_arm.move(cont3, target_loc=self.centrifuge) + self.robot_arm.move(cont4, target_loc=self.centrifuge) + # centrifuge + self.centrifuge.centrifuge(self.containers, duration=duration_centrifugation, rpm=50) + # move to reader + self.robot_arm.move(cont1, target_loc=self.reader, lidded=False) + self.robot_arm.move(cont2, target_loc=self.reader, lidded=False) + self.robot_arm.move(cont3, target_loc=self.reader, lidded=False) + self.robot_arm.move(cont4, target_loc=self.reader, lidded=False) + # absorbance read + self.reader.single_read(cont1, wavelengths=[600, 660], temperature=305, method='211001_varioskan_single_well_600') + self.reader.single_read(cont2, wavelengths=[600, 660], temperature=305, method='211001_varioskan_single_well_600') + self.reader.single_read(cont3, wavelengths=[600, 660], temperature=305, method='211001_varioskan_single_well_600') + self.reader.single_read(cont4, wavelengths=[600, 660], temperature=305, method='211001_varioskan_single_well_600') + # move back to hotel + self.robot_arm.move(cont1, target_loc=self.hotel, lidded=True) + self.robot_arm.move(cont2, target_loc=self.hotel, lidded=True) + self.robot_arm.move(cont3, target_loc=self.hotel, lidded=True) + self.robot_arm.move(cont4, target_loc=self.hotel, lidded=True) + + print("FIN") diff --git a/tests/test_data/inc_read_process.py b/tests/test_data/inc_read_process.py new file mode 100755 index 0000000000000000000000000000000000000000..ccd247cc0ac16bc647ae7ab2f7f80cb639673252 --- /dev/null +++ b/tests/test_data/inc_read_process.py @@ -0,0 +1,56 @@ +from pythonlab.resources.services.moving import MoverServiceResource +from pythonlab.resources.services.analysis import PlateReaderServiceResource +from pythonlab.resources.services.labware_storage import LabwareStorageResource as ContainerStorageResource +from pythonlab.resources.services.incubation import IncubatorServiceResource +from pythonlab.resource import LabwareResource as ContainerResource +from pythonlab.process import PLProcess + + +class IncReadProcess(PLProcess): + def __init__(self, + priority=10): # 0 has highest priority + + self.num_mw_plates = 2 # different number + + super().__init__(priority=priority) + + def create_resources(self): + self.hotel = 
ContainerStorageResource(proc=self, name="Carousel", capacity=200) + self.incubator = IncubatorServiceResource(proc=self, name="Incubator1") + self.robot_arm = MoverServiceResource(proc=self, name="Mover") + self.reader = PlateReaderServiceResource(proc=self, name="Plate_Reader") + + self.containers = [ContainerResource(proc=self, name=f"expression_cont_{cont}", lidded=True, filled=True) + for cont in range(self.num_mw_plates)] + self.containers[0].priority = 2 + + def init_service_resources(self): + # setting start position of containers + super().init_service_resources() + for cont in self.containers: + cont.set_start_position( + self.hotel, self.hotel.next_free_position+14) + + def process(self): + incubation_duration = 120 + + cont1 = self.containers[0] + for cont in self.containers: + self.robot_arm.read_barcode(cont) + # move to incubator + self.robot_arm.move(cont, target_loc=self.incubator) + # incubate + self.incubator.incubate(cont, duration=incubation_duration, temperature=310) + if cont == cont1: + # move to reader + self.robot_arm.move(cont, target_loc=self.reader, lidded=False) + # make absorbance measurement + self.reader.single_read(cont, method='211001_varioskan_single_well_600', duration=60) + # move back to hotel + for cont in self.containers: + if cont == cont1: + self.robot_arm.move(cont, target_loc=self.hotel, lidded=True) + else: + self.robot_arm.move(cont, target_loc=self.hotel, position=30, lidded=True) + + print("FIN") diff --git a/tests/test_data/load_conflic_test1.py b/tests/test_data/load_conflic_test1.py new file mode 100644 index 0000000000000000000000000000000000000000..2a2530a23b2a260ed85a76177a1d78bbb9f96e17 --- /dev/null +++ b/tests/test_data/load_conflic_test1.py @@ -0,0 +1,70 @@ +from abc import ABC + +from pythonlab.resources.services.moving import MoverServiceResource +from pythonlab.resources.services.centrifugation import CentrifugeServiceResource +from pythonlab.resources.services.analysis import PlateReaderServiceResource +from pythonlab.resources.services.labware_storage import LabwareStorageResource as ContainerStorageResource +from pythonlab.resources.services.incubation import IncubatorServiceResource +from pythonlab.resources.services.liquid_handling import LiquidHandlerServiceResource +from pythonlab.resource import LabwareResource as ContainerResource +from pythonlab.resource import DynamicLabwareResource as ReagentResource +from pythonlab.process import PLProcess + + +class LoadConflictTest(PLProcess, ABC): + def __init__(self, num_plates: int = 4, priority=10): # 0 has highest priority + + self.num_mw_plates = num_plates # different number + self.name = "LoadConflictTest" + + super().__init__(priority=priority) + + def create_resources(self): + self.hotel = ContainerStorageResource(proc=self, name="Carousel", capacity=200) + self.tip_stack = ContainerStorageResource(proc=self, name="TipRack", capacity=2) + self.rotanta_transfer = ContainerStorageResource(proc=self, name="Transfer") + self.pipetter = LiquidHandlerServiceResource(proc=self, name="Liquid_Handler") + self.incubator1 = IncubatorServiceResource(proc=self, name="Incubator1") + self.incubator2 = IncubatorServiceResource(proc=self, name="Incubator2") + self.incubator3 = IncubatorServiceResource(proc=self, name="Incubator3") + self.incubator4 = IncubatorServiceResource(proc=self, name="Incubator4") + self.robot_arm = MoverServiceResource(proc=self, name="Mover") + self.reader = PlateReaderServiceResource(proc=self, name="PlateReader1") + self.reader2 = 
PlateReaderServiceResource(proc=self, name="Plate_Reader2") + self.pipetter = LiquidHandlerServiceResource(proc=self, name="Liquid_Handler") + self.centrifuge = CentrifugeServiceResource(proc=self, name="Centrifuge") + self.reader_pool = PlateReaderServiceResource(proc=self, name=None) + self.mover_pool = MoverServiceResource(proc=self, name=None) + + # only container 0 will be marked filled for testing purposes + self.containers = [ContainerResource(proc=self, name=f"{self.name}_cont_{cont}", lidded=True, filled=False) + for cont in range(self.num_mw_plates)] + self.tip_set1 = ReagentResource(proc=self, name="TipSet1", outside_cost=0) + #self.reagent1 = ReagentResource(proc=self, name="Reagent1", outside_cost=50) + #self.reagent2 = ReagentResource(proc=self, name="Reagent1", outside_cost=50) + + def init_service_resources(self): + for i, cont in enumerate(self.containers): + cont.set_start_position(self.hotel, i) + self.tip_set1.set_start_position(self.hotel, 100) + + def process(self): + protocol = "some_liquid_handling" + cont1 = self.containers[0] + cont2 = self.containers[1] + cont3 = self.containers[2] + reagent1 = self.containers[3] + + self.robot_arm.move(cont1, self.pipetter, position=8) + self.robot_arm.move(reagent1, self.pipetter, position=4) + self.pipetter.executeProtocol(labware=[cont1, reagent1], protocol=protocol, duration=180) + self.robot_arm.move(cont1, self.incubator4) + self.robot_arm.move(cont2, self.pipetter, position=7) + self.pipetter.executeProtocol(labware=[cont2, reagent1], protocol=protocol, duration=180) + self.robot_arm.move(reagent1, self.incubator4) + self.robot_arm.move(cont3, self.pipetter, position=6) + self.robot_arm.move(self.tip_set1, self.tip_stack) + self.pipetter.executeProtocol(labware=[cont2, cont3, self.tip_set1], protocol=protocol, duration=180) + self.robot_arm.move(cont2, self.incubator4) + self.robot_arm.move(cont3, self.incubator4) + self.robot_arm.move(self.tip_set1, self.hotel, position=100) diff --git a/tests/test_data/runtime_decision_process.py b/tests/test_data/runtime_decision_process.py new file mode 100644 index 0000000000000000000000000000000000000000..75c9d056ce061aeda816962343b3ac6e2f5008cb --- /dev/null +++ b/tests/test_data/runtime_decision_process.py @@ -0,0 +1,75 @@ +from numpy import random + +from pythonlab.resources.services.moving import MoverServiceResource +from pythonlab.resources.services.centrifugation import CentrifugeServiceResource +from pythonlab.resources.services.analysis import PlateReaderServiceResource +from pythonlab.resources.services.labware_storage import LabwareStorageResource as ContainerStorageResource +from pythonlab.resources.services.incubation import IncubatorServiceResource +from pythonlab.resources.services.liquid_handling import LiquidHandlerServiceResource +from pythonlab.resource import LabwareResource as ContainerResource +from pythonlab.process import PLProcess + + +class RuntimeDecisionProcess(PLProcess): + def __init__(self, priority=10): # 0 has highest priority + self.num_plates = 2 + self.name = "RuntimeDecisionProcess" + super().__init__(priority=priority)#, num_plates=2, process_name="RuntimeDecisionProcess") + + def create_resources(self): + self.hotel = ContainerStorageResource(proc=self, name="Carousel") + self.rotanta_transfer = ContainerStorageResource(proc=self, name="Transfer") + self.incubator1 = IncubatorServiceResource(proc=self, name="Incubator1") + self.incubator2 = IncubatorServiceResource(proc=self, name="Incubator2") + self.incubator3 = 
diff --git a/tests/test_data/runtime_decision_process.py b/tests/test_data/runtime_decision_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..75c9d056ce061aeda816962343b3ac6e2f5008cb
--- /dev/null
+++ b/tests/test_data/runtime_decision_process.py
@@ -0,0 +1,75 @@
+from numpy import random
+
+from pythonlab.resources.services.moving import MoverServiceResource
+from pythonlab.resources.services.centrifugation import CentrifugeServiceResource
+from pythonlab.resources.services.analysis import PlateReaderServiceResource
+from pythonlab.resources.services.labware_storage import LabwareStorageResource as ContainerStorageResource
+from pythonlab.resources.services.incubation import IncubatorServiceResource
+from pythonlab.resources.services.liquid_handling import LiquidHandlerServiceResource
+from pythonlab.resource import LabwareResource as ContainerResource
+from pythonlab.process import PLProcess
+
+
+class RuntimeDecisionProcess(PLProcess):
+    def __init__(self, priority=10):  # 0 has highest priority
+        self.num_plates = 2
+        self.name = "RuntimeDecisionProcess"
+        super().__init__(priority=priority)  # , num_plates=2, process_name="RuntimeDecisionProcess")
+
+    def create_resources(self):
+        self.hotel = ContainerStorageResource(proc=self, name="Carousel")
+        self.rotanta_transfer = ContainerStorageResource(proc=self, name="Transfer")
+        self.incubator1 = IncubatorServiceResource(proc=self, name="Incubator1")
+        self.incubator2 = IncubatorServiceResource(proc=self, name="Incubator2")
+        self.incubator3 = IncubatorServiceResource(proc=self, name="Incubator3")
+        self.incubator4 = IncubatorServiceResource(proc=self, name="Incubator4")
+        self.robot_arm = MoverServiceResource(proc=self, name="Mover")
+        self.reader = PlateReaderServiceResource(proc=self, name="PlateReader")
+        self.reader2 = PlateReaderServiceResource(proc=self, name="Plate_Reader2")
+        self.pipetter = LiquidHandlerServiceResource(proc=self, name="Liquid_Handler")
+        self.centrifuge = CentrifugeServiceResource(proc=self, name="Centrifuge")
+        self.reader_pool = PlateReaderServiceResource(proc=self, name=None)
+        self.mover_pool = MoverServiceResource(proc=self, name=None)
+
+        # containers are created empty (filled=False) for this test
+        self.containers = [ContainerResource(proc=self, name=f"{self.name}_cont_{cont}", lidded=True, filled=False)
+                           for cont in range(self.num_plates)]
+
+    def init_service_resources(self):
+        # setting start position of containers
+        super().init_service_resources()
+        for cont in self.containers:
+            cont.set_start_position(
+                self.hotel, self.hotel.next_free_position)
+
+    def process(self):
+        incubation_duration1 = 3
+        incubation_duration2 = 9
+
+        cont1 = self.containers[0]
+        cont2 = self.containers[1]
+        for cont in self.containers:
+            # read the barcodes
+            self.mover_pool.read_barcode(cont)
+            # move to incubator
+            self.mover_pool.move(cont, target_loc=self.incubator1)
+        # incubate
+        self.incubator1.incubate(cont1, duration=incubation_duration1, temperature=310)
+        self.incubator1.incubate(cont2, duration=incubation_duration2, temperature=310)
+        # move to reader
+        self.mover_pool.move(cont1, target_loc=self.reader_pool, lidded=False)
+        # make absorbance measurement
+        absorb = self.reader_pool.single_read(cont1, method='211001_varioskan_single_well_600')
+        aver_abs = self.average(absorb)
+        if aver_abs < .6:
+            # do some more incubation
+            self.mover_pool.move(cont1, target_loc=self.incubator1, lidded=True)
+            self.incubator1.incubate(cont1, duration=incubation_duration1, temperature=310)
+
+        # move back to hotel
+        self.mover_pool.move(cont1, target_loc=self.hotel, lidded=True)
+        self.mover_pool.move(cont2, target_loc=self.hotel, position=27)
+
+    def average(self, readings):
+        value = random.rand() + .1  # stub: random absorbance so the runtime branch can go either way
+        return value
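The average() above is deliberately a random stub so that the scheduler's runtime branch (re-incubate when the mean absorbance is below 0.6) can be exercised without real instrument data. A deterministic variant, under the assumption that single_read() returns an iterable of numeric absorbance values (this diff does not guarantee that), might look like:

from statistics import mean

def average(self, readings):
    # Mean absorbance over all returned values; 0.0 for empty input.
    values = [float(v) for v in readings]
    return mean(values) if values else 0.0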
diff --git a/tests/test_data/sila_server_config_changed.yml b/tests/test_data/sila_server_config_changed.yml
new file mode 100644
index 0000000000000000000000000000000000000000..38681699bd10ea8a7b0af6b78d95d36d0356a60a
--- /dev/null
+++ b/tests/test_data/sila_server_config_changed.yml
@@ -0,0 +1,50 @@
+# ______ Lab Automation setup ____________
+description:
+    - "Demo lab configuration. Only the information relevant to the scheduler is included.
+      For the meaning of the fields, see the documentation."
+# _________ Incubators ____________
+sila_servers:
+    incubators:
+        Incubator1:
+            capacity: 32
+        Incubator2:
+            capacity: 32
+        Incubator3:
+            capacity: 32
+        Incubator4:
+            capacity: 30
+    # _________ Plate Readers ____________
+    plate_readers:
+        Plate_Reader:
+            capacity: 1
+        Plate_Reader2:
+            capacity: 1
+    # _________ Liquid Handlers ____________
+    liquid_handlers:
+        Liquid_Handler:
+            capacity: 6
+            process_capacity: 1
+            allows_overlap: False
+    movers:
+        Mover:
+            capacity: 1
+            allows_overlap: False
+    centrifuges:
+        Centrifuge:
+            min_capacity: 4
+            capacity: 4
+            allows_overlap: False
+    storage:
+        Carousel:
+            capacity: 150
+        Transfer:
+            capacity: 4
+
+# ------ Translation to used resources in the process description ------
+pythonlab_translation:
+    incubators: IncubatorServiceResource
+    centrifuges: CentrifugeServiceResource
+    movers: MoverServiceResource
+    liquid_handlers: LiquidHandlerServiceResource
+    plate_readers: PlateReaderServiceResource
+    storage: LabwareStorageResource
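The YAML above carries only the scheduler-relevant capacities plus a mapping from server categories to pythonlab resource classes. A short sketch of reading it with PyYAML; the loading code is illustrative and not laborchestrator API:

import yaml

with open("tests/test_data/sila_server_config_changed.yml") as fh:
    cfg = yaml.safe_load(fh)

# Capacity per incubator, e.g. {'Incubator1': 32, 'Incubator2': 32, 'Incubator3': 32, 'Incubator4': 30}
incubator_capacity = {name: spec["capacity"] for name, spec in cfg["sila_servers"]["incubators"].items()}

# Category -> pythonlab resource class name, e.g. 'storage' -> 'LabwareStorageResource'
translation = cfg["pythonlab_translation"]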