diff --git a/.dockerignore b/.dockerignore index 122da4923..81f91339f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -37,20 +37,47 @@ !target/x86_64-unknown-linux-gnu/release/controller !target/x86_64-unknown-linux-gnu/debug/agent !target/x86_64-unknown-linux-gnu/release/agent +!target/x86_64-unknown-linux-gnu/debug/agent-full +!target/x86_64-unknown-linux-gnu/release/agent-full !target/x86_64-unknown-linux-gnu/debug/udev-video-broker !target/x86_64-unknown-linux-gnu/release/udev-video-broker +!target/x86_64-unknown-linux-gnu/debug/webhook-configuration +!target/x86_64-unknown-linux-gnu/release/webhook-configuration +!target/x86_64-unknown-linux-gnu/release/udev-discovery-handler +!target/x86_64-unknown-linux-gnu/release/debug-echo-discovery-handler +!target/x86_64-unknown-linux-gnu/release/http-discovery-handler +!target/x86_64-unknown-linux-gnu/release/onvif-discovery-handler +!target/x86_64-unknown-linux-gnu/release/opcua-discovery-handler !target/aarch64-unknown-linux-gnu/debug/controller !target/aarch64-unknown-linux-gnu/release/controller !target/aarch64-unknown-linux-gnu/debug/agent !target/aarch64-unknown-linux-gnu/release/agent +!target/aarch64-unknown-linux-gnu/debug/agent-full +!target/aarch64-unknown-linux-gnu/release/agent-full !target/aarch64-unknown-linux-gnu/debug/udev-video-broker !target/aarch64-unknown-linux-gnu/release/udev-video-broker +!target/aarch64-unknown-linux-gnu/debug/webhook-configuration +!target/aarch64-unknown-linux-gnu/release/webhook-configuration +!target/aarch64-unknown-linux-gnu/release/udev-discovery-handler +!target/aarch64-unknown-linux-gnu/release/debug-echo-discovery-handler +!target/aarch64-unknown-linux-gnu/release/http-discovery-handler +!target/aarch64-unknown-linux-gnu/release/onvif-discovery-handler +!target/aarch64-unknown-linux-gnu/release/opcua-discovery-handler !target/armv7-unknown-linux-gnueabihf/debug/controller !target/armv7-unknown-linux-gnueabihf/release/controller !target/armv7-unknown-linux-gnueabihf/debug/agent !target/armv7-unknown-linux-gnueabihf/release/agent +!target/armv7-unknown-linux-gnueabihf/debug/agent-full +!target/armv7-unknown-linux-gnueabihf/release/agent-full !target/armv7-unknown-linux-gnueabihf/debug/udev-video-broker !target/armv7-unknown-linux-gnueabihf/release/udev-video-broker +!target/armv7-unknown-linux-gnueabihf/debug/webhook-configuration +!target/armv7-unknown-linux-gnueabihf/release/webhook-configuration +!target/armv7-unknown-linux-gnueabihf/release/udev-discovery-handler +!target/armv7-unknown-linux-gnueabihf/release/debug-echo-discovery-handler +!target/armv7-unknown-linux-gnueabihf/release/http-discovery-handler +!target/armv7-unknown-linux-gnueabihf/release/onvif-discovery-handler +!target/armv7-unknown-linux-gnueabihf/release/opcua-discovery-handler # Cross toml file needs to be available for making the cross build containers !Cross.toml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 4ec3b06c1..635b3f725 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,3 +1,3 @@ # https://help.github.com/en/articles/about-code-owners#codeowners-syntax -* @bfjelds @kate-goldenring @jiria @britel \ No newline at end of file +* @bfjelds @kate-goldenring @jiria @britel @romoh diff --git a/.github/actions/build-component-per-arch/main.js b/.github/actions/build-component-per-arch/main.js index 665e6c9db..d3e4a7e5f 100644 --- a/.github/actions/build-component-per-arch/main.js +++ b/.github/actions/build-component-per-arch/main.js @@ -62,11 +62,12 @@ async function shell_cmd(cmd) { 
core.setFailed(`Failed with unknown platform: ${core.getInput('platform')}`) return } + console.log(`Makefile build target suffix: ${makefile_target_suffix}`) if (core.getInput('build_rust') == '1') { console.log(`Install Rust`) - child_process.execSync(`curl https://sh.rustup.rs | sh -s -- -y --default-toolchain=1.41.0`); + child_process.execSync(`curl https://sh.rustup.rs | sh -s -- -y --default-toolchain=1.51.0`); const bindir = `${process.env.HOME}/.cargo/bin`; process.env.PATH = `${process.env.PATH}:${bindir}`; diff --git a/.github/workflows/auto-update-dependencies.yml b/.github/workflows/auto-update-dependencies.yml new file mode 100644 index 000000000..766a8188a --- /dev/null +++ b/.github/workflows/auto-update-dependencies.yml @@ -0,0 +1,33 @@ +name: Dependencies autoupdate + +on: + workflow_dispatch: # can be manually run if needed + schedule: + - cron: '0 0 1 * *' # run on first day of each month at 12:00 am UTC time + +env: + CARGO_TERM_COLOR: always + +jobs: + auto-update-dependencies: + runs-on: ubuntu-latest + + steps: + - name: Checkout the head commit of the branch + uses: actions/checkout@v2 + with: + persist-credentials: false + + - name: Install Linux requirements + run: | + apt_dependencies="git curl libssl-dev pkg-config libudev-dev libv4l-dev" + echo "Run apt update and apt install the following dependencies: $apt_dependencies" + sudo apt update + sudo apt install -y $apt_dependencies + + - name: Check for dependency updates + uses: romoh/dependencies-autoupdate@v1.1 + with: + token: ${{ secrets.AKRI_BOT_TOKEN }} + update-command: "'cargo update && cargo test'" + on-changes-command: "'./version.sh -u -p'" diff --git a/.github/workflows/build-agent-container.yml b/.github/workflows/build-agent-container.yml index db45d0ce8..f092b7155 100644 --- a/.github/workflows/build-agent-container.yml +++ b/.github/workflows/build-agent-container.yml @@ -1,41 +1,16 @@ -name: Build Agent +name: Build Agents on: - push: - branches: [ main ] - paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-agent-container.yml - - build/containers/Dockerfile.agent - - agent/** - - shared/** - - version.txt - - build/akri-containers.mk - - Makefile - pull_request: - branches: [ main ] - paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-agent-container.yml - - build/containers/Dockerfile.agent - - agent/** - - shared/** - - version.txt - - build/akri-containers.mk - - Makefile - release: - types: - - published - -env: - AKRI_COMPONENT: agent - MAKEFILE_COMPONENT: agent + workflow_run: + workflows: ["Build Production Rust Code"] + branches: [main] + types: + - completed jobs: per-arch: + if: ${{ github.event.workflow_run.conclusion == 'success' }} runs-on: ubuntu-latest timeout-minutes: 40 strategy: @@ -44,6 +19,9 @@ jobs: - arm64v8 - arm32v7 - amd64 + akri-component: + - agent + - agent-full steps: - name: Checkout the head commit of the branch @@ -60,27 +38,43 @@ jobs: yarn install yarn add @actions/core @actions/github @actions/exec fs - - name: Run Per-Arch component build for ${{ env.AKRI_COMPONENT }} + - name: Download rust build artifacts + uses: dawidd6/action-download-artifact@v2 + with: + workflow: ${{ github.event.workflow_run.workflow_id }} + workflow_conclusion: success + commit: ${{ github.event.workflow_run.head_sha }} + name: rust-${{ matrix.arch }}-binaries + path: /tmp + + - name: Unpack Rust binaries + run: | + tar 
-xvf /tmp/rust-${{ matrix.arch }}-binaries.tar + + - name: Run Per-Arch component build for ${{ matrix.akri-component }} uses: ./.github/actions/build-component-per-arch with: - github_event_name: ${{ github.event_name }} + github_event_name: ${{ github.event.workflow_run.event }} github_ref: ${{ github.ref }} - github_event_action: ${{ github.event.action }} - github_merged: ${{ github.event.pull_request.merged }} - container_name: ${{ env.AKRI_COMPONENT }} + container_name: ${{ matrix.akri-component }} container_prefix: ghcr.io/deislabs/akri container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} + makefile_component_name: ${{ matrix.akri-component }} platform: ${{ matrix.arch }} - build_rust: "1" + build_rust: "0" multi-arch: - if: (github.event_name == 'release') || (github.event_name == 'push' && github.ref == 'refs/heads/main') || (startsWith(github.event_name, 'pull_request') && github.event.action == 'closed' && github.event.pull_request.merged == true && github.ref != 'refs/heads/main') + if: ${{ github.event.workflow_run.conclusion == 'success' }} && ((github.event.workflow_run.event == 'release') || (github.event.workflow_run.event == 'push' && github.ref == 'refs/heads/main')) needs: per-arch runs-on: ubuntu-latest timeout-minutes: 5 + strategy: + matrix: + akri-component: + - agent + - agent-full steps: - name: Checkout repo @@ -97,13 +91,13 @@ jobs: yarn install yarn add @actions/core @actions/github @actions/exec fs - - name: Run Multi-Arch component build for ${{ env.AKRI_COMPONENT }} + - name: Run Multi-Arch component build for ${{ matrix.akri-component }} uses: ./.github/actions/build-component-multi-arch with: - github_event_name: ${{ github.event_name }} - container_name: ${{ env.AKRI_COMPONENT }} + github_event_name: ${{ github.event.workflow_run.event }} + container_name: ${{ matrix.akri-component }} container_prefix: ghcr.io/deislabs/akri container_registry_base_url: ghcr.io container_registry_username: ${{ secrets.crUsername }} container_registry_password: ${{ secrets.crPassword }} - makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} + makefile_component_name: ${{ matrix.akri-component }} diff --git a/.github/workflows/build-anomaly-detection-app-container.yml b/.github/workflows/build-anomaly-detection-app-container.yml index 3a0afc5d2..130a1fa53 100644 --- a/.github/workflows/build-anomaly-detection-app-container.yml +++ b/.github/workflows/build-anomaly-detection-app-container.yml @@ -11,6 +11,7 @@ on: - samples/apps/anomaly-detection-app/** - version.txt - build/akri-containers.mk + - build/akri-python-containers.mk - Makefile pull_request: branches: [ main ] @@ -22,6 +23,7 @@ on: - samples/apps/anomaly-detection-app/** - version.txt - build/akri-containers.mk + - build/akri-python-containers.mk - Makefile release: types: diff --git a/.github/workflows/build-controller-container.yml b/.github/workflows/build-controller-container.yml index 1330df84c..8c512f252 100644 --- a/.github/workflows/build-controller-container.yml +++ b/.github/workflows/build-controller-container.yml @@ -1,33 +1,11 @@ name: Build Controller on: - push: - branches: [ main ] - paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-controller-container.yml - - build/containers/Dockerfile.controller - - controller/** - - shared/** - - version.txt - - 
build/akri-containers.mk - - Makefile - pull_request: - branches: [ main ] - paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-controller-container.yml - - build/containers/Dockerfile.controller - - controller/** - - shared/** - - version.txt - - build/akri-containers.mk - - Makefile - release: - types: - - published + workflow_run: + workflows: ["Build Production Rust Code"] + branches: [main] + types: + - completed env: AKRI_COMPONENT: controller @@ -36,6 +14,7 @@ env: jobs: per-arch: + if: ${{ github.event.workflow_run.conclusion == 'success' }} runs-on: ubuntu-latest timeout-minutes: 40 strategy: @@ -59,13 +38,24 @@ jobs: yarn install yarn add @actions/core @actions/github @actions/exec fs + - name: Download rust build artifacts + uses: dawidd6/action-download-artifact@v2 + with: + workflow: ${{ github.event.workflow_run.workflow_id }} + workflow_conclusion: success + commit: ${{ github.event.workflow_run.head_sha }} + name: rust-${{ matrix.arch }}-binaries + path: /tmp + + - name: Unpack Rust binaries + run: | + tar -xvf /tmp/rust-${{ matrix.arch }}-binaries.tar + - name: Run Per-Arch component build for ${{ env.AKRI_COMPONENT }} uses: ./.github/actions/build-component-per-arch with: - github_event_name: ${{ github.event_name }} + github_event_name: ${{ github.event.workflow_run.event }} github_ref: ${{ github.ref }} - github_event_action: ${{ github.event.action }} - github_merged: ${{ github.event.pull_request.merged }} container_name: ${{ env.AKRI_COMPONENT }} container_prefix: ghcr.io/deislabs/akri container_registry_base_url: ghcr.io @@ -73,10 +63,10 @@ jobs: container_registry_password: ${{ secrets.crPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} platform: ${{ matrix.arch }} - build_rust: "1" + build_rust: "0" multi-arch: - if: (github.event_name == 'release') || (github.event_name == 'push' && github.ref == 'refs/heads/main') || (startsWith(github.event_name, 'pull_request') && github.event.action == 'closed' && github.event.pull_request.merged == true && github.ref != 'refs/heads/main') + if: ${{ github.event.workflow_run.conclusion == 'success' }} && ((github.event.workflow_run.event == 'release') || (github.event.workflow_run.event == 'push' && github.ref == 'refs/heads/main')) needs: per-arch runs-on: ubuntu-latest timeout-minutes: 5 @@ -99,7 +89,7 @@ jobs: - name: Run Multi-Arch component build for ${{ env.AKRI_COMPONENT }} uses: ./.github/actions/build-component-multi-arch with: - github_event_name: ${{ github.event_name }} + github_event_name: ${{ github.event.workflow_run.event }} container_name: ${{ env.AKRI_COMPONENT }} container_prefix: ghcr.io/deislabs/akri container_registry_base_url: ghcr.io diff --git a/.github/workflows/build-discovery-handlers.yml b/.github/workflows/build-discovery-handlers.yml new file mode 100644 index 000000000..780522f51 --- /dev/null +++ b/.github/workflows/build-discovery-handlers.yml @@ -0,0 +1,107 @@ +name: Build Discovery Handlers + +on: + workflow_run: + workflows: ["Build Production Rust Code"] + branches: [main] + types: + - completed + +jobs: + + per-arch: + if: ${{ github.event.workflow_run.conclusion == 'success' }} + runs-on: ubuntu-latest + timeout-minutes: 40 + strategy: + matrix: + arch: + - arm64v8 + - arm32v7 + - amd64 + akri-component: + - debug-echo-discovery + - udev-discovery + - onvif-discovery + - opcua-discovery + + steps: + - name: Checkout the head commit of the branch + uses: actions/checkout@v2 + with: + 
persist-credentials: false + + - name: Prepare To Install + uses: actions/setup-node@v1 + with: + node-version: 12 + - name: Install Deps + run: | + yarn install + yarn add @actions/core @actions/github @actions/exec fs + + - name: Download rust build artifacts + uses: dawidd6/action-download-artifact@v2 + with: + workflow: ${{ github.event.workflow_run.workflow_id }} + workflow_conclusion: success + commit: ${{ github.event.workflow_run.head_sha }} + name: rust-${{ matrix.arch }}-binaries + path: /tmp + + - name: Unpack Rust binaries + run: | + tar -xvf /tmp/rust-${{ matrix.arch }}-binaries.tar + + - name: Run Per-Arch component build for ${{ matrix.akri-component }} + uses: ./.github/actions/build-component-per-arch + with: + github_event_name: ${{ github.event.workflow_run.event }} + github_ref: ${{ github.ref }} + container_name: ${{ matrix.akri-component }} + container_prefix: ghcr.io/deislabs/akri + container_registry_base_url: ghcr.io + container_registry_username: ${{ secrets.crUsername }} + container_registry_password: ${{ secrets.crPassword }} + makefile_component_name: ${{ matrix.akri-component }} + platform: ${{ matrix.arch }} + build_rust: "0" + + multi-arch: + if: ${{ github.event.workflow_run.conclusion == 'success' }} && ((github.event.workflow_run.event == 'release') || (github.event.workflow_run.event == 'push' && github.ref == 'refs/heads/main')) + needs: per-arch + runs-on: ubuntu-latest + timeout-minutes: 5 + strategy: + matrix: + akri-component: + - debug-echo-discovery + - udev-discovery + - onvif-discovery + - opcua-discovery + + steps: + - name: Checkout repo + uses: actions/checkout@v2 + with: + persist-credentials: false + + - name: Prepare To Install + uses: actions/setup-node@v1 + with: + node-version: 12 + - name: Install Deps + run: | + yarn install + yarn add @actions/core @actions/github @actions/exec fs + + - name: Run Multi-Arch component build for ${{ matrix.akri-component }} + uses: ./.github/actions/build-component-multi-arch + with: + github_event_name: ${{ github.event.workflow_run.event }} + container_name: ${{ matrix.akri-component }} + container_prefix: ghcr.io/deislabs/akri + container_registry_base_url: ghcr.io + container_registry_username: ${{ secrets.crUsername }} + container_registry_password: ${{ secrets.crPassword }} + makefile_component_name: ${{ matrix.akri-component }} diff --git a/.github/workflows/build-onvif-video-broker-container.yml b/.github/workflows/build-onvif-video-broker-container.yml index 0f69d1afd..e0653a50d 100644 --- a/.github/workflows/build-onvif-video-broker-container.yml +++ b/.github/workflows/build-onvif-video-broker-container.yml @@ -11,6 +11,7 @@ on: - samples/brokers/onvif-video-broker/** - version.txt - build/akri-containers.mk + - build/akri-dotnet-containers.mk - Makefile pull_request: branches: [ main ] @@ -22,6 +23,7 @@ on: - samples/brokers/onvif-video-broker/** - version.txt - build/akri-containers.mk + - build/akri-dotnet-containers.mk - Makefile release: types: diff --git a/.github/workflows/build-opcua-monitoring-broker-container.yml b/.github/workflows/build-opcua-monitoring-broker-container.yml index ce2edbebf..690888bf8 100644 --- a/.github/workflows/build-opcua-monitoring-broker-container.yml +++ b/.github/workflows/build-opcua-monitoring-broker-container.yml @@ -11,6 +11,7 @@ on: - samples/brokers/opcua-monitoring-broker/** - version.txt - build/akri-containers.mk + - build/akri-dotnet-containers.mk - Makefile pull_request: branches: [ main ] @@ -22,6 +23,7 @@ on: - 
samples/brokers/opcua-monitoring-broker/** - version.txt - build/akri-containers.mk + - build/akri-dotnet-containers.mk - Makefile release: types: @@ -72,7 +74,7 @@ jobs: container_registry_password: ${{ secrets.crPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} platform: ${{ matrix.arch }} - build_rust: "1" + build_rust: "0" multi-arch: if: (github.event_name == 'release') || (github.event_name == 'push' && github.ref == 'refs/heads/main') || (startsWith(github.event_name, 'pull_request') && github.event.action == 'closed' && github.event.pull_request.merged == true && github.ref != 'refs/heads/main') diff --git a/.github/workflows/build-rust-code.yml b/.github/workflows/build-rust-code.yml new file mode 100644 index 000000000..5fd394948 --- /dev/null +++ b/.github/workflows/build-rust-code.yml @@ -0,0 +1,103 @@ +name: Build Production Rust Code + +on: + push: + branches: [ main ] + paths: + - .github/actions/build-component-per-arch/** + - .github/actions/build-component-multi-arch/** + - .github/workflows/build-rust-code.yml + - .github/workflows/build-agent-container.yml + - .github/workflows/build-controller-container.yml + - .github/workflows/build-udev-video-broker-container.yml + - .github/workflows/build-webhook-configuration-container.yml + - build/containers/Dockerfile.agent + - build/containers/Dockerfile.controller + - build/containers/Dockerfile.udev-video-broker + - build/containers/Dockerfile.webhook-configuration + - '**.rs' + - '**/Cargo.toml' + - '**/Cargo.lock' + - version.txt + - build/akri-containers.mk + - build/akri-rust-containers.mk + - Makefile + pull_request: + branches: [ main ] + paths: + - .github/actions/build-component-per-arch/** + - .github/actions/build-component-multi-arch/** + - .github/workflows/build-rust-code.yml + - .github/workflows/build-agent-container.yml + - .github/workflows/build-controller-container.yml + - .github/workflows/build-udev-video-broker-container.yml + - .github/workflows/build-webhook-configuration-container.yml + - build/containers/Dockerfile.agent + - build/containers/Dockerfile.controller + - build/containers/Dockerfile.udev-video-broker + - build/containers/Dockerfile.webhook-configuration + - '**.rs' + - '**/Cargo.toml' + - '**/Cargo.lock' + - version.txt + - build/akri-containers.mk + - build/akri-rust-containers.mk + - Makefile + release: + types: + - published + +jobs: + + build-rust: + runs-on: ubuntu-latest + timeout-minutes: 50 + strategy: + matrix: + arch: + - label: arm64v8 + make-target: arm64 + rust-target-path: aarch64-unknown-linux-gnu + - label: arm32v7 + make-target: arm32 + rust-target-path: armv7-unknown-linux-gnueabihf + - label: amd64 + make-target: amd64 + rust-target-path: x86_64-unknown-linux-gnu + + steps: + - name: Checkout the head commit of the branch + uses: actions/checkout@v2 + with: + persist-credentials: false + + - name: Rust install + uses: actions-rs/toolchain@v1 + with: + toolchain: 1.51.0 + override: true + components: clippy, rustfmt + + - name: Build production rust for ${{ matrix.arch.label }} + env: + AGENT_FEATURES: "agent-full onvif-feat opcua-feat udev-feat" + run: | + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + make install-cross + cross --version + make akri-cross-build-${{ matrix.arch.make-target }} + + - name: Package build binaries + run: | + tar_manifest='/tmp/tar-contents.txt' + > $tar_manifest + for f in target/${{ matrix.arch.rust-target-path }}/release/*; do filetype=$( file "$f" ); case "$filetype" in *ELF*) echo "$f" >> 
$tar_manifest ;; esac; done + tar -cvf /tmp/rust-${{ matrix.arch.label }}-binaries.tar `cat $tar_manifest` + + - name: Upload target binaries as artifact + uses: actions/upload-artifact@v2 + with: + name: rust-${{ matrix.arch.label }}-binaries + path: /tmp/rust-${{ matrix.arch.label }}-binaries.tar + retention-days: 1 + diff --git a/.github/workflows/build-udev-video-broker-container.yml b/.github/workflows/build-udev-video-broker-container.yml index e2e29a932..7d610124f 100644 --- a/.github/workflows/build-udev-video-broker-container.yml +++ b/.github/workflows/build-udev-video-broker-container.yml @@ -1,33 +1,11 @@ name: Build UDEV Broker on: - push: - branches: [ main ] - paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-udev-video-broker-container.yml - - build/containers/Dockerfile.udev-video-broker - - samples/brokers/udev-video-broker/** - - shared/** - - version.txt - - build/akri-containers.mk - - Makefile - pull_request: - branches: [ main ] - paths: - - .github/actions/build-component-per-arch/** - - .github/actions/build-component-multi-arch/** - - .github/workflows/build-udev-video-broker-container.yml - - build/containers/Dockerfile.udev-video-broker - - samples/brokers/udev-video-broker/** - - shared/** - - version.txt - - build/akri-containers.mk - - Makefile - release: - types: - - published + workflow_run: + workflows: ["Build Production Rust Code"] + branches: [main] + types: + - completed env: AKRI_COMPONENT: udev-video-broker @@ -36,6 +14,7 @@ env: jobs: per-arch: + if: ${{ github.event.workflow_run.conclusion == 'success' }} runs-on: ubuntu-latest timeout-minutes: 40 strategy: @@ -60,13 +39,24 @@ jobs: yarn install yarn add @actions/core @actions/github @actions/exec fs + - name: Download rust build artifacts + uses: dawidd6/action-download-artifact@v2 + with: + workflow: ${{ github.event.workflow_run.workflow_id }} + workflow_conclusion: success + commit: ${{ github.event.workflow_run.head_sha }} + name: rust-${{ matrix.arch }}-binaries + path: /tmp + + - name: Unpack Rust binaries + run: | + tar -xvf /tmp/rust-${{ matrix.arch }}-binaries.tar + - name: Run Per-Arch component build for ${{ env.AKRI_COMPONENT }} uses: ./.github/actions/build-component-per-arch with: - github_event_name: ${{ github.event_name }} + github_event_name: ${{ github.event.workflow_run.event }} github_ref: ${{ github.ref }} - github_event_action: ${{ github.event.action }} - github_merged: ${{ github.event.pull_request.merged }} container_name: ${{ env.AKRI_COMPONENT }} container_prefix: ghcr.io/deislabs/akri container_registry_base_url: ghcr.io @@ -74,10 +64,10 @@ jobs: container_registry_password: ${{ secrets.crPassword }} makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} platform: ${{ matrix.arch }} - build_rust: "1" + build_rust: "0" multi-arch: - if: (github.event_name == 'release') || (github.event_name == 'push' && github.ref == 'refs/heads/main') || (startsWith(github.event_name, 'pull_request') && github.event.action == 'closed' && github.event.pull_request.merged == true && github.ref != 'refs/heads/main') + if: ${{ github.event.workflow_run.conclusion == 'success' }} && ((github.event.workflow_run.event == 'release') || (github.event.workflow_run.event == 'push' && github.ref == 'refs/heads/main')) needs: per-arch runs-on: ubuntu-latest timeout-minutes: 5 @@ -100,7 +90,7 @@ jobs: - name: Run Multi-Arch component build for ${{ env.AKRI_COMPONENT }} uses: 
./.github/actions/build-component-multi-arch with: - github_event_name: ${{ github.event_name }} + github_event_name: ${{ github.event.workflow_run.event }} container_name: ${{ env.AKRI_COMPONENT }} container_prefix: ghcr.io/deislabs/akri container_registry_base_url: ghcr.io diff --git a/.github/workflows/build-video-streaming-app-container.yml b/.github/workflows/build-video-streaming-app-container.yml index a0d44872b..86c01c129 100644 --- a/.github/workflows/build-video-streaming-app-container.yml +++ b/.github/workflows/build-video-streaming-app-container.yml @@ -11,6 +11,7 @@ on: - samples/apps/video-streaming-app/** - version.txt - build/akri-containers.mk + - build/akri-python-containers.mk - Makefile pull_request: branches: [ main ] @@ -22,6 +23,7 @@ on: - samples/apps/video-streaming-app/** - version.txt - build/akri-containers.mk + - build/akri-python-containers.mk - Makefile release: types: diff --git a/.github/workflows/build-webhook-configuration-container.yml b/.github/workflows/build-webhook-configuration-container.yml new file mode 100644 index 000000000..9704a8908 --- /dev/null +++ b/.github/workflows/build-webhook-configuration-container.yml @@ -0,0 +1,96 @@ +name: Build Webhook Configuration + +on: + workflow_run: + workflows: ["Build Production Rust Code"] + branches: [main] + types: + - completed + +env: + AKRI_COMPONENT: webhook-configuration + MAKEFILE_COMPONENT: webhook-configuration + +jobs: + per-arch: + if: ${{ github.event.workflow_run.conclusion == 'success' }} + runs-on: ubuntu-latest + timeout-minutes: 60 + strategy: + matrix: + arch: + - arm64v8 + - arm32v7 + - amd64 + + steps: + - name: Checkout the head commit of the branch + uses: actions/checkout@v2 + with: + persist-credentials: false + + - name: Prepare To Install + uses: actions/setup-node@v1 + with: + node-version: 12 + - name: Install Deps + run: | + yarn install + yarn add @actions/core @actions/github @actions/exec fs + + - name: Download rust build artifacts + uses: dawidd6/action-download-artifact@v2 + with: + workflow: ${{ github.event.workflow_run.workflow_id }} + workflow_conclusion: success + commit: ${{ github.event.workflow_run.head_sha }} + name: rust-${{ matrix.arch }}-binaries + path: /tmp + + - name: Unpack Rust binaries + run: | + tar -xvf /tmp/rust-${{ matrix.arch }}-binaries.tar + + - name: Run Per-Arch component build for ${{ env.AKRI_COMPONENT }} + uses: ./.github/actions/build-component-per-arch + with: + github_event_name: ${{ github.event.workflow_run.event }} + github_ref: ${{ github.ref }} + container_name: ${{ env.AKRI_COMPONENT }} + container_prefix: ghcr.io/deislabs/akri + container_registry_base_url: ghcr.io + container_registry_username: ${{ secrets.crUsername }} + container_registry_password: ${{ secrets.crPassword }} + makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} + platform: ${{ matrix.arch }} + build_rust: "0" + + multi-arch: + if: ${{ github.event.workflow_run.conclusion == 'success' }} && ((github.event.workflow_run.event == 'release') || (github.event.workflow_run.event == 'push' && github.ref == 'refs/heads/main')) + needs: per-arch + runs-on: ubuntu-latest + timeout-minutes: 60 + + steps: + - name: Checkout repo + uses: actions/checkout@v2 + + - name: Prepare To Install + uses: actions/setup-node@v1 + with: + node-version: 12 + - name: Install Deps + run: | + yarn install + yarn add @actions/core @actions/github @actions/exec fs + + - name: Run Multi-Arch component build for ${{ env.AKRI_COMPONENT }} + uses: ./.github/actions/build-component-multi-arch + 
with: + github_event_name: ${{ github.event.workflow_run.event }} + container_name: ${{ env.AKRI_COMPONENT }} + container_prefix: ghcr.io/deislabs/akri + container_registry_base_url: ghcr.io + container_registry_username: ${{ secrets.crUsername }} + container_registry_password: ${{ secrets.crPassword }} + makefile_component_name: ${{ env.MAKEFILE_COMPONENT }} diff --git a/.github/workflows/cancel-previous-pr-workflows.yml b/.github/workflows/cancel-previous-pr-workflows.yml new file mode 100644 index 000000000..e5e5bc1d3 --- /dev/null +++ b/.github/workflows/cancel-previous-pr-workflows.yml @@ -0,0 +1,14 @@ +name: Cancel Previous PR Workflows +on: + workflow_run: + workflows: ["Dependencies autoupdate", "Build Agents", "Build Anomaly Detection App Container", "Build Controller", "Build ONVIF Broker (.NET)", "Build OPC UA Monitoring Broker", "Build OpenCV Base", "Build Production Rust Code", "Build Rust CrossBuild", "Build UDEV Broker", "Build Video Streaming App Container", "Build Webhook Configuration", "Check Rust", "Check versioning", "Helm", "Tarpaulin Code Coverage", "Test K3s, Kubernetes, and MicroK8s"] + types: + - requested +jobs: + cancel: + runs-on: ubuntu-latest + if: github.event.workflow_run.event == 'pull_request' + steps: + - uses: styfle/cancel-workflow-action@0.8.0 + with: + workflow_id: ${{ github.event.workflow.id }} \ No newline at end of file diff --git a/.github/workflows/check-rust.yml b/.github/workflows/check-rust.yml index 8b0a026de..8c06b7b26 100644 --- a/.github/workflows/check-rust.yml +++ b/.github/workflows/check-rust.yml @@ -22,7 +22,7 @@ env: jobs: build: runs-on: ubuntu-latest - timeout-minutes: 20 + timeout-minutes: 30 steps: - name: Checkout the head commit of the branch @@ -33,7 +33,7 @@ jobs: - name: Rust install uses: actions-rs/toolchain@v1 with: - toolchain: 1.41.0 + toolchain: 1.51.0 override: true components: clippy, rustfmt - name: Install Linux requirements diff --git a/.github/workflows/run-helm.yml b/.github/workflows/run-helm.yml index ffb76df8a..15b56f1d8 100644 --- a/.github/workflows/run-helm.yml +++ b/.github/workflows/run-helm.yml @@ -34,7 +34,7 @@ jobs: - uses: azure/setup-helm@v1 - name: Lint helm chart - run: helm lint deployment/helm && echo "lint finished successfully" || echo ::warning::"lint found issues" + run: helm lint deployment/helm --values test/helm-lint-values.yaml && echo "lint finished successfully" || echo ::warning::"lint found issues" helm: @@ -68,7 +68,7 @@ jobs: sed -i s/"useDevelopmentContainers: true"/"useDevelopmentContainers: true"/g ./deployment/helm/values.yaml - name: Lint helm chart - run: helm lint deployment/helm + run: helm lint deployment/helm --values test/helm-lint-values.yaml && echo "lint finished successfully" || echo ::warning::"lint found issues" - name: Cache version.txt run: | diff --git a/.github/workflows/run-tarpaulin.yml b/.github/workflows/run-tarpaulin.yml index ab346a363..1a5e4bdb8 100644 --- a/.github/workflows/run-tarpaulin.yml +++ b/.github/workflows/run-tarpaulin.yml @@ -20,7 +20,14 @@ env: jobs: build: runs-on: ubuntu-latest - timeout-minutes: 20 + # There is a second, hidden timeout in this workflow. When the tarpaulin container is created, + # it is created with a CMD that sleeps for 600 minutes. A more reasonable value could be selected, + # but it seems easier to make it SOOOO big that timeout-minutes is likely to never be impacted by + # it. 
+ # + # But, if this workflow is mysteriously timing out after 600 minutes, make changes to the docker + # create command in the Create tarpaulin instance step. + timeout-minutes: 30 steps: - name: Checkout the head commit of the branch @@ -29,7 +36,7 @@ jobs: persist-credentials: false - name: Create tarpaulin instance - run: docker create --network host --security-opt seccomp=unconfined -v "${PWD}:/volume" xd009642/tarpaulin:0.12.2 bash -c "echo 'sleep 20m; echo bye' > /tmp/keep_alive.sh; chmod 777 /tmp/keep_alive.sh; /tmp/keep_alive.sh" > container_id.txt + run: docker create --network host --security-opt seccomp=unconfined -v "${PWD}:/volume" xd009642/tarpaulin:0.16.0 bash -c "echo 'sleep 600m; echo bye' > /tmp/keep_alive.sh; chmod 777 /tmp/keep_alive.sh; /tmp/keep_alive.sh" > container_id.txt - name: Start tarpaulin instance run: docker start $(cat container_id.txt) - name: Install linux requirement in tarpaulin instance diff --git a/.github/workflows/run-test-cases.yml b/.github/workflows/run-test-cases.yml index 5146cf700..4d999bc7e 100644 --- a/.github/workflows/run-test-cases.yml +++ b/.github/workflows/run-test-cases.yml @@ -4,286 +4,349 @@ on: workflow_dispatch: inputs: pull_request: - branches: [ main ] + branches: [main] paths: - - test/run-end-to-end.py - - test/run-conservation-of-broker-pod.py - - test/run-helm-install-delete.py - - test/shared_test_code.py - - .github/workflows/run-test-cases.yml - - build/containers/Dockerfile.agent - - build/containers/Dockerfile.controller - - deployment/helm/** - - agent/** - - controller/** - - shared/** - - version.txt - - build/akri-containers.mk - - Makefile + - test/run-end-to-end.py + - test/run-conservation-of-broker-pod.py + - test/run-helm-install-delete.py + - test/run-webhook.py + - test/shared_test_code.py + - .github/workflows/run-test-cases.yml + - build/containers/Dockerfile.agent + - build/containers/Dockerfile.controller + - deployment/helm/** + - agent/** + - controller/** + - shared/** + - version.txt + - build/akri-containers.mk + - Makefile push: - branches: [ main ] + branches: [main] paths: - - test/run-end-to-end.py - - test/run-conservation-of-broker-pod.py - - test/run-helm-install-delete.py - - test/shared_test_code.py - - .github/workflows/run-test-cases.yml - - build/containers/Dockerfile.agent - - build/containers/Dockerfile.controller - - deployment/helm/** - - agent/** - - controller/** - - shared/** - - version.txt - - build/akri-containers.mk - - Makefile + - test/run-end-to-end.py + - test/run-conservation-of-broker-pod.py + - test/run-helm-install-delete.py + - test/run-webhook.py + - test/shared_test_code.py + - .github/workflows/run-test-cases.yml + - build/containers/Dockerfile.agent + - build/containers/Dockerfile.controller + - deployment/helm/** + - agent/** + - controller/** + - shared/** + - version.txt + - build/akri-containers.mk + - Makefile release: types: - published - + jobs: build-containers: runs-on: ubuntu-18.04 - timeout-minutes: 35 + timeout-minutes: 60 steps: - - name: Checkout the head commit of the branch - uses: actions/checkout@v2 - with: - persist-credentials: false + - name: Checkout the head commit of the branch + uses: actions/checkout@v2 + with: + persist-credentials: false - - name: Build local containers for PR tests - if: startsWith(github.event_name, 'pull_request') - env: - BUILD_AMD64: 1 - BUILD_ARM32: 0 - BUILD_ARM64: 0 - PREFIX: ghcr.io/deislabs/akri - LABEL_PREFIX: pr - run: | - make akri-build - make controller-build-amd64 - make agent-build-amd64 - docker save 
${PREFIX}/agent:${LABEL_PREFIX}-amd64 > agent.tar - docker save ${PREFIX}/controller:${LABEL_PREFIX}-amd64 > controller.tar + - name: Build local containers for PR tests + if: startsWith(github.event_name, 'pull_request') + env: + BUILD_AMD64: 1 + BUILD_ARM32: 0 + BUILD_ARM64: 0 + BUILD_SLIM_AGENT: 0 + AGENT_FEATURES: "agent-full" + PACKAGES_TO_EXCLUDE: "akri-udev akri-onvif akri-opcua udev-video-broker debug-echo-discovery-handler onvif-discovery-handler opcua-discovery-handler udev-discovery-handler" + PREFIX: ghcr.io/deislabs/akri + LABEL_PREFIX: pr + CARGO_INCREMENTAL: 0 + run: | + make akri-build + make controller-build-amd64 + make agent-full-build-amd64 + make webhook-configuration-build-amd64 + docker save ${PREFIX}/agent-full:${LABEL_PREFIX}-amd64 > agent.tar + docker save ${PREFIX}/controller:${LABEL_PREFIX}-amd64 > controller.tar + docker save ${PREFIX}/webhook-configuration:${LABEL_PREFIX}-amd64 > webhook-configuration.tar - - name: Upload Agent container as artifact - if: startsWith(github.event_name, 'pull_request') - uses: actions/upload-artifact@v2 - with: - name: agent.tar - path: agent.tar - - name: Upload Controller container as artifact - if: startsWith(github.event_name, 'pull_request') - uses: actions/upload-artifact@v2 - with: - name: controller.tar - path: controller.tar + - name: Upload Agent container as artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/upload-artifact@v2 + with: + name: agent.tar + path: agent.tar + - name: Upload Controller container as artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/upload-artifact@v2 + with: + name: controller.tar + path: controller.tar + - name: Upload Webhook-Configuration container as artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/upload-artifact@v2 + with: + name: webhook-configuration.tar + path: webhook-configuration.tar test-cases: needs: build-containers runs-on: ubuntu-18.04 - timeout-minutes: 35 + timeout-minutes: 60 strategy: fail-fast: false matrix: - kube-runtime: - - K3s-1.18 - - K3s-1.19 - - MicroK8s-1.18 - - MicroK8s-1.19 - - Kubernetes-1.16 - - Kubernetes-1.17 - - Kubernetes-1.18 - - Kubernetes-1.19 - test-case: - - end-to-end - include: - - kube-runtime: MicroK8s-1.18 - kube-version: 1.18/stable - - kube-runtime: MicroK8s-1.19 - kube-version: 1.19/stable - - kube-runtime: K3s-1.18 - kube-version: v1.18.9+k3s1 - - kube-runtime: K3s-1.19 - kube-version: v1.19.4+k3s1 - - kube-runtime: Kubernetes-1.16 - kube-version: 1.16.15-00 - - kube-runtime: Kubernetes-1.17 - kube-version: 1.17.14-00 - - kube-runtime: Kubernetes-1.18 - kube-version: 1.18.12-00 - - kube-runtime: Kubernetes-1.19 - kube-version: 1.19.4-00 - - test-case: end-to-end - test-file: test/run-end-to-end.py + kube: + - runtime: MicroK8s-1.16 + version: 1.16/stable + crictl: v1.16.0 + - runtime: MicroK8s-1.17 + version: 1.17/stable + crictl: v1.16.0 + - runtime: MicroK8s-1.18 + version: 1.18/stable + crictl: v1.17.0 + - runtime: MicroK8s-1.19 + version: 1.19/stable + crictl: v1.17.0 + - runtime: MicroK8s-1.20 + version: 1.20/stable + crictl: v1.17.0 + - runtime: MicroK8s-1.21 + version: 1.21/stable + crictl: v1.17.0 + - runtime: K3s-1.16 + version: v1.16.14+k3s1 + crictl: v1.16.0 + - runtime: K3s-1.17 + version: v1.17.17+k3s1 + crictl: v1.16.0 + - runtime: K3s-1.18 + version: v1.18.9+k3s1 + crictl: v1.17.0 + - runtime: K3s-1.19 + version: v1.19.10+k3s1 + crictl: v1.17.0 + - runtime: K3s-1.20 + version: v1.20.6+k3s1 + crictl: v1.17.0 + - runtime: Kubernetes-1.16 + 
version: 1.16.15-00 + crictl: UNUSED + - runtime: Kubernetes-1.17 + version: 1.17.14-00 + crictl: UNUSED + - runtime: Kubernetes-1.18 + version: 1.18.12-00 + crictl: UNUSED + - runtime: Kubernetes-1.19 + version: 1.19.4-00 + crictl: UNUSED + - runtime: Kubernetes-1.20 + version: 1.20.1-00 + crictl: UNUSED + - runtime: Kubernetes-1.21 + version: 1.21.0-00 + crictl: UNUSED + test: + - case: end-to-end + file: test/run-end-to-end.py + - case: webhook + file: test/run-webhook.py steps: - - name: Checkout the head commit of the branch - uses: actions/checkout@v2 - with: - persist-credentials: false + - name: Checkout the head commit of the branch + uses: actions/checkout@v2 + with: + persist-credentials: false + + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Install Python kubernetes dependency + run: | + python -m pip install --upgrade pip + pip install kubernetes - - name: Setup Python - uses: actions/setup-python@v2 - with: - python-version: 3.8 - - name: Install Python kubernetes dependency - run: | - python -m pip install --upgrade pip - pip install kubernetes + - name: Download Agent container artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/download-artifact@v2 + with: + name: agent.tar + - name: Download Controller container artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/download-artifact@v2 + with: + name: controller.tar + - name: Download Webhook-Configuration container artifact + if: startsWith(github.event_name, 'pull_request') + uses: actions/download-artifact@v2 + with: + name: webhook-configuration.tar - - name: Download Agent container artifact - if: startsWith(github.event_name, 'pull_request') - uses: actions/download-artifact@v2 - with: - name: agent.tar - - name: Download Controller container artifact - if: startsWith(github.event_name, 'pull_request') - uses: actions/download-artifact@v2 - with: - name: controller.tar + - if: startsWith(matrix.kube.runtime, 'K3s') + name: Install K3s + env: + INSTALL_K3S_VERSION: ${{ matrix.kube.version }} + run: | + sudo curl -sfL https://get.k3s.io -o install.sh + sudo chmod +x install.sh + ./install.sh server --kubelet-arg=eviction-hard="imagefs.available<1%,nodefs.available<1%" --kubelet-arg=eviction-minimum-reclaim="imagefs.available=1%,nodefs.available=1%" + sudo addgroup k3s-admin + sudo adduser $USER k3s-admin + sudo usermod -a -G k3s-admin $USER + sudo chgrp k3s-admin /etc/rancher/k3s/k3s.yaml + sudo chmod g+r /etc/rancher/k3s/k3s.yaml + sudo chmod 666 /etc/rancher/k3s/* + mkdir -p ~/.kube/ && cp /etc/rancher/k3s/k3s.yaml ~/.kube/config + curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/${{ matrix.kube.crictl }}/crictl-${{ matrix.kube.crictl }}-linux-amd64.tar.gz --output crictl-${{ matrix.kube.crictl }}-linux-amd64.tar.gz + sudo tar zxvf crictl-${{ matrix.kube.crictl }}-linux-amd64.tar.gz -C /usr/local/bin + rm -f crictl-${{ matrix.kube.crictl }}-linux-amd64.tar.gz + echo "--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/run/k3s/containerd/containerd.sock" > /tmp/cri_args_to_test.txt + echo 'kubectl' > /tmp/runtime_cmd_to_test.txt + echo '~/.kube/config' > /tmp/kubeconfig_path_to_test.txt + until kubectl get node ${HOSTNAME,,} -o jsonpath='{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status}' | grep 'Ready=True'; do echo "waiting for k3s to become ready"; sleep 10; done + if [ "${{ matrix.kube.runtime }}" == "K3s-1.16" ]; then + mkdir -p /var/lib/kubelet + 
if [ -d /var/lib/kubelet/device-plugins ]; then + sudo rm -rf /var/lib/kubelet/device-plugins + fi + sudo ln -s /var/lib/rancher/k3s/agent/kubelet/device-plugins /var/lib/kubelet/device-plugins + fi - - if: startsWith(matrix.kube-runtime, 'K3s') - name: Install K3s - env: - INSTALL_K3S_VERSION: ${{ matrix.kube-version }} - run: | - sudo curl -sfL https://get.k3s.io | sh - - sudo addgroup k3s-admin - sudo adduser $USER k3s-admin - sudo usermod -a -G k3s-admin $USER - sudo chgrp k3s-admin /etc/rancher/k3s/k3s.yaml - sudo chmod g+r /etc/rancher/k3s/k3s.yaml - sudo chmod 666 /etc/rancher/k3s/* - mkdir -p ~/.kube/ && cp /etc/rancher/k3s/k3s.yaml ~/.kube/config - VERSION="v1.17.0" - curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz - sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C $(pwd) - rm -f crictl-$VERSION-linux-amd64.tar.gz - echo '--set agent.host.crictl=$(pwd)/crictl --set agent.host.dockerShimSock=/run/k3s/containerd/containerd.sock' > /tmp/cri_args_to_test.txt - echo 'kubectl' > /tmp/runtime_cmd_to_test.txt - echo '~/.kube/config' > /tmp/kubeconfig_path_to_test.txt - until kubectl get node ${HOSTNAME,,} -o jsonpath='{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status}' | grep 'Ready=True'; do echo "waiting for k3s to become ready"; sleep 10; done + - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube.runtime, 'K3s')) + name: Import local agent and controller to K3s + run: | + sudo k3s ctr image import agent.tar + sudo k3s ctr image import controller.tar + sudo k3s ctr image import webhook-configuration.tar - - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube-runtime, 'K3s')) - name: Import local agent and controller to K3s - run: | - sudo k3s ctr image import agent.tar - sudo k3s ctr image import controller.tar + - if: startsWith(matrix.kube.runtime, 'Kubernetes') + name: Install Kubernetes + run: | + sudo apt-get update -y + sudo apt-get install -o Dpkg::Options::="--force-overwrite" -y --allow-downgrades kubelet=${{ matrix.kube.version }} kubeadm=${{ matrix.kube.version }} kubectl=${{ matrix.kube.version }} + kubectl version && echo "kubectl return code: $?" || echo "kubectl return code: $?" + kubeadm version && echo "kubeadm return code: $?" || echo "kubeadm return code: $?" + kubelet --version && echo "kubelet return code: $?" || echo "kubelet return code: $?" + sudo swapoff -a + sudo kubeadm init + sudo mkdir -p $HOME/.kube + sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config + sudo chown $(id -u):$(id -g) $HOME/.kube/config + kubectl taint nodes --all node-role.kubernetes.io/master- + echo '--set agent.host.crictl=/usr/bin/crictl --set agent.host.dockerShimSock=/var/run/dockershim.sock' > /tmp/cri_args_to_test.txt + echo 'kubectl' > /tmp/runtime_cmd_to_test.txt + echo '~/.kube/config' > /tmp/kubeconfig_path_to_test.txt + until kubectl get node ${HOSTNAME,,} -o jsonpath='{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status}' | grep 'Ready=True'; do echo "waiting for kubernetes to become ready"; sleep 10; done - - if: startsWith(matrix.kube-runtime, 'Kubernetes') - name: Install Kubernetes - run: | - sudo apt-get update -y - sudo apt-get install -y --allow-downgrades kubelet=${{ matrix.kube-version }} kubeadm=${{ matrix.kube-version }} kubectl=${{ matrix.kube-version }} - kubectl version && echo "kubectl return code: $?" || echo "kubectl return code: $?" 
- kubeadm version && echo "kubeadm return code: $?" || echo "kubeadm return code: $?" - kubelet --version && echo "kubelet return code: $?" || echo "kubelet return code: $?" - sudo swapoff -a - sudo kubeadm init - sudo mkdir -p $HOME/.kube - sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config - sudo chown $(id -u):$(id -g) $HOME/.kube/config - kubectl taint nodes --all node-role.kubernetes.io/master- - echo '--set agent.host.crictl=/usr/bin/crictl --set agent.host.dockerShimSock=/var/run/dockershim.sock' > /tmp/cri_args_to_test.txt - echo 'kubectl' > /tmp/runtime_cmd_to_test.txt - echo '~/.kube/config' > /tmp/kubeconfig_path_to_test.txt - until kubectl get node ${HOSTNAME,,} -o jsonpath='{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status}' | grep 'Ready=True'; do echo "waiting for kubernetes to become ready"; sleep 10; done + - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube.runtime, 'Kubernetes')) + name: Import local agent and controller to Kubernetes + run: | + sudo docker load --input agent.tar + sudo docker load --input controller.tar + sudo docker load --input webhook-configuration.tar - - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube-runtime, 'Kubernetes')) - name: Import local agent and controller to Kubernetes - run: | - sudo docker load --input agent.tar - sudo docker load --input controller.tar + - if: startsWith(matrix.kube.runtime, 'MicroK8s') + name: Install MicroK8s + run: | + set -x + sudo snap install microk8s --classic --channel=${{ matrix.kube.version }} + sudo microk8s status --wait-ready + sudo usermod -a -G microk8s $USER + sudo ls -la $HOME/.kube + echo sudo chown $(id -u):$(id -g) $HOME/.kube + sudo chown -f -R $USER $HOME/.kube --verbose + sudo sh -c "microk8s.kubectl config view --raw >~/.kube/config" + sudo cat ~/.kube/config + sudo microk8s.enable rbac dns + sudo sed -i 's/memory.available<100Mi,nodefs.available<1Gi,imagefs.available<1Gi/memory.available<25Mi,nodefs.available<50Mi,imagefs.available<50Mi/' /var/snap/microk8s/current/args/kubelet + sudo systemctl restart snap.microk8s.daemon-kubelet + until sudo microk8s.status --wait-ready; do sleep 5s; echo "Try again"; done + curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/${{ matrix.kube.crictl }}/crictl-${{ matrix.kube.crictl }}-linux-amd64.tar.gz --output crictl-${{ matrix.kube.crictl }}-linux-amd64.tar.gz + sudo tar zxvf crictl-${{ matrix.kube.crictl }}-linux-amd64.tar.gz -C /usr/local/bin + rm -f crictl-${{ matrix.kube.crictl }}-linux-amd64.tar.gz + echo '--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/var/snap/microk8s/common/run/containerd.sock' > /tmp/cri_args_to_test.txt + echo 'microk8s kubectl' > /tmp/runtime_cmd_to_test.txt + echo '~/.kube/config' > /tmp/kubeconfig_path_to_test.txt - - if: startsWith(matrix.kube-runtime, 'MicroK8s') - name: Install MicroK8s - run: | - sudo snap install microk8s --classic --channel=${{ matrix.kube-version }} - sudo microk8s status --wait-ready - sudo usermod -a -G microk8s $USER - sudo ls -la $HOME/.kube - echo sudo chown $(id -u):$(id -g) $HOME/.kube - sudo chown -f -R $USER $HOME/.kube --verbose - sudo sh -c "microk8s.kubectl config view --raw >~/.kube/config" - sudo cat ~/.kube/config - sudo microk8s.enable helm3 - sudo microk8s.enable rbac - sudo microk8s.enable dns - sudo microk8s.status --wait-ready - VERSION="v1.17.0" - curl -L 
https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz - sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin - rm -f crictl-$VERSION-linux-amd64.tar.gz - echo '--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/var/snap/microk8s/common/run/containerd.sock' > /tmp/cri_args_to_test.txt - echo 'microk8s kubectl' > /tmp/runtime_cmd_to_test.txt - echo '~/.kube/config' > /tmp/kubeconfig_path_to_test.txt + - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube.runtime, 'MicroK8s')) + name: Import local agent and controller to MicroK8s + run: | + sudo microk8s.status --wait-ready + until sudo microk8s ctr images ls; do sleep 5s; echo "Try again"; done + sudo microk8s ctr images ls + sudo microk8s ctr --debug --timeout 60s images import agent.tar + sudo microk8s ctr --debug --timeout 60s images import controller.tar + sudo microk8s ctr --debug --timeout 60s images import webhook-configuration.tar + sudo microk8s ctr images ls - - if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube-runtime, 'MicroK8s')) - name: Import local agent and controller to MicroK8s - run: | - sleep 15 # 60, 30, 15, and 5 all work in simple tests ... no sleep fails for 1.19.3 - sudo microk8s.status --wait-ready - sudo microk8s ctr images ls - sudo microk8s ctr --debug --timeout 10s images import agent.tar - sudo microk8s ctr --debug --timeout 10s images import controller.tar - sudo microk8s ctr images ls + - name: Add Akri Helm Chart + run: helm repo add akri-helm-charts https://deislabs.github.io/akri/ - - name: Add Akri Helm Chart - run: helm repo add akri-helm-charts https://deislabs.github.io/akri/ - - # For push and release, we need to wait for the Helm chart and - # associated containers to build. - - if: github.event_name == 'push' || github.event_name == 'release' - name: Set sleep duration before running script to 1500 - run: echo 1500 > /tmp/sleep_duration.txt + # For push and release, we need to wait for the Helm chart and + # associated containers to build. + - if: github.event_name == 'push' || github.event_name == 'release' + name: Set sleep duration before running script to 2700 + run: echo 2700 > /tmp/sleep_duration.txt - # For pull_request, use the locally built containers. - - if: startsWith(github.event_name, 'pull_request') - name: Tell Helm to use the 'local' labels for container images - run: | - git fetch origin main - git show origin/main:version.txt > /tmp/version_to_test.txt - echo '--set agent.image.pullPolicy=Never,agent.image.tag=pr-amd64,controller.image.pullPolicy=Never,controller.image.tag=pr-amd64' > /tmp/extra_helm_args.txt - # For non-PR (i.e. push, release, manual), version.txt is corresponds - # to an existing Helm chart. - - if: (!(startsWith(github.event_name, 'pull_request'))) - name: Use current version for push - run: cat version.txt > /tmp/version_to_test.txt + # For pull_request, use the locally built containers. + - if: startsWith(github.event_name, 'pull_request') + name: Tell Helm to use the 'local' labels for container images + run: | + git fetch origin main + git show origin/main:version.txt > /tmp/version_to_test.txt + echo '--set agent.image.pullPolicy=Never,agent.image.tag=pr-amd64,controller.image.pullPolicy=Never,controller.image.tag=pr-amd64,webhookConfiguration.image.pullPolicy=Never,webhookConfiguration.image.tag=pr-amd64' > /tmp/extra_helm_args.txt + # For non-PR (i.e. 
push, release, manual), version.txt is corresponds + # to an existing Helm chart. + - if: (!(startsWith(github.event_name, 'pull_request'))) + name: Use current version for push + run: cat version.txt > /tmp/version_to_test.txt - # For workflow_dispatch and pull_request, use the files in deployment/helm - # as basis for helm install ... this enables us to test any changes made to - # the helm chart files in a PR (where no helm chart is published) - - if: github.event_name != 'push' && github.event_name != 'release' - name: Tell Helm to use the files in deployment/helm to build chart - run: | - echo './deployment/helm' > /tmp/helm_chart_location.txt - # For push, use a specific version of the `akri-dev` charts that are built and - # published by the helm workflow. - - if: github.event_name == 'push' - name: Tell Helm to use the `akri-dev` published charts - run: | - echo "akri-helm-charts/akri-dev --version $(cat /tmp/version_to_test.txt)" > /tmp/helm_chart_location.txt - # For release, use a specific version of the `akri` charts that are built and - # published by the helm workflow. - - if: github.event_name == 'release' - name: Tell Helm to use the `akri` published charts - run: | - echo "akri-helm-charts/akri --version $(cat /tmp/version_to_test.txt)" > /tmp/helm_chart_location.txt + # For workflow_dispatch and pull_request, use the files in deployment/helm + # as basis for helm install ... this enables us to test any changes made to + # the helm chart files in a PR (where no helm chart is published) + - if: github.event_name != 'push' && github.event_name != 'release' + name: Tell Helm to use the files in deployment/helm to build chart + run: | + echo './deployment/helm' > /tmp/helm_chart_location.txt + # For push, use a specific version of the `akri-dev` charts that are built and + # published by the helm workflow. + - if: github.event_name == 'push' + name: Tell Helm to use the `akri-dev` published charts + run: | + echo "akri-helm-charts/akri-dev --version $(cat /tmp/version_to_test.txt)" > /tmp/helm_chart_location.txt + # For release, use a specific version of the `akri` charts that are built and + # published by the helm workflow. 
+ - if: github.event_name == 'release' + name: Tell Helm to use the `akri` published charts + run: | + echo "akri-helm-charts/akri --version $(cat /tmp/version_to_test.txt)" > /tmp/helm_chart_location.txt - - name: Execute test script ${{ matrix.test-file }} - run: python ${{ matrix.test-file }} - - name: Upload Agent log as artifact - if: always() - uses: actions/upload-artifact@v2 - with: - name: ${{ matrix.kube-runtime }}-${{ matrix.test-case }}-agent-log - path: /tmp/agent_log.txt - - name: Upload controller log as artifact - if: always() - uses: actions/upload-artifact@v2 - with: - name: ${{ matrix.kube-runtime }}-${{ matrix.test-case }}-controller-log - path: /tmp/controller_log.txt + - name: Execute test script ${{ matrix.test.file }} + run: python ${{ matrix.test.file }} + - name: Upload Agent log as artifact + if: always() + uses: actions/upload-artifact@v2 + with: + name: ${{ matrix.kube.runtime }}-${{ matrix.test.case }}-agent-log + path: /tmp/agent_log.txt + - name: Upload controller log as artifact + if: always() + uses: actions/upload-artifact@v2 + with: + name: ${{ matrix.kube.runtime }}-${{ matrix.test.case }}-controller-log + path: /tmp/controller_log.txt + - name: Upload webhook log as artifact + if: always() + uses: actions/upload-artifact@v2 + with: + name: ${{ matrix.kube.runtime }}-${{ matrix.test.case }}-webhook-log + path: /tmp/webhook_log.txt diff --git a/.github/workflows/security-audit.yml b/.github/workflows/security-audit.yml new file mode 100644 index 000000000..efb05c301 --- /dev/null +++ b/.github/workflows/security-audit.yml @@ -0,0 +1,43 @@ +# This is a security audit workflow that runs security audit checks and send an email in case any vulnerabilities are detected. + +name: Security Audit +on: + schedule: + - cron: '0 0 * * *' #runs daily at 12:00 am UTC + +jobs: + security_audit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v1 + - name: Run security audit check + id: cargo-audit + if: github.repository == 'deislabs/akri' # only run on main repo and not forks + continue-on-error: true + uses: actions-rs/audit-check@v1 + with: + # token is only used for creating the audit report and does not impact the + # functionality or success/failure of the job in case the token is unavailable + token: ${{ secrets.GITHUB_TOKEN }} + + # sends an email if security audit failed + - name: Send mail + if: steps.cargo-audit.outcome != 'success' && github.repository == 'deislabs/akri' # only run on main repo and not forks + uses: dawidd6/action-send-mail@v2 + with: + server_address: smtp-mail.outlook.com + server_port: 587 + username: ${{secrets.AKRI_BOT_EMAIL}} + password: ${{secrets.AKRI_BOT_PASSWORD}} + subject: "Security vulnerability detected in ${{github.repository}}" + body: |- + A security vulnerability was detected in one or more of Akri's dependencies. For more details, check the output of the [security audit workflow](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}) + + Hint: In most cases, running the [auto-update dependencies](https://github.com/deislabs/akri/actions/workflows/auto-update-dependencies.yml) workflow will fix the issue. + + -Your friendly Akri bot 🤖 + to: ${{secrets.AKRI_TEAM_EMAIL}} + from: ${{secrets.AKRI_BOT_EMAIL}} + content_type: text/html + convert_markdown: true + diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ac27b876..1c3ee29b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,128 @@ +# v0.6.5 + +## Announcing Akri v0.6.5! +Akri v0.6.5 is a pre-release of Akri. 
+
+To find out more about Akri, check out our [README](https://github.com/deislabs/akri/blob/v0.6.5/README.md) and start [contributing](https://github.com/deislabs/akri/blob/v0.6.5/docs/contributing.md) today!
+
+## New Features
+The v0.6.5 release introduces Akri's Logo and new features such as a new extensibility model for Discovery Handlers, a Configuration validating webhook, DevOps improvements, and more.
+
+**New Discovery Handler extensibility model**
+* feat: Discovery Handlers now live behind a [gRPC interface](https://github.com/deislabs/akri/blob/v0.6.5/discovery-utils/proto/discovery.proto) (https://github.com/deislabs/akri/pull/252), so Discovery Handlers can be written in any language without forking Akri and working within its code. See the [Discovery Handler development document] to get started creating a Discovery Handler.
+* feat: Support for both the default "slim" and the old "full" Agent images (https://github.com/deislabs/akri/pull/279). Prior to this release, the Agent contained udev, ONVIF, and OPC UA Discovery Handlers. As of this release, Akri is moving towards a default of having no embedded Discovery Handlers in the Agent; rather, the desired Discovery Handlers can be deployed separately using Akri's Helm chart. This decreases the attack surface of the Agent and keeps it from growing ever larger as new Discovery Handlers are continually supported. Discovery Handlers written in Rust can be conditionally compiled into the Agent -- see [the development documentation](https://github.com/deislabs/akri/blob/v0.6.5/docs/development.md#local-builds-and-tests) for more details. For the time being, Akri will continue to support an Agent image with udev, ONVIF, and OPC UA Discovery Handlers. It will be used if `agent.full=true` is set when installing Akri's Helm chart.
+* feat: Updates to Akri's Helm charts with templates for Akri's Discovery Handlers and renaming of values to better fit the new model (see the install sketch below).
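+
+As an illustration, the new model splits discovery out of the Agent. A minimal install sketch follows; the `udev.discovery.enabled` value name is an assumption for illustration only, while `agent.full=true` is described above:
+
+```sh
+# Slim Agent (default), with the udev Discovery Handler deployed as its own Pod
+# (value name assumed for illustration)
+helm install akri akri-helm-charts/akri --set udev.discovery.enabled=true
+
+# Old behavior: one "full" Agent image with udev, ONVIF, and OPC UA embedded
+helm install akri akri-helm-charts/akri --set agent.full=true
+```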
+
+**DevOps improvements**
+* feat: Workflow to auto-update dependencies (https://github.com/deislabs/akri/pull/224)
+* feat: Security audit workflow (https://github.com/deislabs/akri/pull/264)
+* feat: Workflow for canceling previously running workflows on PRs, reducing environmental footprint and queuing of GitHub Actions (https://github.com/deislabs/akri/pull/284)
+* feat: Build all Rust components in one workflow instead of the previous strategy of one workflow per build (https://github.com/deislabs/akri/pull/270)
+* fix: More exhaustive linting of Akri Helm charts (https://github.com/deislabs/akri/pull/306)
+
+**Other enhancements**
+* feat: [**Webhook for validating Configurations**](https://github.com/deislabs/akri/blob/v0.6.5/webhooks/validating/configuration/README.md) (https://github.com/deislabs/akri/pull/206)
+* feat: Support for Akri monitoring via Prometheus (https://github.com/deislabs/akri/pull/190)
+
+**Misc**
+* feat: **Akri Logo** (https://github.com/deislabs/akri/pull/149)
+* fix: Allow overwriting Controller's `nodeSelectors` (https://github.com/deislabs/akri/pull/194)
+* fix: Updated `mockall` version (https://github.com/deislabs/akri/pull/214)
+* fix: Changed default image `PullPolicy` from `Always` to the Kubernetes default (`IfNotPresent`) (https://github.com/deislabs/akri/pull/207)
+* fix: Improved video streaming application (for udev demo) that polls for new service creation (https://github.com/deislabs/akri/pull/173)
+* fix: Patched anomaly detection application (for OPC UA demo) to show values from all brokers (https://github.com/deislabs/akri/pull/229)
+* feat: Timestamped labels for local container builds (https://github.com/deislabs/akri/pull/234)
+* fix: Removed udev directory mount from Agent DaemonSet (https://github.com/deislabs/akri/pull/304)
+* fix: Modified Debug Echo Discovery Handler to specify `Device.properties` and added check to e2e tests (https://github.com/deislabs/akri/pull/288)
+* feat: Support for specifying environment variables for broker Pods via a Configuration's `brokerProperties`.
+* fix: Default memory and CPU resource requests and limits for Akri containers (https://github.com/deislabs/akri/pull/305)
+
+View the [full change log](https://github.com/deislabs/akri/compare/v0.1.5...v0.6.5)
+
+## Breaking Changes
+Akri's Configuration and Instance CRDs were modified. The old version of the CRDs should be deleted with `kubectl delete instances.akri.sh configurations.akri.sh`, and the new ones will be applied with a new Akri Helm installation.
+* Akri's Configuration CRD's `protocol` field was replaced with `discoveryHandler` in order to fit Akri's new Discovery Handler extensibility model and make the Configuration no longer strongly tied to Discovery Handlers. Its unused `units` field was removed and `properties` was renamed `brokerProperties` to be more descriptive (see the sketch after this list).
+* Akri's Instance CRD's unused `rbac` field was removed and `metadata` was renamed `brokerProperties` to be more descriptive and aligned with the Configuration CRD.
+
+Significant changes were made to Akri's Helm chart. Consult the latest user guide and Configurations documentation.
+
+By default, the Agent contains no Discovery Handlers. To deploy Discovery Handlers, they must be explicitly enabled in Akri's Helm chart.
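+
+For orientation, here is a minimal sketch of a Configuration after these renames. The name and discovery details below are hypothetical examples, not taken from this release:
+
+```yaml
+apiVersion: akri.sh/v0
+kind: Configuration
+metadata:
+  name: akri-debug-echo   # hypothetical example name
+spec:
+  discoveryHandler:       # formerly `protocol`
+    name: debugEcho
+    discoveryDetails: |+
+      descriptions:
+      - "foo"
+  brokerProperties:       # formerly `properties`; surfaced to broker Pods as environment variables
+    RESOLUTION: "800x600"
+  capacity: 1
+```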
+ +## Known Issues +N/A + +## Validated With + +| Distribution | Version | +|---|---| +| Kubernetes | v1.21.0 | +| Kubernetes | v1.20.1 | +| Kubernetes | v1.19.4 | +| Kubernetes | v1.18.12 | +| Kubernetes | v1.17.14 | +| Kubernetes | v1.16.15 | +| K3s | v1.20.6+k3s1 | +| K3s | v1.19.10+k3s1 | +| K3s | v1.18.9+k3s1 | +| K3s | v1.17.17+k3s1 | +| K3s | v1.16.14+k3s1 | +| MicroK8s | 1.21/stable | +| MicroK8s | 1.20/stable | +| MicroK8s | 1.19/stable | +| MicroK8s | 1.18/stable | +| MicroK8s | 1.17/stable | +| MicroK8s | 1.16/stable | + +## What's next? +Check out our [roadmap](https://github.com/deislabs/akri/blob/v0.6.5/docs/roadmap.md) to see the features we are looking forward to! + +## Release history +See [CHANGELOG.md](https://github.com/deislabs/akri/blob/v0.6.5/CHANGELOG.md) for more information on what changed in this and previous releases. +# v0.1.5 + +## Announcing Akri v0.1.5! +Akri v0.1.5 is a pre-release of Akri. + +To find out more about Akri, check out our [README](https://github.com/deislabs/akri/blob/v0.1.5/README.md) and start [contributing](https://github.com/deislabs/akri/blob/v0.1.5/docs/contributing.md) today! + +## New Features +The v0.1.5 release introduces support for OPC UA discovery along with: + +* End to end demo for discovering and utilizing OPC UA servers +* Sample anomaly detection application for OPC UA demo +* Sample OPC UA broker +* OPC UA certificate generator + +View the [full change log](https://github.com/deislabs/akri/compare/v0.0.44...v0.1.5) + +## Breaking Changes +N/A + +## Known Issues +N/A + +## Validated With + +| Distribution | Version | +|---|---| +| Kubernetes | v1.20.1 | +| Kubernetes | v1.19.4 | +| Kubernetes | v1.18.12 | +| Kubernetes | v1.17.14 | +| Kubernetes | v1.16.15 | +| K3s | v1.20.0+k3s2 | +| K3s | v1.19.4+k3s1 | +| K3s | v1.18.9+k3s1 | +| MicroK8s | 1.20/stable | +| MicroK8s | 1.19/stable | +| MicroK8s | 1.18/stable | + +## What's next? +Check out our [roadmap](https://github.com/deislabs/akri/blob/v0.1.5/docs/roadmap.md) to see the features we are looking forward to! + +## Release history +See [CHANGELOG.md](https://github.com/deislabs/akri/blob/v0.1.5/CHANGELOG.md) for more information on what changed in this and previous releases. + # v0.0.44 ## Announcing Akri v0.0.44! diff --git a/Cargo.lock b/Cargo.lock index 658cc977e..c5453fd02 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,127 +1,566 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
[[package]] -name = "addr2line" -version = "0.13.0" +name = "actix" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1be241f88f3b1e7e9a3fbe3b5a8a0f6915b5a1d7ee0d9a248d3376d01068cc60" +dependencies = [ + "actix-rt", + "actix_derive", + "bitflags", + "bytes 0.5.6", + "crossbeam-channel", + "derive_more", + "futures-channel", + "futures-util", + "log", + "once_cell", + "parking_lot 0.11.1", + "pin-project 0.4.28", + "smallvec 1.6.1", + "tokio 0.2.25", + "tokio-util 0.3.1", + "trust-dns-proto", + "trust-dns-resolver", +] + +[[package]] +name = "actix-codec" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78d1833b3838dbe990df0f1f87baf640cf6146e898166afe401839d1b001e570" +dependencies = [ + "bitflags", + "bytes 0.5.6", + "futures-core", + "futures-sink", + "log", + "pin-project 0.4.28", + "tokio 0.2.25", + "tokio-util 0.3.1", +] + +[[package]] +name = "actix-connect" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "177837a10863f15ba8d3ae3ec12fac1099099529ed20083a27fdfe247381d0dc" +dependencies = [ + "actix-codec", + "actix-rt", + "actix-service", + "actix-utils", + "derive_more", + "either", + "futures-util", + "http 0.2.3", + "log", + "openssl", + "tokio-openssl", + "trust-dns-proto", + "trust-dns-resolver", +] + +[[package]] +name = "actix-http" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "452299e87817ae5673910e53c243484ca38be3828db819b6011736fc6982e874" +dependencies = [ + "actix-codec", + "actix-connect", + "actix-rt", + "actix-service", + "actix-threadpool", + "actix-tls", + "actix-utils", + "base64 0.13.0", + "bitflags", + "brotli2", + "bytes 0.5.6", + "cookie", + "copyless", + "derive_more", + "either", + "encoding_rs", + "flate2", + "futures-channel", + "futures-core", + "futures-util", + "fxhash", + "h2 0.2.6", + "http 0.2.3", + "httparse", + "indexmap", + "itoa", + "language-tags", + "lazy_static", + "log", + "mime", + "percent-encoding 2.1.0", + "pin-project 1.0.6", + "rand 0.7.3", + "regex", + "serde", + "serde_json", + "serde_urlencoded 0.7.0", + "sha-1 0.9.4", + "slab", + "time 0.2.26", +] + +[[package]] +name = "actix-macros" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ca8ce00b267af8ccebbd647de0d61e0674b6e61185cc7a592ff88772bed655" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "actix-router" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ad299af73649e1fc893e333ccf86f377751eb95ff875d095131574c6f43452c" +dependencies = [ + "bytestring", + "http 0.2.3", + "log", + "regex", + "serde", +] + +[[package]] +name = "actix-rt" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "143fcc2912e0d1de2bcf4e2f720d2a60c28652ab4179685a1ee159e0fb3db227" +dependencies = [ + "actix-macros", + "actix-threadpool", + "copyless", + "futures-channel", + "futures-util", + "smallvec 1.6.1", + "tokio 0.2.25", +] + +[[package]] +name = "actix-server" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45407e6e672ca24784baa667c5d32ef109ccdd8d5e0b5ebb9ef8a67f4dfb708e" +dependencies = [ + "actix-codec", + "actix-rt", + "actix-service", + "actix-utils", + "futures-channel", + "futures-util", + "log", + "mio 0.6.23", + "mio-uds", + "num_cpus", + "slab", + "socket2 0.3.19", +] + 
+[[package]] +name = "actix-service" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0052435d581b5be835d11f4eb3bce417c8af18d87ddf8ace99f8e67e595882bb" +dependencies = [ + "futures-util", + "pin-project 0.4.28", +] + +[[package]] +name = "actix-testing" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47239ca38799ab74ee6a8a94d1ce857014b2ac36f242f70f3f75a66f691e791c" +dependencies = [ + "actix-macros", + "actix-rt", + "actix-server", + "actix-service", + "log", + "socket2 0.3.19", +] + +[[package]] +name = "actix-threadpool" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d209f04d002854b9afd3743032a27b066158817965bf5d036824d19ac2cc0e30" +dependencies = [ + "derive_more", + "futures-channel", + "lazy_static", + "log", + "num_cpus", + "parking_lot 0.11.1", + "threadpool", +] + +[[package]] +name = "actix-tls" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24789b7d7361cf5503a504ebe1c10806896f61e96eca9a7350e23001aca715fb" +dependencies = [ + "actix-codec", + "actix-service", + "actix-utils", + "futures-util", + "openssl", + "tokio-openssl", +] + +[[package]] +name = "actix-utils" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e9022dec56632d1d7979e59af14f0597a28a830a9c1c7fec8b2327eb9f16b5a" +dependencies = [ + "actix-codec", + "actix-rt", + "actix-service", + "bitflags", + "bytes 0.5.6", + "either", + "futures-channel", + "futures-sink", + "futures-util", + "log", + "pin-project 0.4.28", + "slab", +] + +[[package]] +name = "actix-web" +version = "3.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e641d4a172e7faa0862241a20ff4f1f5ab0ab7c279f00c2d4587b77483477b86" +dependencies = [ + "actix-codec", + "actix-http", + "actix-macros", + "actix-router", + "actix-rt", + "actix-server", + "actix-service", + "actix-testing", + "actix-threadpool", + "actix-tls", + "actix-utils", + "actix-web-codegen", + "awc", + "bytes 0.5.6", + "derive_more", + "encoding_rs", + "futures-channel", + "futures-core", + "futures-util", + "fxhash", + "log", + "mime", + "openssl", + "pin-project 1.0.6", + "regex", + "serde", + "serde_json", + "serde_urlencoded 0.7.0", + "socket2 0.3.19", + "time 0.2.26", + "tinyvec", + "url 2.2.1", +] + +[[package]] +name = "actix-web-codegen" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad26f77093333e0e7c6ffe54ebe3582d908a104e448723eec6d43d08b07143fb" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "actix_derive" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b6a2d3371669ab3ca9797670853d61402b03d0b4b9ebf33d677dfa720203072" +checksum = "b95aceadaf327f18f0df5962fedc1bde2f870566a0b9f65c89508a3b1f79334c" dependencies = [ - "gimli", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "adler" -version = "0.2.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2a4ec343196209d6594e19543ae87a39f96d5534d7174822a3ad825dd6ed7e" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "agent" -version = "0.1.5" -dependencies = [ - "akri-shared", +version = "0.6.5" +dependencies = [ + "akri-debug-echo", + "akri-discovery-utils 0.6.5", + "akri-onvif", + "akri-opcua", + 
"akri-shared 0.6.5", + "akri-udev", + "anyhow", "async-stream", "async-trait", "blake2", - "cfg-if", + "cfg-if 1.0.0", "chrono", - "env_logger", - "failure", - "futures 0.1.29", - "futures 0.3.5", + "env_logger 0.8.3", + "futures 0.3.14", "futures-core", "futures-util", - "hyper 0.11.27", + "h2 0.2.6", + "hyper 0.13.10", "k8s-openapi", "kube", - "log 0.4.8", - "mime", + "lazy_static", + "log", + "mock_instant", "mockall", - "opcua-client", - "pest", - "pest_derive", + "mockall_double", + "prometheus 0.12.0", "prost", - "rand 0.7.3", - "regex 1.3.9", + "rand 0.8.3", "serde", "serde_derive", "serde_json", "serde_yaml", - "sxd-document", - "sxd-xpath", "tempfile", - "tokio 0.2.21", + "tokio 0.2.25", "tokio-core", "tonic", "tonic-build", "tower", - "udev", - "url 2.1.1", - "uuid 0.8.1", - "xml-rs", - "yaserde", - "yaserde_derive", + "url 2.2.1", + "uuid", ] [[package]] name = "aho-corasick" -version = "0.6.10" +version = "0.7.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ce3d38065e618af2d7b77e10c5ad9a069859b4be3c2250f674af3840d9c8a5" +checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" dependencies = [ "memchr", ] [[package]] -name = "aho-corasick" -version = "0.7.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043164d8ba5c4c3035fec9bbee8647c0261d788f3474306f93bb65901cae0e86" +name = "akri-debug-echo" +version = "0.6.5" dependencies = [ - "memchr", + "akri-discovery-utils 0.6.5", + "akri-shared 0.6.5", + "anyhow", + "async-trait", + "env_logger 0.8.3", + "futures-util", + "log", + "serde", + "serde_derive", + "serde_json", + "serde_yaml", + "tokio 0.2.25", + "tonic", ] [[package]] -name = "akri-shared" -version = "0.1.5" +name = "akri-discovery-utils" +version = "0.6.3" +source = "git+https://github.com/deislabs/akri?branch=main#26794100674e3e2d3899ed8570a284181c6eaa00" +dependencies = [ + "akri-shared 0.6.3", + "anyhow", + "futures 0.3.14", + "log", + "prost", + "serde", + "serde_derive", + "serde_yaml", + "tokio 0.2.25", + "tonic", + "tonic-build", + "tower", +] + +[[package]] +name = "akri-discovery-utils" +version = "0.6.5" dependencies = [ + "akri-shared 0.6.5", + "anyhow", + "async-trait", + "futures 0.3.14", + "log", + "prost", + "serde", + "serde_derive", + "serde_yaml", + "tempfile", + "tokio 0.2.25", + "tonic", + "tonic-build", + "tower", +] + +[[package]] +name = "akri-onvif" +version = "0.6.5" +dependencies = [ + "akri-discovery-utils 0.6.5", + "akri-shared 0.6.5", + "anyhow", "async-trait", "bytes 0.5.6", + "env_logger 0.8.3", + "futures-util", + "hyper 0.13.10", + "log", + "mockall", + "serde", + "serde_derive", + "serde_json", + "serde_yaml", + "sxd-document", + "sxd-xpath", + "tokio 0.2.25", + "tonic", + "uuid", + "xml-rs", + "yaserde", + "yaserde_derive", +] + +[[package]] +name = "akri-opcua" +version = "0.6.5" +dependencies = [ + "akri-discovery-utils 0.6.5", + "akri-shared 0.6.5", + "anyhow", + "async-trait", + "env_logger 0.8.3", + "futures-util", + "log", + "mockall", + "opcua-client", + "prost", + "serde", + "serde_derive", + "serde_json", + "serde_yaml", + "tokio 0.2.25", + "tonic", + "url 2.2.1", +] + +[[package]] +name = "akri-shared" +version = "0.6.3" +source = "git+https://github.com/deislabs/akri?branch=main#26794100674e3e2d3899ed8570a284181c6eaa00" +dependencies = [ + "anyhow", + "async-trait", "either", - "env_logger", - "failure", - "futures 0.1.29", - "futures 0.3.5", + "env_logger 0.6.2", + "futures 0.1.31", + "futures 0.3.14", "futures-util", - 
"hyper 0.11.27", - "hyper 0.13.7", "k8s-openapi", "kube", - "log 0.4.8", + "log", "mockall", + "prometheus 0.11.0", "rand 0.7.3", "serde", "serde_derive", "serde_json", "serde_yaml", - "sxd-document", - "sxd-xpath", - "tokio 0.2.21", + "tokio 0.2.25", "tokio-core", "tokio-signal", + "tonic", + "tower", + "warp", ] [[package]] -name = "anyhow" -version = "1.0.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85bb70cc08ec97ca5450e6eba421deeea5f172c0fc61f78b5357b2a8e8be195f" +name = "akri-shared" +version = "0.6.5" +dependencies = [ + "anyhow", + "async-trait", + "either", + "env_logger 0.8.3", + "futures 0.1.31", + "futures 0.3.14", + "futures-util", + "k8s-openapi", + "kube", + "log", + "mockall", + "prometheus 0.11.0", + "rand 0.8.3", + "serde", + "serde_derive", + "serde_json", + "serde_yaml", + "tokio 0.2.25", + "tokio-core", + "tokio-signal", + "tonic", + "tower", + "warp", +] [[package]] -name = "arc-swap" -version = "0.4.7" +name = "akri-udev" +version = "0.6.5" +dependencies = [ + "akri-discovery-utils 0.6.5", + "anyhow", + "async-trait", + "env_logger 0.8.3", + "futures-util", + "log", + "mockall", + "pest", + "pest_derive", + "prost", + "regex", + "serde", + "serde_derive", + "serde_json", + "serde_yaml", + "tokio 0.2.25", + "tonic", + "udev", +] + +[[package]] +name = "anyhow" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" +checksum = "28b2cd92db5cbd74e8e5028f7e27dd7aa3090e89e4f2a197cc7c8dfb69c7063b" [[package]] name = "arrayref" @@ -131,43 +570,126 @@ checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "arrayvec" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" +checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" + +[[package]] +name = "async-channel" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" +dependencies = [ + "concurrent-queue", + "event-listener", + "futures-core", +] [[package]] name = "async-compression" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9021768bcce77296b64648cc7a7460e3df99979b97ed5c925c38d1cc83778d98" +checksum = "b72c1f1154e234325b50864a349b9c8e56939e266a4c307c0f159812df2f9537" dependencies = [ "bytes 0.5.6", "flate2", "futures-core", "memchr", - "pin-project-lite", + "pin-project-lite 0.2.6", ] [[package]] -name = "async-std" -version = "1.6.2" +name = "async-executor" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00d68a33ebc8b57800847d00787307f84a562224a14db069b0acefe4c2abbf5d" +checksum = "eb877970c7b440ead138f6321a3b5395d6061183af779340b65e20c0fede9146" dependencies = [ "async-task", - "crossbeam-utils", + "concurrent-queue", + "fastrand", + "futures-lite", + "once_cell", + "vec-arena", +] + +[[package]] +name = "async-global-executor" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" +dependencies = [ + "async-channel", + "async-executor", + "async-io", + "async-mutex", + "blocking", + "futures-lite", + "num_cpus", + "once_cell", +] + 
+[[package]] +name = "async-io" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9315f8f07556761c3e48fec2e6b276004acf426e6dc068b2c2251854d65ee0fd" +dependencies = [ + "concurrent-queue", + "fastrand", + "futures-lite", + "libc", + "log", + "nb-connect", + "once_cell", + "parking", + "polling", + "vec-arena", + "waker-fn", + "winapi 0.3.9", +] + +[[package]] +name = "async-lock" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1996609732bde4a9988bc42125f55f2af5f3c36370e27c778d5191a4a1b63bfb" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-mutex" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +dependencies = [ + "event-listener", +] + +[[package]] +name = "async-std" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" +dependencies = [ + "async-channel", + "async-global-executor", + "async-io", + "async-lock", + "crossbeam-utils 0.8.3", "futures-channel", "futures-core", "futures-io", + "futures-lite", + "gloo-timers", "kv-log-macro", - "log 0.4.8", + "log", "memchr", "num_cpus", "once_cell", - "pin-project-lite", + "pin-project-lite 0.2.6", "pin-utils", - "slab 0.4.2", - "smol", + "slab", "wasm-bindgen-futures", ] @@ -194,21 +716,27 @@ dependencies = [ [[package]] name = "async-task" -version = "3.0.0" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17772156ef2829aadc587461c7753af20b7e8db1529bc66855add962a3b35d3" +checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" [[package]] name = "async-trait" -version = "0.1.36" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a265e3abeffdce30b2e26b7a11b222fe37c6067404001b434101457d0385eb92" +checksum = "36ea56748e10732c49404c153638a15ec3d6211ec5ff35d9bb20e13b93576adf" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "atomic-waker" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "065374052e7df7ee4047b1160cca5e1467a12351a40b3da123c870ba0b8eda2a" + [[package]] name = "atty" version = "0.2.14" @@ -222,43 +750,40 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" - -[[package]] -name = "backtrace" -version = "0.3.50" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46254cf2fdcdf1badb5934448c1bcbe046a56537b3987d96c51a7afc5d03f293" -dependencies = [ - "addr2line", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] +checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] -name = "base64" -version = "0.6.0" +name = "awc" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96434f987501f0ed4eb336a411e0631ecd1afa11574fe148587adc4ff96143c9" +checksum = "b381e490e7b0cfc37ebc54079b0413d8093ef43d14a4e4747083f7fa47a9e691" dependencies = [ - "byteorder", - "safemem 0.2.0", + "actix-codec", + "actix-http", + "actix-rt", + "actix-service", + "base64 0.13.0", + "bytes 0.5.6", + "cfg-if 1.0.0", + 
"derive_more", + "futures-core", + "log", + "mime", + "openssl", + "percent-encoding 2.1.0", + "rand 0.7.3", + "serde", + "serde_json", + "serde_urlencoded 0.7.0", ] [[package]] -name = "base64" -version = "0.9.3" +name = "base-x" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" -dependencies = [ - "byteorder", - "safemem 0.3.3", -] +checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" [[package]] name = "base64" @@ -281,6 +806,12 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +[[package]] +name = "base64" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" + [[package]] name = "bitflags" version = "1.2.1" @@ -289,21 +820,20 @@ checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" [[package]] name = "blake2" -version = "0.8.1" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94cb07b0da6a73955f8fb85d24c466778e70cda767a568229b104f0264089330" +checksum = "10a5720225ef5daecf08657f23791354e1685a8c91a4c60c7f3d3b2892f978f4" dependencies = [ - "byte-tools", "crypto-mac", - "digest", - "opaque-debug", + "digest 0.9.0", + "opaque-debug 0.3.0", ] [[package]] name = "blake2b_simd" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8fb2d74254a3a0b5cac33ac9f8ed0e44aa50378d9dbb2e5d83bd21ed1dc2c8a" +checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" dependencies = [ "arrayref", "arrayvec", @@ -319,7 +849,16 @@ dependencies = [ "block-padding", "byte-tools", "byteorder", - "generic-array", + "generic-array 0.12.4", +] + +[[package]] +name = "block-buffer" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" +dependencies = [ + "generic-array 0.14.4", ] [[package]] @@ -333,22 +872,53 @@ dependencies = [ [[package]] name = "blocking" -version = "0.4.6" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d17efb70ce4421e351d61aafd90c16a20fb5bfe339fcdc32a86816280e62ce0" +checksum = "c5e170dbede1f740736619b776d7251cb1b9095c435c34d8ca9f57fcd2f335e9" dependencies = [ - "futures-channel", - "futures-util", + "async-channel", + "async-task", + "atomic-waker", + "fastrand", + "futures-lite", "once_cell", - "parking", - "waker-fn", +] + +[[package]] +name = "brotli-sys" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4445dea95f4c2b41cde57cc9fee236ae4dbae88d8fcbdb4750fc1bb5d86aaecd" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "brotli2" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cb036c3eade309815c15ddbacec5b22c4d1f3983a774ab2eac2e3e9ea85568e" +dependencies = [ + "brotli-sys", + "libc", +] + +[[package]] +name = "buf_redux" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b953a6887648bb07a535631f2bc00fbdb2a2216f135552cb3f534ed136b9c07f" +dependencies = [ + "memchr", + "safemem", ] [[package]] name = "bumpalo" -version = "3.4.0" +version = 
"3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" +checksum = "63396b8a4b9de3f4fdfb320ab6080762242f66a8ef174c49d8e19b674db4cdbe" [[package]] name = "byte-tools" @@ -358,9 +928,9 @@ checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" [[package]] name = "byteorder" -version = "1.3.4" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" +checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" @@ -378,6 +948,21 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38" +[[package]] +name = "bytes" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b700ce4376041dcd0a327fd0097c41095743c4c8af8887265942faf1100bd040" + +[[package]] +name = "bytestring" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90706ba19e97b90786e19dc0d5e2abd80008d99d4c0c5d1ad0b5e72cec7c494d" +dependencies = [ + "bytes 1.0.1", +] + [[package]] name = "cache-padded" version = "1.1.1" @@ -386,9 +971,9 @@ checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" [[package]] name = "cc" -version = "1.0.58" +version = "1.0.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518" +checksum = "e3c69b077ad434294d3ce9f1f6143a2a4b89a8a2d54ef813d85003a4fd1137fd" [[package]] name = "cfg-if" @@ -396,16 +981,56 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + [[package]] name = "chrono" -version = "0.4.13" +version = "0.4.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c74d84029116787153e02106bf53e66828452a4b325cc8652b788b5967c0a0b6" +checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" dependencies = [ + "libc", "num-integer", "num-traits", "serde", - "time", + "time 0.1.44", + "winapi 0.3.9", +] + +[[package]] +name = "clap" +version = "3.0.0-beta.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bd1061998a501ee7d4b6d449020df3266ca3124b941ec56cf2005c3779ca142" +dependencies = [ + "atty", + "bitflags", + "clap_derive", + "indexmap", + "lazy_static", + "os_str_bytes", + "strsim", + "termcolor", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "clap_derive" +version = "3.0.0-beta.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "370f715b81112975b1b69db93e0b56ea4cd4e5002ac43b2da8474106a54096a1" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -419,13 +1044,19 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "1.1.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83c06aff61f2d899eb87c379df3cbf7876f14471dcab474e0b6dc90ab96c080" +checksum = 
"30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" dependencies = [ "cache-padded", ] +[[package]] +name = "const_fn" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "076a6803b0dacd6a88cfe64deba628b01533ff5ef265687e6938280c1afd0a28" + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -434,31 +1065,56 @@ checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" [[package]] name = "controller" -version = "0.1.5" +version = "0.6.5" dependencies = [ - "akri-shared", + "akri-shared 0.6.5", + "anyhow", "async-std", "async-trait", "chrono", - "env_logger", - "failure", - "futures 0.3.5", + "env_logger 0.6.2", + "futures 0.3.14", "k8s-openapi", "kube", - "log 0.4.8", + "lazy_static", + "log", "mockall", + "prometheus 0.12.0", "serde", "serde_derive", "serde_json", "serde_yaml", - "tokio 0.2.21", + "tokio 0.2.25", ] +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + +[[package]] +name = "cookie" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03a5d7b21829bc7b4bf4754a978a241ae54ea55a40f92bb20216e54096f4b951" +dependencies = [ + "percent-encoding 2.1.0", + "time 0.2.26", + "version_check", +] + +[[package]] +name = "copyless" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2df960f5d869b2dd8532793fde43eb5427cceb126c929747a26823ab0eeb536" + [[package]] name = "core-foundation" -version = "0.7.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" +checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" dependencies = [ "core-foundation-sys", "libc", @@ -466,17 +1122,33 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.7.0" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" + +[[package]] +name = "cpuid-bool" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" +checksum = "8aebca1129a03dc6dc2b127edd729435bbc4a37e1d5f4d7513165089ceb02634" [[package]] name = "crc32fast" -version = "1.2.0" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "crossbeam-channel" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" dependencies = [ - "cfg-if", + "crossbeam-utils 0.7.2", + "maybe-uninit", ] [[package]] @@ -486,7 +1158,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" dependencies = [ "crossbeam-epoch", - "crossbeam-utils", + "crossbeam-utils 0.7.2", "maybe-uninit", ] @@ -497,8 +1169,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ "autocfg", - "cfg-if", - "crossbeam-utils", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", "lazy_static", "maybe-uninit", "memoffset", @@ -511,8 +1183,8 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" dependencies = [ - "cfg-if", - "crossbeam-utils", + "cfg-if 0.1.10", + "crossbeam-utils 0.7.2", "maybe-uninit", ] @@ -523,20 +1195,64 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ "autocfg", - "cfg-if", + "cfg-if 0.1.10", + "lazy_static", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" +dependencies = [ + "autocfg", + "cfg-if 1.0.0", "lazy_static", ] [[package]] name = "crypto-mac" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array", + "generic-array 0.14.4", "subtle", ] +[[package]] +name = "ctor" +version = "0.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e98e2ad1a782e33928b96fc3948e7c355e5af34ba4de7670fe8bac2a3b2006d" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "debug-echo-discovery-handler" +version = "0.6.5" +dependencies = [ + "akri-debug-echo", + "akri-discovery-utils 0.6.5", + "env_logger 0.8.3", + "log", + "tokio 0.2.25", +] + +[[package]] +name = "derive_more" +version = "0.99.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f82b1b72f1263f214c0f823371768776c4f5841b942c9883aa8e5ec584fd0ba6" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "difference" version = "2.0.0" @@ -549,7 +1265,16 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" dependencies = [ - "generic-array", + "generic-array 0.12.4", +] + +[[package]] +name = "digest" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +dependencies = [ + "generic-array 0.14.4", ] [[package]] @@ -558,7 +1283,7 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13aea89a5c93364a98e9b37b2fa237effbb694d5cfe01c5b70941f7eb087d5e3" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "dirs-sys", ] @@ -573,6 +1298,12 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "discard" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" + [[package]] name = "downcast" version = "0.10.0" @@ -581,23 +1312,35 @@ checksum = "4bb454f0228b18c7f4c3b0ebbee346ed9c52e7443b0999cd543ff3571205701d" [[package]] name = "dtoa" -version = "0.4.6" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b" 
+checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" [[package]] name = "either" -version = "1.5.3" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" +checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.23" +version = "0.8.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +dependencies = [ + "cfg-if 1.0.0", +] + +[[package]] +name = "enum-as-inner" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ac63f94732332f44fe654443c46f6375d1939684c17b0afb6cb56b0456e171" +checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" dependencies = [ - "cfg-if", + "heck", + "proc-macro2", + "quote", + "syn", ] [[package]] @@ -607,33 +1350,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" dependencies = [ "atty", - "humantime", - "log 0.4.8", - "regex 1.3.9", + "humantime 1.3.0", + "log", + "regex", "termcolor", ] [[package]] -name = "failure" -version = "0.1.8" +name = "env_logger" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d32e9bd16cc02eae7db7ef620b392808b89f6a5e16bb3497d159c6b92a0f4f86" +checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" dependencies = [ - "backtrace", - "failure_derive", + "atty", + "humantime 2.1.0", + "log", + "regex", + "termcolor", ] [[package]] -name = "failure_derive" -version = "0.1.8" +name = "event-listener" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa4da3c766cd7a0db8242e326e9e4e081edd567072893ed320008189715366a4" -dependencies = [ - "proc-macro2", - "quote", - "syn", - "synstructure", -] +checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" [[package]] name = "fake-simd" @@ -643,9 +1383,12 @@ checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" [[package]] name = "fastrand" -version = "1.3.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36a9cb09840f81cd211e435d00a4e487edd263dc3c8ff815c32dd76ad668ebed" +checksum = "ca5faf057445ce5c9d4329e382b2ce7ca38550ef3b73a5348362d5f24e0c7fe3" +dependencies = [ + "instant", +] [[package]] name = "fixedbitset" @@ -655,11 +1398,11 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.16" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e" +checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crc32fast", "libc", "miniz_oxide", @@ -667,9 +1410,9 @@ dependencies = [ [[package]] name = "float-cmp" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62c4f1b81918835a8c6a484a397775fff5953fe83529afd51b05f5c6a6617d" +checksum = "e1267f4ac4f343772758f7b1bdcbe767c218bbab93bb432acbf5162bbf85a6c4" dependencies = [ "num-traits", ] @@ -696,16 +1439,20 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] -name = "fragile" -version = "0.3.0" +name = "form_urlencoded" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f8140122fa0d5dcb9fc8627cfce2b37cc1500f752636d46ea28bc26785c2f9" +checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +dependencies = [ + "matches", + "percent-encoding 2.1.0", +] [[package]] -name = "fuchsia-cprng" -version = "0.1.1" +name = "fragile" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" +checksum = "69a039c3498dc930fe810151a34ba0c1c70b02b8625035592e74432f678591f2" [[package]] name = "fuchsia-zircon" @@ -725,15 +1472,15 @@ checksum = "3dcaa9ae7725d12cdb85b3ad99a434db70b468c09ded17e012d86b5c1010f7a7" [[package]] name = "futures" -version = "0.1.29" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b980f2816d6ee8673b6517b52cb0e808a180efc92e5c19d02cdda79066703ef" +checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.5" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e05b85ec287aac0dc34db7d4a569323df697f9c55b99b15d6b4ef8cde49f613" +checksum = "a9d5813545e459ad3ca1bff9915e9ad7f1a47dc6a91b627ce321d5863b7dd253" dependencies = [ "futures-channel", "futures-core", @@ -746,9 +1493,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.5" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f366ad74c28cca6ba456d95e6422883cfb4b252a83bed929c83abfdbbf2967d5" +checksum = "ce79c6a52a299137a6013061e0cf0e688fce5d7f1bc60125f520912fdb29ec25" dependencies = [ "futures-core", "futures-sink", @@ -756,25 +1503,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59f5fff90fd5d971f936ad674802482ba441b6f09ba5e15fd8b39145582ca399" - -[[package]] -name = "futures-cpupool" -version = "0.1.8" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4" -dependencies = [ - "futures 0.1.29", - "num_cpus", -] +checksum = "098cd1c6dda6ca01650f1a37a794245eb73181d0d4d4e955e2f3c37db7af1815" [[package]] name = "futures-executor" -version = "0.3.5" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10d6bb888be1153d3abeb9006b11b02cf5e9b209fda28693c31ae1e4e012e314" +checksum = "10f6cb7042eda00f0049b1d2080aa4b93442997ee507eb3828e8bd7577f94c9d" dependencies = [ "futures-core", "futures-task", @@ -783,15 +1520,30 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.5" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "365a1a1fb30ea1c03a830fdb2158f5236833ac81fa0ad12fe35b29cddc35cb04" + +[[package]] +name = "futures-lite" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de27142b013a8e869c14957e6d2edeef89e97c289e69d042ee3a49acd8b51789" +checksum = "b4481d0cd0de1d204a4fa55e7d45f07b1d958abcb06714b3446438e2eff695fb" +dependencies = [ + "fastrand", + "futures-core", + 
"futures-io", + "memchr", + "parking", + "pin-project-lite 0.2.6", + "waker-fn", +] [[package]] name = "futures-macro" -version = "0.3.5" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0b5a30a4328ab5473878237c447333c093297bded83a4983d10f4deea240d39" +checksum = "668c6733a182cd7deb4f1de7ba3bf2120823835b3bcfbeacf7d2c4a773c1bb8b" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -801,18 +1553,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.5" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f2032893cb734c7a05d85ce0cc8b8c4075278e93b24b66f9de99d6eb0fa8acc" +checksum = "5c5629433c555de3d82861a7a4e3794a4c40040390907cfbfd7143a92a426c23" [[package]] name = "futures-task" -version = "0.3.5" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bdb66b5f09e22019b1ab0830f7785bcea8e7a42148683f99214f73f8ec21a626" -dependencies = [ - "once_cell", -] +checksum = "ba7aa51095076f3ba6d9a1f702f74bd05ec65f555d70d2033d55ba8d69f581bc" [[package]] name = "futures-timer" @@ -822,9 +1571,9 @@ checksum = "a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" [[package]] name = "futures-util" -version = "0.3.5" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8764574ff08b701a084482c3c7031349104b07ac897393010494beaa18ce32c6" +checksum = "3c144ad54d60f23927f0a6b6d816e4271278b64f005ad65e4e35291d2de9c025" dependencies = [ "futures-channel", "futures-core", @@ -833,73 +1582,187 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project", + "pin-project-lite 0.2.6", "pin-utils", "proc-macro-hack", "proc-macro-nested", - "slab 0.4.2", + "slab", +] + +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", ] [[package]] name = "generic-array" -version = "0.12.3" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd" +dependencies = [ + "typenum", +] + +[[package]] +name = "generic-array" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" dependencies = [ "typenum", + "version_check", +] + +[[package]] +name = "gethostname" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e692e296bfac1d2533ef168d0b60ff5897b8b70a4009276834014dd8924cc028" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "getrandom" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.1.14" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +checksum = "c9495705279e7140bf035dde1f6e750c162df8b625267cd52cc44e0b156732c8" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", - "wasi", + "wasi 
0.10.0+wasi-snapshot-preview1", ] [[package]] -name = "gimli" -version = "0.22.0" +name = "gloo-timers" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf91faf136cb47367fa430cd46e37a788775e7fa104f8b4bcb3861dc389b724" +checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", + "web-sys", +] [[package]] name = "h2" version = "0.2.6" -source = "git+https://github.com/kate-goldenring/h2#7c7ef6a579c9ce2392787c5728f805ce10f74ddf" +source = "git+https://github.com/kate-goldenring/h2?branch=master#7c7ef6a579c9ce2392787c5728f805ce10f74ddf" dependencies = [ "bytes 0.5.6", "fnv", "futures-core", "futures-sink", "futures-util", - "http 0.2.1", + "http 0.2.3", "indexmap", - "slab 0.4.2", - "tokio 0.2.21", + "slab", + "tokio 0.2.25", "tokio-util 0.3.1", "tracing", ] +[[package]] +name = "h2" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc018e188373e2777d0ef2467ebff62a08e66c3f5857b23c8fbec3018210dc00" +dependencies = [ + "bytes 1.0.1", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http 0.2.3", + "indexmap", + "slab", + "tokio 1.4.0", + "tokio-util 0.6.5", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04" + +[[package]] +name = "headers" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0b7591fb62902706ae8e7aaff416b1b0fa2c0fd0878b46dc13baa3712d8a855" +dependencies = [ + "base64 0.13.0", + "bitflags", + "bytes 1.0.1", + "headers-core", + "http 0.2.3", + "mime", + "sha-1 0.9.4", + "time 0.1.44", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http 0.2.3", +] + [[package]] name = "heck" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +checksum = "87cbf45460356b7deeb5e3415b5563308c0a9b057c85e12b06ad551f98d0a6ac" dependencies = [ "unicode-segmentation", ] [[package]] name = "hermit-abi" -version = "0.1.15" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322f4de77956e22ed0e5032c359a0f1273f1f7f0d79bfa3b8ffbc730d7fbcc5c" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hostname" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3deed196b6e7f9e44a2ae8d94225d80302d81208b1bb673fd21fe634645c85a9" +checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", + "match_cfg", + "winapi 0.3.9", ] [[package]] @@ -915,11 +1778,11 @@ dependencies = [ [[package]] name = "http" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" +checksum = 
"7245cd7449cc792608c3c8a9eaf69bd4eabbabf802713748fd739c98b82f0747" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "fnv", "itoa", ] @@ -931,14 +1794,51 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ "bytes 0.5.6", - "http 0.2.1", + "http 0.2.3", +] + +[[package]] +name = "http-body" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dfb77c123b4e2f72a2069aeae0b4b4949cc7e966df277813fc16347e7549737" +dependencies = [ + "bytes 1.0.1", + "http 0.2.3", + "pin-project-lite 0.2.6", +] + +[[package]] +name = "http-discovery-handler" +version = "0.1.0" +dependencies = [ + "akri-discovery-utils 0.6.3", + "anyhow", + "async-trait", + "env_logger 0.6.2", + "log", + "reqwest 0.10.10", + "tokio 0.2.25", + "tonic", ] [[package]] name = "httparse" -version = "1.3.4" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a1ce40d6fc9764887c2fdc7305c3dcc429ba11ff981c1509416afd5697e4437" + +[[package]] +name = "httpdate" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "494b4d60369511e7dea41cf646832512a94e542f68bb9c49e54518e0f468eb47" + +[[package]] +name = "httpdate" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd179ae861f0c2e53da70d892f5f3029f9594be0c41dc5269cd371691b1dc2f9" +checksum = "05842d0d43232b23ccb7060ecb0f0626922c21f30012e97b767b30afd4a5d4b9" [[package]] name = "humantime" @@ -949,55 +1849,58 @@ dependencies = [ "quick-error", ] +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "hyper" -version = "0.11.27" +version = "0.13.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34a590ca09d341e94cddf8e5af0bbccde205d5fbc2fa3c09dd67c7f85cea59d7" +checksum = "8a6f157065790a3ed2f88679250419b5cdd96e714a0d65f7797fd337186e96bb" dependencies = [ - "base64 0.9.3", - "bytes 0.4.12", - "futures 0.1.29", - "futures-cpupool", + "bytes 0.5.6", + "futures-channel", + "futures-core", + "futures-util", + "h2 0.2.6", + "http 0.2.3", + "http-body 0.3.1", "httparse", - "iovec", - "language-tags", - "log 0.4.8", - "mime", - "net2", - "percent-encoding 1.0.1", - "relay", - "time", - "tokio-core", - "tokio-io", - "tokio-proto", - "tokio-service", - "unicase", - "want 0.0.4", + "httpdate 0.3.2", + "itoa", + "pin-project 1.0.6", + "socket2 0.3.19", + "tokio 0.2.25", + "tower-service", + "tracing", + "want", ] [[package]] name = "hyper" -version = "0.13.7" +version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e68a8dd9716185d9e64ea473ea6ef63529252e3e27623295a0378a19665d5eb" +checksum = "1e5f105c494081baa3bf9e200b279e27ec1623895cd504c7dbef8d0b080fcf54" dependencies = [ - "bytes 0.5.6", + "bytes 1.0.1", "futures-channel", "futures-core", "futures-util", - "h2", - "http 0.2.1", - "http-body", + "h2 0.3.2", + "http 0.2.3", + "http-body 0.4.1", "httparse", + "httpdate 1.0.0", "itoa", - "pin-project", - "socket2", - "time", - "tokio 0.2.21", + "pin-project 1.0.6", + "socket2 0.4.0", + "tokio 1.4.0", "tower-service", "tracing", - "want 0.3.0", + "want", ] [[package]] @@ -1007,9 +1910,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d979acc56dcb5b8dddba3917601745e877576475aa046df3226eabdecef78eed" dependencies = [ "bytes 0.5.6", - "hyper 0.13.7", + "hyper 0.13.10", "native-tls", - "tokio 0.2.21", + "tokio 0.2.25", "tokio-tls", ] @@ -1026,9 +1929,9 @@ dependencies = [ [[package]] name = "idna" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02e2673c30ee86b5b96a9cb52ad15718aa1f966f5ab9ad54a8b95d5ca33120a9" +checksum = "89829a5d69c23d348314a7ac337fe39173b61149a9864deabd260983aed48c21" dependencies = [ "matches", "unicode-bidi", @@ -1037,11 +1940,30 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.4.0" +version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" +checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3" dependencies = [ "autocfg", + "hashbrown", +] + +[[package]] +name = "input_buffer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19a8a95243d5a0398cae618ec29477c6e3cb631152be5c19481f80bc71559754" +dependencies = [ + "bytes 0.5.6", +] + +[[package]] +name = "instant" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61124eeebbd69b8190558df225adf7e4caafce0d743919e5d6b19652314ec5ec" +dependencies = [ + "cfg-if 1.0.0", ] [[package]] @@ -1053,6 +1975,24 @@ dependencies = [ "libc", ] +[[package]] +name = "ipconfig" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +dependencies = [ + "socket2 0.3.19", + "widestring", + "winapi 0.3.9", + "winreg 0.6.2", +] + +[[package]] +name = "ipnet" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" + [[package]] name = "itertools" version = "0.8.2" @@ -1064,15 +2004,15 @@ dependencies = [ [[package]] name = "itoa" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6" +checksum = "dd25036021b0de88a0aff6b850051563c6516d0bf53f8638938edbb9de732736" [[package]] name = "js-sys" -version = "0.3.41" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4b9172132a62451e56142bff9afc91c8e4a4500aa5b847da36815b63bfda916" +checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" dependencies = [ "wasm-bindgen", ] @@ -1091,7 +2031,7 @@ dependencies = [ "serde", "serde-value", "serde_json", - "url 2.1.1", + "url 2.2.1", ] [[package]] @@ -1114,20 +2054,20 @@ dependencies = [ "chrono", "dirs", "either", - "futures 0.3.5", + "futures 0.3.14", "futures-timer", - "http 0.2.1", + "http 0.2.3", "k8s-openapi", - "log 0.4.8", + "log", "openssl", - "reqwest", + "reqwest 0.10.10", "serde", "serde_derive", "serde_json", "serde_yaml", "thiserror", - "time", - "url 2.1.1", + "time 0.1.44", + "url 2.2.1", ] [[package]] @@ -1136,7 +2076,7 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ - "log 0.4.8", + "log", ] [[package]] @@ -1153,9 +2093,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] 
name = "libc" -version = "0.2.72" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9f8082297d534141b30c8d39e9b1773713ab50fdbe4ff30f750d063b3bfd701" +checksum = "9385f66bf6105b241aa65a61cb923ef20efc665cb9f9bb50ac2f0c4b7f378d41" [[package]] name = "libudev-sys" @@ -1169,9 +2109,9 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dd5a6d5999d9907cda8ed67bbd137d3af8085216c2ac62de5be860bd41f304a" +checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lock_api" @@ -1183,21 +2123,31 @@ dependencies = [ ] [[package]] -name = "log" -version = "0.3.9" +name = "lock_api" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" +checksum = "5a3c91c24eae6777794bb1997ad98bbb87daf92890acab859f7eaa4320333176" dependencies = [ - "log 0.4.8", + "scopeguard", ] [[package]] name = "log" -version = "0.4.8" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +dependencies = [ + "cfg-if 1.0.0", + "value-bag", +] + +[[package]] +name = "lru-cache" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +checksum = "31e24f1ad8321ca0e8a1e0ac13f23cb668e6f5466c2c57319f6a5cf1cc8e3b1c" dependencies = [ - "cfg-if", + "linked-hash-map", ] [[package]] @@ -1206,6 +2156,12 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" +[[package]] +name = "match_cfg" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" + [[package]] name = "matches" version = "0.1.8" @@ -1220,15 +2176,15 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.3.3" +version = "2.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" [[package]] name = "memoffset" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c198b026e1bbf08a937e94c6c60f9ec4a2267f5b0d2eec9c1b21b061ce2be55f" +checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" dependencies = [ "autocfg", ] @@ -1251,41 +2207,55 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.0" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ "adler", + "autocfg", ] [[package]] name = "mio" -version = "0.6.22" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce347092656428bc8eaf6201042cb551b8d67855af7374542a92a0fbfcac430" +checksum = "4afd66f5b91bf2a3bc13fad0e21caedac168ca4c707504e75585648ae80e4cc4" dependencies = [ - "cfg-if", + 
"cfg-if 0.1.10", "fuchsia-zircon", "fuchsia-zircon-sys", "iovec", "kernel32-sys", "libc", - "log 0.4.8", - "miow 0.2.1", + "log", + "miow 0.2.2", "net2", - "slab 0.4.2", + "slab", "winapi 0.2.8", ] +[[package]] +name = "mio" +version = "0.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf80d3e903b34e0bd7282b218398aec54e082c840d9baf8339e0080a0c542956" +dependencies = [ + "libc", + "log", + "miow 0.3.7", + "ntapi", + "winapi 0.3.9", +] + [[package]] name = "mio-named-pipes" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0840c1c50fd55e521b247f949c241c9997709f23bd7f023b9762cd561e935656" dependencies = [ - "log 0.4.8", - "mio", - "miow 0.3.5", + "log", + "mio 0.6.23", + "miow 0.3.7", "winapi 0.3.9", ] @@ -1297,14 +2267,14 @@ checksum = "afcb699eb26d4332647cc848492bbc15eafb26f08d0304550d5aa1f612e066f0" dependencies = [ "iovec", "libc", - "mio", + "mio 0.6.23", ] [[package]] name = "miow" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c1f2f3b1cf331de6896aabf6e9d55dca90356cc9960cca7eaaf408a355ae919" +checksum = "ebd808424166322d4a38da87083bfddd3ac4c131334ed55856112eb06d46944d" dependencies = [ "kernel32-sys", "net2", @@ -1314,21 +2284,29 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" +checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" dependencies = [ - "socket2", "winapi 0.3.9", ] +[[package]] +name = "mock_instant" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "717e29a243b81f8130e31e24e04fb151b04a44b5a7d05370935f7d937e9de06d" +dependencies = [ + "once_cell", +] + [[package]] name = "mockall" -version = "0.6.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b95a7e7cfbce0e99ebbf5356a085d3b5e320a7ef300f77cd50a7148aa362e7c2" +checksum = "18d614ad23f9bb59119b8b5670a85c7ba92c5e9adf4385c81ea00c51c8be33d5" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "downcast", "fragile", "lazy_static", @@ -1339,11 +2317,23 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.6.0" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dd4234635bca06fc96c7368d038061e0aae1b00a764dc817e900dc974e3deea" +dependencies = [ + "cfg-if 1.0.0", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "mockall_double" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5a615a1ad92048ad5d9633251edb7492b8abc057d7a679a9898476aef173935" +checksum = "3e25b78d34b2b713b1d000d629079755cfc166e6a65f9f4c1c012a94305467c5" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "proc-macro2", "quote", "syn", @@ -1351,19 +2341,37 @@ dependencies = [ [[package]] name = "multimap" -version = "0.8.1" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" + +[[package]] +name = "multipart" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" +checksum = "d050aeedc89243f5347c3e237e3e13dc76fbe4ae3742a57b94dc14f69acf76d4" +dependencies = [ + 
"buf_redux", + "httparse", + "log", + "mime", + "mime_guess", + "quick-error", + "rand 0.7.3", + "safemem", + "tempfile", + "twoway", +] [[package]] name = "native-tls" -version = "0.2.4" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d88c06fe90d5ee94048ba40409ef1d9315d86f6f38c2efdaad4fb50c58b2d" +checksum = "b8d96b2e1c8da3957d58100b09f102c6d9cfdfced01b7ec5a8974044bb09dbd4" dependencies = [ "lazy_static", "libc", - "log 0.4.8", + "log", "openssl", "openssl-probe", "openssl-sys", @@ -1373,13 +2381,23 @@ dependencies = [ "tempfile", ] +[[package]] +name = "nb-connect" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19900e7eee95eb2b3c2e26d12a874cc80aaf750e31be6fcbe743ead369fa45d" +dependencies = [ + "libc", + "socket2 0.4.0", +] + [[package]] name = "net2" -version = "0.2.34" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba7c918ac76704fb42afcbbb43891e72731f3dcca3bef2a19786297baf14af7" +checksum = "391630d12b68002ae1e25e8f974306474966550ad82dac6886fb8910c19568ae" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "libc", "winapi 0.3.9", ] @@ -1390,11 +2408,20 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" +[[package]] +name = "ntapi" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "num-integer" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b" +checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" dependencies = [ "autocfg", "num-traits", @@ -1402,9 +2429,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611" +checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" dependencies = [ "autocfg", ] @@ -1420,16 +2447,21 @@ dependencies = [ ] [[package]] -name = "object" -version = "0.20.0" +name = "once_cell" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5" +checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" [[package]] -name = "once_cell" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d" +name = "onvif-discovery-handler" +version = "0.6.5" +dependencies = [ + "akri-discovery-utils 0.6.5", + "akri-onvif", + "env_logger 0.8.3", + "log", + "tokio 0.2.25", +] [[package]] name = "opaque-debug" @@ -1437,79 +2469,125 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" +[[package]] +name = "opaque-debug" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" + [[package]] name = "opcua-client" -version = "0.7.0" 
+version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb074db37fcebc35ae1280cdd87cdbda00a259f1d41da99458d1be5793d63d5d" +checksum = "0af3d9776871cb836aa6e3f57407b3c798c7c9f46ea9fae0835f8514f89585a1" dependencies = [ "chrono", - "futures 0.1.29", + "futures 0.1.31", "lazy_static", - "log 0.4.8", + "log", "opcua-core", + "opcua-crypto", "opcua-types", "serde", "serde_derive", - "time", + "time 0.1.44", "tokio 0.1.22", "tokio-codec", "tokio-io", "tokio-timer", - "url 1.7.2", ] [[package]] name = "opcua-core" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75797298070f962556f070fff8aad9746e2fa7ee5c1d0e7e6e7078f25ee42791" +checksum = "b7dd64d0071fb9e3c1409c8ad416c3df865053209a183bbf017ee193afbda7e9" dependencies = [ "bytes 0.4.12", "chrono", - "futures 0.1.29", + "futures 0.1.31", "lazy_static", - "log 0.4.8", + "log", + "opcua-crypto", "opcua-types", - "openssl", - "regex 0.2.11", + "regex", "serde", "serde_derive", "serde_yaml", "tokio 0.1.22", "tokio-io", + "url 1.7.2", +] + +[[package]] +name = "opcua-crypto" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5754e8be576bf03903a7c71b5a0a6e40a267fcc6742c83974399cdc7146362e" +dependencies = [ + "bytes 0.4.12", + "chrono", + "gethostname", + "lazy_static", + "log", + "opcua-types", + "openssl", + "serde", + "serde_derive", +] + +[[package]] +name = "opcua-discovery-handler" +version = "0.6.5" +dependencies = [ + "akri-discovery-utils 0.6.5", + "akri-opcua", + "env_logger 0.8.3", + "log", + "tokio 0.2.25", ] [[package]] name = "opcua-types" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bf90304fb0ec73e8deea05aedaa2021f173911a2c8407386b7f55eeb1d6e24" +checksum = "247dd311285d4f846219b3809c048719d3a1fd6c7b01ba16b0a6651290913d61" dependencies = [ - "base64 0.6.0", + "base64 0.12.3", "bitflags", "byteorder", "chrono", "lazy_static", - "log 0.4.8", - "regex 0.2.11", + "log", + "regex", + "serde", + "serde_derive", + "uuid", +] + +[[package]] +name = "openapi" +version = "1.1.0" +source = "git+https://github.com/DazWilkin/openapi-admission-v1?tag=v1.1.0#60a9ba6bd64efda65cb136a21e0d6a53e962c415" +dependencies = [ + "reqwest 0.11.2", "serde", "serde_derive", + "serde_json", "url 1.7.2", - "uuid 0.5.1", ] [[package]] name = "openssl" -version = "0.10.30" +version = "0.10.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d575eff3665419f9b83678ff2815858ad9d11567e082f5ac1814baba4e2bcb4" +checksum = "a61075b62a23fef5a29815de7536d940aa35ce96d18ce0cc5076272db678a577" dependencies = [ "bitflags", - "cfg-if", + "cfg-if 1.0.0", "foreign-types", - "lazy_static", "libc", + "once_cell", "openssl-sys", ] @@ -1521,9 +2599,9 @@ checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" [[package]] name = "openssl-sys" -version = "0.9.58" +version = "0.9.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" +checksum = "313752393519e876837e09e1fa183ddef0be7735868dced3196f4472d536277f" dependencies = [ "autocfg", "cc", @@ -1534,18 +2612,24 @@ dependencies = [ [[package]] name = "ordered-float" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3741934be594d77de1c8461ebcbbe866f585ea616a9753aa78f2bdc69f0e4579" 
+checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" dependencies = [ "num-traits", ] +[[package]] +name = "os_str_bytes" +version = "2.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afb2e1c3ee07430c2cf76151675e583e0f19985fa6efae47d6848a3e2c824f85" + [[package]] name = "parking" -version = "1.0.4" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efcee3c6d23b94012e240525f131c6abaa9e5eeb8f211002d93beec3b7be350" +checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" [[package]] name = "parking_lot" @@ -1553,23 +2637,48 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" dependencies = [ - "lock_api", - "parking_lot_core", + "lock_api 0.3.4", + "parking_lot_core 0.6.2", "rustc_version", ] +[[package]] +name = "parking_lot" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7744ac029df22dca6284efe4e898991d28e3085c706c972bcd7da4a27a15eb" +dependencies = [ + "instant", + "lock_api 0.4.3", + "parking_lot_core 0.8.3", +] + [[package]] name = "parking_lot_core" version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" dependencies = [ - "cfg-if", + "cfg-if 0.1.10", "cloudabi", "libc", - "redox_syscall", + "redox_syscall 0.1.57", "rustc_version", - "smallvec 0.6.13", + "smallvec 0.6.14", + "winapi 0.3.9", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7a782938e745763fe6907fc6ba86946d72f49fe7e21de074e08128a99fb018" +dependencies = [ + "cfg-if 1.0.0", + "instant", + "libc", + "redox_syscall 0.2.5", + "smallvec 1.6.1", "winapi 0.3.9", ] @@ -1631,7 +2740,7 @@ checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" dependencies = [ "maplit", "pest", - "sha-1", + "sha-1 0.8.2", ] [[package]] @@ -1646,18 +2755,38 @@ dependencies = [ [[package]] name = "pin-project" -version = "0.4.22" +version = "0.4.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "918192b5c59119d51e0cd221f4d49dde9112824ba717369e903c97d076083d0f" +dependencies = [ + "pin-project-internal 0.4.28", +] + +[[package]] +name = "pin-project" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc174859768806e91ae575187ada95c91a29e96a98dc5d2cd9a1fed039501ba6" +dependencies = [ + "pin-project-internal 1.0.6", +] + +[[package]] +name = "pin-project-internal" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12e3a6cdbfe94a5e4572812a0201f8c0ed98c1c452c7b8563ce2276988ef9c17" +checksum = "3be26700300be6d9d23264c73211d8190e755b6b5ca7a1b28230025511b52a5e" dependencies = [ - "pin-project-internal", + "proc-macro2", + "quote", + "syn", ] [[package]] name = "pin-project-internal" -version = "0.4.22" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a0ffd45cf79d88737d7cc85bfd5d2894bee1139b356e616fe85dc389c61aaf7" +checksum = "a490329918e856ed1b083f244e3bfe2d8c4f336407e4ea9e1a9f479ff09049e5" dependencies = [ "proc-macro2", "quote", @@ -1666,9 +2795,15 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.7" +version = "0.1.12" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" + +[[package]] +name = "pin-project-lite" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" +checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905" [[package]] name = "pin-utils" @@ -1678,64 +2813,149 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.18" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" + +[[package]] +name = "polling" +version = "2.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d36492546b6af1463394d46f0c834346f31548646f6ba10849802c9c9a27ac33" +checksum = "4fc12d774e799ee9ebae13f4076ca003b40d18a11ac0f3641e6f899618580b7b" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "log", + "wepoll-sys", + "winapi 0.3.9", +] [[package]] name = "ppv-lite86" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea" +checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "predicates" -version = "1.0.4" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "347a1b6f0b21e636bc9872fb60b83b8e185f6f5516298b8238699f7f9a531030" +checksum = "eeb433456c1a57cc93554dea3ce40b4c19c4057e41c55d4a0f3d84ea71c325aa" dependencies = [ "difference", "float-cmp", "normalize-line-endings", "predicates-core", - "regex 1.3.9", + "regex", ] [[package]] name = "predicates-core" -version = "1.0.0" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57e35a3326b75e49aa85f5dc6ec15b41108cf5aee58eabb1f274dd18b73c2451" + +[[package]] +name = "predicates-tree" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15f553275e5721409451eb85e15fd9a860a6e5ab4496eb215987502b5f5391f2" +dependencies = [ + "predicates-core", + "treeline", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" + +[[package]] +name = "proc-macro-nested" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06075c3a3e92559ff8929e7a280684489ea27fe44805174c3ebd9328dcb37178" +checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" [[package]] -name = "predicates-tree" -version = "1.0.0" +name = "proc-macro2" +version = "1.0.26" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e63c4859013b38a76eca2414c64911fba30def9e3202ac461a2d22831220124" +checksum = "a152013215dca273577e18d2bf00fa862b89b24169fb78c4c95aeb07992c9cec" dependencies = [ - "predicates-core", - "treeline", + "unicode-xid", ] [[package]] -name = "proc-macro-hack" -version = "0.5.16" +name = "procfs" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" +checksum = "ab8809e0c18450a2db0f236d2a44ec0b4c1412d0eb936233579f0990faa5d5cd" +dependencies = [ + "bitflags", + "byteorder", + "flate2", + "hex", + "lazy_static", + "libc", +] [[package]] -name = "proc-macro-nested" -version = "0.1.6" +name = "prometheus" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a" +checksum = "c8425533e7122f0c3cc7a37e6244b16ad3a2cc32ae7ac6276e2a75da0d9c200d" +dependencies = [ + "cfg-if 1.0.0", + "fnv", + "lazy_static", + "libc", + "parking_lot 0.11.1", + "procfs", + "protobuf", + "regex", + "thiserror", +] [[package]] -name = "proc-macro2" -version = "1.0.18" +name = "prometheus" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "beae6331a816b1f65d04c45b078fd8e6c93e8071771f41b8163255bbd8d7c8fa" +checksum = "5986aa8d62380092d2f50f8b1cdba9cb9b6731ffd4b25b51fd126b6c3e05b99c" dependencies = [ - "unicode-xid", + "cfg-if 1.0.0", + "fnv", + "lazy_static", + "libc", + "memchr", + "parking_lot 0.11.1", + "procfs", + "protobuf", + "thiserror", ] [[package]] @@ -1757,7 +2977,7 @@ dependencies = [ "bytes 0.5.6", "heck", "itertools", - "log 0.4.8", + "log", "multimap", "petgraph", "prost", @@ -1789,6 +3009,12 @@ dependencies = [ "prost", ] +[[package]] +name = "protobuf" +version = "2.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b7f4a129bb3754c25a4e04032a90173c68f85168f77118ac4cb4936e7f06f92" + [[package]] name = "quick-error" version = "1.2.3" @@ -1797,48 +3023,37 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" +checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" dependencies = [ "proc-macro2", ] [[package]] name = "rand" -version = "0.3.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ac302d8f83c0c1974bf758f6b041c6c8ada916fbb44a609158ca8b064cc76c" -dependencies = [ - "libc", - "rand 0.4.6", -] - -[[package]] -name = "rand" -version = "0.4.6" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ - "fuchsia-cprng", + "getrandom 0.1.16", "libc", - "rand_core 0.3.1", - "rdrand", - "winapi 0.3.9", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", + "rand_pcg", ] [[package]] name = "rand" -version = "0.7.3" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +checksum = 
"0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" dependencies = [ - "getrandom", "libc", - "rand_chacha", - "rand_core 0.5.1", - "rand_hc", - "rand_pcg", + "rand_chacha 0.3.0", + "rand_core 0.6.2", + "rand_hc 0.3.0", ] [[package]] @@ -1852,27 +3067,31 @@ dependencies = [ ] [[package]] -name = "rand_core" -version = "0.3.1" +name = "rand_chacha" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +checksum = "e12735cf05c9e10bf21534da50a147b924d555dc7a547c42e6bb2d5b6017ae0d" dependencies = [ - "rand_core 0.4.2", + "ppv-lite86", + "rand_core 0.6.2", ] [[package]] name = "rand_core" -version = "0.4.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom 0.1.16", +] [[package]] name = "rand_core" -version = "0.5.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +checksum = "34cf66eb183df1c5876e2dcf6b13d57340741e8dc255b48e40a26de954d06ae7" dependencies = [ - "getrandom", + "getrandom 0.2.2", ] [[package]] @@ -1885,21 +3104,21 @@ dependencies = [ ] [[package]] -name = "rand_pcg" -version = "0.2.1" +name = "rand_hc" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" +checksum = "3190ef7066a446f2e7f42e239d161e905420ccab01eb967c9eb27d21b2322a73" dependencies = [ - "rand_core 0.5.1", + "rand_core 0.6.2", ] [[package]] -name = "rdrand" -version = "0.4.0" +name = "rand_pcg" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +checksum = "16abd0c1b639e9eb4d7c50c0b8100b0d0f849be2349829c740fe8e6eb4816429" dependencies = [ - "rand_core 0.3.1", + "rand_core 0.5.1", ] [[package]] @@ -1909,64 +3128,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce" [[package]] -name = "redox_users" -version = "0.3.4" +name = "redox_syscall" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b23093265f8d200fa7b4c2c76297f47e681c655f6f1285a8780d6a022f7431" +checksum = "94341e4e44e24f6b591b59e47a8a027df12e008d73fd5672dbea9cc22f4507d9" dependencies = [ - "getrandom", - "redox_syscall", - "rust-argon2", + "bitflags", ] [[package]] -name = "regex" -version = "0.2.11" +name = "redox_users" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9329abc99e39129fcceabd24cf5d85b4671ef7c29c50e972bc5afe32438ec384" +checksum = "de0737333e7a9502c789a36d7c7fa6092a49895d4faa31ca5df163857ded2e9d" dependencies = [ - "aho-corasick 0.6.10", - "memchr", - "regex-syntax 0.5.6", - "thread_local 0.3.6", - "utf8-ranges", + "getrandom 0.1.16", + "redox_syscall 0.1.57", + "rust-argon2", ] [[package]] name = "regex" -version = "1.3.9" +version = "1.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c3780fcf44b193bc4d09f36d2a3c87b251da4a046c87795a0d35f4f927ad8e6" +checksum = 
"957056ecddbeba1b26965114e191d2e8589ce74db242b6ea25fc4062427a5c19" dependencies = [ - "aho-corasick 0.7.13", + "aho-corasick", "memchr", - "regex-syntax 0.6.18", - "thread_local 1.0.1", -] - -[[package]] -name = "regex-syntax" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d707a4fa2637f2dca2ef9fd02225ec7661fe01a53623c1e6515b6916511f7a7" -dependencies = [ - "ucd-util", + "regex-syntax", ] [[package]] name = "regex-syntax" -version = "0.6.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26412eb97c6b088a6997e05f69403a802a92d520de2f8e63c2b65f9e0f47c4e8" - -[[package]] -name = "relay" -version = "0.1.1" +version = "0.6.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1576e382688d7e9deecea24417e350d3062d97e32e45d70b1cde65994ff1489a" -dependencies = [ - "futures 0.1.29", -] +checksum = "24d5f089152e60f62d28b835fbff2cd2e8dc0baf1ac13343bef92ab7eed84548" [[package]] name = "remove_dir_all" @@ -1979,38 +3175,97 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.10.6" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680" +checksum = "0718f81a8e14c4dbb3b34cf23dc6aaf9ab8a0dfec160c534b3dbca1aaa21f47c" dependencies = [ "async-compression", - "base64 0.12.3", + "base64 0.13.0", "bytes 0.5.6", "encoding_rs", "futures-core", "futures-util", - "http 0.2.1", - "http-body", - "hyper 0.13.7", + "http 0.2.3", + "http-body 0.3.1", + "hyper 0.13.10", "hyper-tls", + "ipnet", "js-sys", "lazy_static", - "log 0.4.8", + "log", "mime", "mime_guess", "native-tls", "percent-encoding 2.1.0", - "pin-project-lite", + "pin-project-lite 0.2.6", "serde", "serde_json", - "serde_urlencoded", - "tokio 0.2.21", + "serde_urlencoded 0.7.0", + "tokio 0.2.25", "tokio-tls", - "url 2.1.1", + "url 2.2.1", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", + "winreg 0.7.0", +] + +[[package]] +name = "reqwest" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf12057f289428dbf5c591c74bf10392e4a8003f993405a902f20117019022d4" +dependencies = [ + "base64 0.13.0", + "bytes 1.0.1", + "encoding_rs", + "futures-core", + "futures-util", + "http 0.2.3", + "http-body 0.4.1", + "hyper 0.14.7", + "ipnet", + "js-sys", + "lazy_static", + "log", + "mime", + "mime_guess", + "percent-encoding 2.1.0", + "pin-project-lite 0.2.6", + "serde", + "serde_json", + "serde_urlencoded 0.7.0", + "tokio 1.4.0", + "url 2.2.1", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg", + "winreg 0.7.0", +] + +[[package]] +name = "resolv-conf" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +dependencies = [ + "hostname", + "quick-error", +] + +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi 0.3.9", ] [[package]] @@ -2024,22 +3279,16 @@ dependencies = [ [[package]] name = "rust-argon2" -version = "0.7.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bc8af4bda8e1ff4932523b94d3dd20ee30a87232323eda55903ffd71d2fb017" +checksum = 
"4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" dependencies = [ - "base64 0.11.0", + "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils", + "crossbeam-utils 0.8.3", ] -[[package]] -name = "rustc-demangle" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" - [[package]] name = "rustc_version" version = "0.2.3" @@ -2050,16 +3299,36 @@ dependencies = [ ] [[package]] -name = "ryu" -version = "1.0.5" +name = "rustls" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" +dependencies = [ + "base64 0.10.1", + "log", + "ring", + "sct", + "webpki", +] [[package]] -name = "safemem" -version = "0.2.0" +name = "rustls" +version = "0.18.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d1126dcf58e93cee7d098dbda643b5f92ed724f1f6a63007c1116eed6700c81" +dependencies = [ + "base64 0.12.3", + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "ryu" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e27a8b19b835f7aea908818e871f5cc3a5a186550c30773be987e155e8163d8f" +checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" [[package]] name = "safemem" @@ -2095,11 +3364,21 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "sct" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3042af939fca8c3453b7af0f1c66e533a15a86169e39de2657310ade8f98d3c" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "security-framework" -version = "0.4.4" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64808902d7d99f78eaddd2b4e2509713babc3dc3c85ad6f4c447680f3c01e535" +checksum = "3670b1d2fdf6084d192bc71ead7aabe6c06aa2ea3fbd9cc3ac111fa5c2b1bd84" dependencies = [ "bitflags", "core-foundation", @@ -2110,9 +3389,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "0.4.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17bf11d99252f512695eb468de5516e5cf75455521e69dfe343f3b74e4748405" +checksum = "3676258fd3cfe2c9a0ec99ce3038798d847ce3e4bb17746373eb9f0f1ac16339" dependencies = [ "core-foundation-sys", "libc", @@ -2135,9 +3414,12 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.114" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5317f7588f0a5078ee60ef675ef96735a1442132dc645eb1d12c018620ed8cd3" +checksum = "558dc50e1a5a5fa7112ca2ce4effcb321b0300c0d4ccf0776a9f60cd89031171" +dependencies = [ + "serde_derive", +] [[package]] name = "serde-value" @@ -2151,9 +3433,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.114" +version = "1.0.125" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a0be94b04690fbaed37cddffc5c134bf537c8e3329d53e982fe04c374978f8e" +checksum = "b093b7a2bb58203b5da3056c05b4ec1fed827dcfdb37347a8841695263b3d06d" dependencies = [ "proc-macro2", "quote", @@ -2162,9 
+3444,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.56" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3433e879a558dde8b5e8feb2a04899cf34fdde1fafb894687e52105fc1162ac3" +checksum = "799e97dc9fdae36a5c8b8f2cae9ce2ee9fdce2058c57a93e6099d919fd982f79" dependencies = [ "itoa", "ryu", @@ -2180,14 +3462,26 @@ dependencies = [ "dtoa", "itoa", "serde", - "url 2.1.1", + "url 2.2.1", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", ] [[package]] name = "serde_yaml" -version = "0.8.13" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae3e2dd40a7cdc18ca80db804b7f461a39bb721160a85c9a1fa30134bf3c02a5" +checksum = "15654ed4ab61726bf918a39cb8d98a2e2995b002387807fa6ba58fdf7f59bb23" dependencies = [ "dtoa", "linked-hash-map", @@ -2201,27 +3495,39 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" dependencies = [ - "block-buffer", - "digest", + "block-buffer 0.7.3", + "digest 0.8.1", "fake-simd", - "opaque-debug", + "opaque-debug 0.2.3", ] [[package]] -name = "signal-hook-registry" -version = "1.2.0" +name = "sha-1" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94f478ede9f64724c5d173d7bb56099ec3e2d9fc2774aac65d34b8b890405f41" +checksum = "dfebf75d25bd900fd1e7d11501efab59bc846dbc76196839663e6637bba9f25f" dependencies = [ - "arc-swap", - "libc", + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpuid-bool", + "digest 0.9.0", + "opaque-debug 0.3.0", ] [[package]] -name = "slab" -version = "0.3.0" +name = "sha1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" +checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" + +[[package]] +name = "signal-hook-registry" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6" +dependencies = [ + "libc", +] [[package]] name = "slab" @@ -2231,57 +3537,115 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" [[package]] name = "smallvec" -version = "0.2.1" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c8cbcd6df1e117c2210e13ab5109635ad68a929fcbb8964dc965b76cb5ee013" +checksum = "b97fcaeba89edba30f044a10c6a3cc39df9c3f17d7cd829dd1446cab35f890e0" +dependencies = [ + "maybe-uninit", +] [[package]] name = "smallvec" -version = "0.6.13" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" + +[[package]] +name = "socket2" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" +checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" dependencies = [ - "maybe-uninit", + "cfg-if 1.0.0", + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "socket2" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e3dfc207c526015c632472a77be09cf1b6e46866581aecae5cc38fb4235dea2" +dependencies = [ + "libc", + "winapi 0.3.9", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + +[[package]] +name = "standback" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" +dependencies = [ + "version_check", +] + +[[package]] +name = "stdweb" +version = "0.4.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +dependencies = [ + "discard", + "rustc_version", + "stdweb-derive", + "stdweb-internal-macros", + "stdweb-internal-runtime", + "wasm-bindgen", +] + +[[package]] +name = "stdweb-derive" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" +dependencies = [ + "proc-macro2", + "quote", + "serde", + "serde_derive", + "syn", +] + +[[package]] +name = "stdweb-internal-macros" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +dependencies = [ + "base-x", + "proc-macro2", + "quote", + "serde", + "serde_derive", + "serde_json", + "sha1", + "syn", ] [[package]] -name = "smol" -version = "0.1.18" +name = "stdweb-internal-runtime" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620cbb3c6e34da57d3a248cda0cd01cd5848164dc062e764e65d06fe3ea7aed5" -dependencies = [ - "async-task", - "blocking", - "concurrent-queue", - "fastrand", - "futures-io", - "futures-util", - "libc", - "once_cell", - "scoped-tls 1.0.0", - "slab 0.4.2", - "socket2", - "wepoll-sys-stjepang", - "winapi 0.3.9", -] +checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" [[package]] -name = "socket2" -version = "0.3.12" +name = "strsim" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03088793f677dce356f3ccc2edb1b314ad191ab702a5de3faf49304f7e104918" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "winapi 0.3.9", -] +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "subtle" -version = "1.0.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" +checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "sxd-document" @@ -2306,70 +3670,61 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cae2873c940d92e697597c5eee105fb570cd5689c695806f672883653349b" -dependencies = [ - "proc-macro2", - "quote", - "unicode-xid", -] - -[[package]] -name = "synstructure" -version = "0.12.4" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" +checksum = "3ce15dd3ed8aa2f8eeac4716d6ef5ab58b6b9256db41d7e1a0224c2788e8fd87" dependencies = [ "proc-macro2", "quote", - "syn", 
"unicode-xid", ] -[[package]] -name = "take" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b157868d8ac1f56b64604539990685fa7611d8fa9e5476cf0c02cf34d32917c5" - [[package]] name = "tempfile" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9" +checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", - "rand 0.7.3", - "redox_syscall", + "rand 0.8.3", + "redox_syscall 0.2.5", "remove_dir_all", "winapi 0.3.9", ] [[package]] name = "termcolor" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f" +checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" dependencies = [ "winapi-util", ] +[[package]] +name = "textwrap" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "203008d98caf094106cfaba70acfed15e18ed3ddb7d94e49baec153a2b462789" +dependencies = [ + "unicode-width", +] + [[package]] name = "thiserror" -version = "1.0.20" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08" +checksum = "e0f4a65597094d4483ddaed134f409b2cb7c1beccf25201a9f73c719254fa98e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.20" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793" +checksum = "7765189610d8241a44529806d6fd1f2e0a08734313a35d5b3a556f92b381f3c0" dependencies = [ "proc-macro2", "quote", @@ -2377,38 +3732,77 @@ dependencies = [ ] [[package]] -name = "thread_local" -version = "0.3.6" +name = "threadpool" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" dependencies = [ - "lazy_static", + "num_cpus", ] [[package]] -name = "thread_local" -version = "1.0.1" +name = "time" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ - "lazy_static", + "libc", + "wasi 0.10.0+wasi-snapshot-preview1", + "winapi 0.3.9", ] [[package]] name = "time" -version = "0.1.43" +version = "0.2.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "08a8cbfbf47955132d0202d1662f49b2423ae35862aee471f3ba4b133358f372" dependencies = [ + "const_fn", "libc", + "standback", + "stdweb", + "time-macros", + "version_check", "winapi 0.3.9", ] +[[package]] +name = "time-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" +dependencies = [ + "proc-macro-hack", + "time-macros-impl", +] + +[[package]] +name = "time-macros-impl" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5c3be1edfad6027c69f5491cf4cb310d1a71ecd6af742788c6ff8bced86b8fa" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "standback", + "syn", +] + [[package]] name = "tinyvec" -version = "0.3.3" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b5220f05bb7de7f3f53c7c065e1199b3172696fe2db9f9c4d8ad9b4ee74c342" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed" +checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" @@ -2417,8 +3811,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a09c0b5bb588872ab2f09afa13ee6e9dac11e10a0ec9e8e3ba39a5a5d530af6" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", - "mio", + "futures 0.1.31", + "mio 0.6.23", "num_cpus", "tokio-codec", "tokio-current-thread", @@ -2436,9 +3830,9 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.21" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d099fa27b9702bed751524694adbe393e18b36b204da91eb1cbbbbb4a5ee2d58" +checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092" dependencies = [ "bytes 0.5.6", "fnv", @@ -2447,17 +3841,31 @@ dependencies = [ "lazy_static", "libc", "memchr", - "mio", + "mio 0.6.23", "mio-named-pipes", "mio-uds", "num_cpus", - "pin-project-lite", + "pin-project-lite 0.1.12", "signal-hook-registry", - "slab 0.4.2", + "slab", "tokio-macros", "winapi 0.3.9", ] +[[package]] +name = "tokio" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134af885d758d645f0f0505c9a8b3f9bf8a348fd822e112ab5248138348f1722" +dependencies = [ + "autocfg", + "bytes 1.0.1", + "libc", + "memchr", + "mio 0.7.11", + "pin-project-lite 0.2.6", +] + [[package]] name = "tokio-codec" version = "0.1.2" @@ -2465,21 +3873,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25b2998660ba0e70d18684de5d06b70b70a3a747469af9dea7618cc59e75976b" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.31", "tokio-io", ] [[package]] name = "tokio-core" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aeeffbbb94209023feaef3c196a41cbcdafa06b4a6f893f68779bb5e53796f71" +checksum = "87b1395334443abca552f63d4f61d0486f12377c2ba8b368e523f89e828cffd4" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.31", "iovec", - "log 0.4.8", - "mio", + "log", + "mio 0.6.23", "scoped-tls 0.1.2", "tokio 0.1.22", "tokio-executor", @@ -2494,7 +3902,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1de0e32a83f131e002238d7ccde18211c0a5397f60cbfffcb112868c2e0e20e" dependencies = [ - "futures 0.1.29", + "futures 0.1.31", "tokio-executor", ] @@ -2504,8 +3912,8 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb2d1b8f4548dbf5e1f7818512e9c406860678f29c300cdf0ebac72d1a3a1671" dependencies = [ - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.31", ] [[package]] @@ -2514,7 +3922,7 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "297a1206e0ca6302a0eed35b700d292b275256f596e2f3fea7729d5e629b6ff4" dependencies = [ - "futures 0.1.29", + "futures 0.1.31", "tokio-io", "tokio-threadpool", ] @@ -2526,15 +3934,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", - "log 0.4.8", + "futures 0.1.31", + "log", ] [[package]] name = "tokio-macros" -version = "0.2.5" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" +checksum = "e44da00bfc73a25f814cd8d7e57a68a5c31b74b3152a0a1d1f590c97ed06265a" dependencies = [ "proc-macro2", "quote", @@ -2542,21 +3950,13 @@ dependencies = [ ] [[package]] -name = "tokio-proto" -version = "0.1.1" +name = "tokio-openssl" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fbb47ae81353c63c487030659494b295f6cb6576242f907f203473b191b0389" +checksum = "3c4b08c5f4208e699ede3df2520aca2e82401b2de33f45e96696a074480be594" dependencies = [ - "futures 0.1.29", - "log 0.3.9", - "net2", - "rand 0.3.23", - "slab 0.3.0", - "smallvec 0.2.1", - "take", - "tokio-core", - "tokio-io", - "tokio-service", + "openssl", + "tokio 0.2.25", ] [[package]] @@ -2565,26 +3965,29 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09bc590ec4ba8ba87652da2068d150dcada2cfa2e07faae270a5e0409aa51351" dependencies = [ - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.31", "lazy_static", - "log 0.4.8", - "mio", + "log", + "mio 0.6.23", "num_cpus", - "parking_lot", - "slab 0.4.2", + "parking_lot 0.9.0", + "slab", "tokio-executor", "tokio-io", "tokio-sync", ] [[package]] -name = "tokio-service" -version = "0.1.0" +name = "tokio-rustls" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" +checksum = "3068d891551949b37681724d6b73666787cc63fa8e255c812a41d2513aff9775" dependencies = [ - "futures 0.1.29", + "futures-core", + "rustls 0.16.0", + "tokio 0.2.25", + "webpki", ] [[package]] @@ -2593,9 +3996,9 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0c34c6e548f101053321cba3da7cbb87a610b85555884c41b07da2eb91aff12" dependencies = [ - "futures 0.1.29", + "futures 0.1.31", "libc", - "mio", + "mio 0.6.23", "mio-uds", "signal-hook-registry", "tokio-executor", @@ -2611,7 +4014,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfe50152bc8164fcc456dab7891fa9bf8beaf01c5ee7e1dd43a397c3cf87dee" dependencies = [ "fnv", - "futures 0.1.29", + "futures 0.1.31", ] [[package]] @@ -2621,9 +4024,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98df18ed66e3b72e742f185882a9e201892407957e45fbff8da17ae7a7c51f72" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.31", "iovec", - "mio", + "mio 0.6.23", "tokio-io", "tokio-reactor", ] @@ -2636,12 +4039,12 @@ checksum = "df720b6581784c118f0eb4310796b12b1d242a7eb95f716a8367855325c25f89" dependencies = [ "crossbeam-deque", "crossbeam-queue", - "crossbeam-utils", - "futures 0.1.29", + "crossbeam-utils 0.7.2", + "futures 0.1.31", "lazy_static", - "log 0.4.8", + "log", "num_cpus", - "slab 0.4.2", + "slab", "tokio-executor", ] @@ -2651,9 +4054,9 @@ version = "0.2.13" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93044f2d313c95ff1cb7809ce9a7a05735b012288a888b62d4434fd58c94f296" dependencies = [ - "crossbeam-utils", - "futures 0.1.29", - "slab 0.4.2", + "crossbeam-utils 0.7.2", + "futures 0.1.31", + "slab", "tokio-executor", ] @@ -2664,7 +4067,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a70f4fcd7b3b24fb194f837560168208f669ca8cb70d0c4b862944452396343" dependencies = [ "native-tls", - "tokio 0.2.21", + "tokio 0.2.25", +] + +[[package]] +name = "tokio-tungstenite" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9e878ad426ca286e4dcae09cbd4e1973a7f8987d97570e2469703dd7f5720c" +dependencies = [ + "futures-util", + "log", + "pin-project 0.4.28", + "tokio 0.2.25", + "tungstenite", ] [[package]] @@ -2674,9 +4090,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", - "log 0.4.8", - "mio", + "futures 0.1.31", + "log", + "mio 0.6.23", "tokio-codec", "tokio-io", "tokio-reactor", @@ -2689,11 +4105,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab57a4ac4111c8c9dbcf70779f6fc8bc35ae4b2454809febac840ad19bd7e4e0" dependencies = [ "bytes 0.4.12", - "futures 0.1.29", + "futures 0.1.31", "iovec", "libc", - "log 0.4.8", - "mio", + "log", + "mio 0.6.23", "mio-uds", "tokio-codec", "tokio-io", @@ -2709,9 +4125,9 @@ dependencies = [ "bytes 0.5.6", "futures-core", "futures-sink", - "log 0.4.8", - "pin-project-lite", - "tokio 0.2.21", + "log", + "pin-project-lite 0.1.12", + "tokio 0.2.25", ] [[package]] @@ -2722,10 +4138,25 @@ checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ "bytes 0.5.6", "futures-core", + "futures-io", + "futures-sink", + "log", + "pin-project-lite 0.1.12", + "tokio 0.2.25", +] + +[[package]] +name = "tokio-util" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5143d049e85af7fbc36f5454d990e62c2df705b3589f123b71f441b6b59f443f" +dependencies = [ + "bytes 1.0.1", + "futures-core", "futures-sink", - "log 0.4.8", - "pin-project-lite", - "tokio 0.2.21", + "log", + "pin-project-lite 0.2.6", + "tokio 1.4.0", ] [[package]] @@ -2740,14 +4171,15 @@ dependencies = [ "bytes 0.5.6", "futures-core", "futures-util", - "http 0.2.1", - "http-body", - "hyper 0.13.7", + "http 0.2.3", + "http-body 0.3.1", + "hyper 0.13.10", "percent-encoding 1.0.1", - "pin-project", + "pin-project 0.4.28", "prost", "prost-derive", - "tokio 0.2.21", + "tokio 0.2.25", + "tokio-rustls", "tokio-util 0.2.0", "tower", "tower-balance", @@ -2797,10 +4229,10 @@ dependencies = [ "futures-core", "futures-util", "indexmap", - "pin-project", + "pin-project 0.4.28", "rand 0.7.3", - "slab 0.4.2", - "tokio 0.2.21", + "slab", + "tokio 0.2.25", "tower-discover", "tower-layer", "tower-load", @@ -2817,8 +4249,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4887dc2a65d464c8b9b66e0e4d51c2fd6cf5b3373afc72805b0a60bce00446a" dependencies = [ "futures-core", - "pin-project", - "tokio 0.2.21", + "pin-project 0.4.28", + "tokio 0.2.25", "tower-layer", "tower-service", "tracing", @@ -2831,15 +4263,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f6b5000c3c54d269cc695dff28136bb33d08cbf1df2c48129e143ab65bf3c2a" dependencies = [ "futures-core", - 
"pin-project", + "pin-project 0.4.28", "tower-service", ] [[package]] name = "tower-layer" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a35d656f2638b288b33495d1053ea74c40dc05ec0b92084dd71ca5566c4ed1dc" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" [[package]] name = "tower-limit" @@ -2848,8 +4280,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92c3040c5dbed68abffaa0d4517ac1a454cd741044f33ab0eefab6b8d1361404" dependencies = [ "futures-core", - "pin-project", - "tokio 0.2.21", + "pin-project 0.4.28", + "tokio 0.2.25", "tower-layer", "tower-load", "tower-service", @@ -2862,9 +4294,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cc79fc3afd07492b7966d7efa7c6c50f8ed58d768a6075dd7ae6591c5d2017b" dependencies = [ "futures-core", - "log 0.4.8", - "pin-project", - "tokio 0.2.21", + "log", + "pin-project 0.4.28", + "tokio 0.2.25", "tower-discover", "tower-service", ] @@ -2876,7 +4308,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f021e23900173dc315feb4b6922510dae3e79c689b74c089112066c11f0ae4e" dependencies = [ "futures-core", - "pin-project", + "pin-project 0.4.28", "tower-layer", "tower-service", ] @@ -2887,7 +4319,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce50370d644a0364bf4877ffd4f76404156a248d104e2cc234cd391ea5cdc965" dependencies = [ - "tokio 0.2.21", + "tokio 0.2.25", "tower-service", ] @@ -2900,8 +4332,8 @@ dependencies = [ "futures-core", "futures-util", "indexmap", - "log 0.4.8", - "tokio 0.2.21", + "log", + "tokio 0.2.25", "tower-service", ] @@ -2912,17 +4344,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6727956aaa2f8957d4d9232b308fe8e4e65d99db30f42b225646e86c9b6a952" dependencies = [ "futures-core", - "pin-project", - "tokio 0.2.21", + "pin-project 0.4.28", + "tokio 0.2.25", "tower-layer", "tower-service", ] [[package]] name = "tower-service" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" +checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tower-timeout" @@ -2930,8 +4362,8 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "127b8924b357be938823eaaec0608c482d40add25609481027b96198b2e4b31e" dependencies = [ - "pin-project", - "tokio 0.2.21", + "pin-project 0.4.28", + "tokio 0.2.25", "tower-layer", "tower-service", ] @@ -2944,27 +4376,28 @@ checksum = "d1093c19826d33807c72511e68f73b4a0469a3f22c2bd5f7d5212178b4b89674" dependencies = [ "futures-core", "futures-util", - "pin-project", + "pin-project 0.4.28", "tower-service", ] [[package]] name = "tracing" -version = "0.1.16" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e2a2de6b0d5cbb13fc21193a2296888eaab62b6044479aafb3c54c01c29fcd" +checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" dependencies = [ - "cfg-if", - "log 0.4.8", + "cfg-if 1.0.0", + "log", + "pin-project-lite 0.2.6", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.9" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f0693bf8d6f2bf22c690fc61a9d21ac69efdbb894a17ed596b9af0f01e64b84b" +checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" dependencies = [ "proc-macro2", "quote", @@ -2973,20 +4406,20 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.11" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94ae75f0d28ae10786f3b1895c55fe72e79928fd5ccdebb5438c75e93fec178f" +checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" dependencies = [ "lazy_static", ] [[package]] name = "tracing-futures" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab7bb6f14721aa00656086e9335d363c5c8747bae02ebe32ea2c7dece5689b4c" +checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project", + "pin-project 1.0.6", "tracing", ] @@ -2997,10 +4430,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] -name = "try-lock" -version = "0.1.0" +name = "trust-dns-proto" +version = "0.19.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cad71a0c0d68ab9941d2fb6e82f8fb2e86d9945b94e1661dd0aaea2b88215a9" +dependencies = [ + "async-trait", + "cfg-if 1.0.0", + "enum-as-inner", + "futures 0.3.14", + "idna 0.2.2", + "lazy_static", + "log", + "rand 0.7.3", + "smallvec 1.6.1", + "thiserror", + "tokio 0.2.25", + "url 2.2.1", +] + +[[package]] +name = "trust-dns-resolver" +version = "0.19.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2aa4715743892880f70885373966c83d73ef1b0838a664ef0c76fffd35e7c2" +checksum = "710f593b371175db53a26d0b38ed2978fafb9e9e8d3868b1acd753ea18df0ceb" +dependencies = [ + "cfg-if 0.1.10", + "futures 0.3.14", + "ipconfig", + "lazy_static", + "log", + "lru-cache", + "resolv-conf", + "smallvec 1.6.1", + "thiserror", + "tokio 0.2.25", + "trust-dns-proto", +] [[package]] name = "try-lock" @@ -3008,6 +4474,34 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "tungstenite" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0308d80d86700c5878b9ef6321f020f29b1bb9d5ff3cab25e75e23f3a492a23" +dependencies = [ + "base64 0.12.3", + "byteorder", + "bytes 0.5.6", + "http 0.2.3", + "httparse", + "input_buffer", + "log", + "rand 0.7.3", + "sha-1 0.9.4", + "url 2.2.1", + "utf-8", +] + +[[package]] +name = "twoway" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59b11b2b5241ba34be09c3cc85a36e56e48f9888862e19cedf23336d35316ed1" +dependencies = [ + "memchr", +] + [[package]] name = "typed-arena" version = "1.7.0" @@ -3016,9 +4510,9 @@ checksum = "a9b2228007eba4120145f785df0f6c92ea538f5a3635a612ecf4e334c8c1446d" [[package]] name = "typenum" -version = "1.12.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33" +checksum = "879f6906492a7cd215bfa4cf595b600146ccfac0c79bcbd1f3000162af5e8b06" [[package]] name = "ucd-trie" @@ -3026,33 +4520,40 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" -[[package]] -name = "ucd-util" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c85f514e095d348c279b1e5cd76795082cf15bd59b93207832abe0b1d8fed236" - [[package]] name = "udev" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24953d50a3bce0f5f5a9a2766567072dc9af8096f8c40ea81815da651066bc9f" +checksum = "048df778e99eea028c08cca7853b9b521df6948b59bb29ab8bb737c057f58e6d" dependencies = [ "libc", "libudev-sys", ] +[[package]] +name = "udev-discovery-handler" +version = "0.6.5" +dependencies = [ + "akri-discovery-utils 0.6.5", + "akri-udev", + "env_logger 0.8.3", + "log", + "tokio 0.2.25", +] + [[package]] name = "udev-video-broker" -version = "0.1.5" +version = "0.6.5" dependencies = [ - "akri-shared", - "env_logger", - "futures 0.1.29", - "log 0.4.8", + "akri-shared 0.6.5", + "env_logger 0.8.3", + "futures 0.1.31", + "lazy_static", + "log", + "prometheus 0.12.0", "prost", "rscam", - "tokio 0.2.21", + "tokio 0.2.25", "tonic", "tonic-build", ] @@ -3068,27 +4569,33 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f2bd0c6468a8230e1db229cff8029217cf623c767ea5d60bfbd42729ea54d5" +checksum = "eeb8be209bb1c96b7c177c7420d26e04eccacb0eeae6b980e35fcb74678107e0" dependencies = [ "matches", ] [[package]] name = "unicode-normalization" -version = "0.1.13" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977" +checksum = "07fbfce1c8a97d547e8b5334978438d9d6ec8c20e38f56d4a4374d181493eaef" dependencies = [ "tinyvec", ] [[package]] name = "unicode-segmentation" -version = "1.6.0" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796" + +[[package]] +name = "unicode-width" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" +checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3" [[package]] name = "unicode-xid" @@ -3096,6 +4603,12 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564" +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + [[package]] name = "url" version = "1.7.2" @@ -3109,76 +4622,112 @@ dependencies = [ [[package]] name = "url" -version = "2.1.1" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d4a8476c35c9bf0bbce5a3b23f4106f79728039b726d292bb93bc106787cb" +checksum = "9ccd964113622c8e9322cfac19eb1004a07e636c545f325da085d5cdde6f1f8b" dependencies = [ - "idna 0.2.0", + "form_urlencoded", + "idna 0.2.2", "matches", "percent-encoding 2.1.0", ] [[package]] -name = "utf8-ranges" -version = "1.0.4" +name = "urlencoding" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9232eb53352b4442e40d7900465dfc534e8cb2dc8f18656fcb2ac16112b5593" + +[[package]] +name = "utf-8" +version = 
"0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4ae116fef2b7fea257ed6440d3cfcff7f190865f170cdad00bb6465bf18ecba" +checksum = "05e42f7c18b8f902290b009cde6d651262f956c98bc51bca4cd1d511c9cd85c7" [[package]] name = "uuid" -version = "0.5.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcc7e3b898aa6f6c08e5295b6c89258d1331e9ac578cc992fb818759951bdc22" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "rand 0.3.23", + "getrandom 0.2.2", ] [[package]] -name = "uuid" -version = "0.8.1" +name = "value-bag" +version = "1.0.0-alpha.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11" +checksum = "6b676010e055c99033117c2343b33a40a30b91fecd6c49055ac9cd2d6c305ab1" dependencies = [ - "rand 0.7.3", + "ctor", ] [[package]] name = "vcpkg" -version = "0.2.10" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b00bca6106a5e23f3eee943593759b7fcddb00554332e856d990c893966879fb" + +[[package]] +name = "vec-arena" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c" +checksum = "34b2f665b594b07095e3ac3f718e13c2197143416fae4c5706cffb7b1af8d7f1" + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed" +checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" [[package]] name = "waker-fn" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9571542c2ce85ce642e6b58b3364da2fb53526360dfb7c211add4f5c23105ff7" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" [[package]] name = "want" -version = "0.0.4" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a05d9d966753fa4b5c8db73fcab5eed4549cfe0e1e4e66911e5564a0085c35d1" +checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "futures 0.1.29", - "log 0.4.8", - "try-lock 0.1.0", + "log", + "try-lock", ] [[package]] -name = "want" -version = "0.3.0" +name = "warp" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "f41be6df54c97904af01aa23e613d4521eed7ab23537cede692d4058f6449407" dependencies = [ - "log 0.4.8", - "try-lock 0.2.3", + "bytes 0.5.6", + "futures 0.3.14", + "headers", + "http 0.2.3", + "hyper 0.13.10", + "log", + "mime", + "mime_guess", + "multipart", + "pin-project 0.4.28", + "scoped-tls 1.0.0", + "serde", + "serde_json", + "serde_urlencoded 0.6.1", + "tokio 0.2.25", + "tokio-tungstenite", + "tower-service", + "tracing", + "tracing-futures", + "urlencoding", ] [[package]] @@ -3187,13 +4736,19 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" 
+[[package]] +name = "wasi" +version = "0.10.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" + [[package]] name = "wasm-bindgen" -version = "0.2.64" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a634620115e4a229108b71bde263bb4220c483b3f07f5ba514ee8d15064c4c2" +checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "serde", "serde_json", "wasm-bindgen-macro", @@ -3201,13 +4756,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.64" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e53963b583d18a5aa3aaae4b4c1cb535218246131ba22a71f05b518098571df" +checksum = "ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" dependencies = [ "bumpalo", "lazy_static", - "log 0.4.8", + "log", "proc-macro2", "quote", "syn", @@ -3216,11 +4771,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.14" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dba48d66049d2a6cc8488702e7259ab7afc9043ad0dc5448444f46f2a453b362" +checksum = "81b8b767af23de6ac18bf2168b690bed2902743ddf0fb39252e36f9e2bfc63ea" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -3228,9 +4783,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.64" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fcfd5ef6eec85623b4c6e844293d4516470d8f19cd72d0d12246017eb9060b8" +checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3238,9 +4793,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.64" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9adff9ee0e94b926ca81b57f57f86d5545cdcb1d259e21ec9bdd95b901754c75" +checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" dependencies = [ "proc-macro2", "quote", @@ -3251,25 +4806,53 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.64" +version = "0.2.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7b90ea6c632dd06fd765d44542e234d5e63d9bb917ecd64d79778a13bd79ae" +checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" [[package]] name = "web-sys" -version = "0.3.41" +version = "0.3.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "863539788676619aac1a23e2df3655e96b32b0e05eb72ca34ba045ad573c625d" +checksum = "a905d57e488fec8861446d3393670fb50d27a262344013181c2cdf9fff5481be" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] -name = "wepoll-sys-stjepang" -version = "1.0.6" +name = "webhook-configuration" +version = "0.6.5" +dependencies = [ + "actix", + "actix-rt", + "actix-web", + "akri-shared 0.6.5", + "clap", + "k8s-openapi", + "kube", + "openapi", + "openssl", + "rustls 0.18.1", + "serde", + "serde_json", +] + +[[package]] +name = "webpki" +version = "0.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = 
"wepoll-sys" +version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd319e971980166b53e17b1026812ad66c6b54063be879eb182342b55284694" +checksum = "0fcb14dea929042224824779fbc82d9fab8d2e6d3cbc0ac404de8edf489e77ff" dependencies = [ "cc", ] @@ -3283,6 +4866,12 @@ dependencies = [ "libc", ] +[[package]] +name = "widestring" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" + [[package]] name = "winapi" version = "0.2.8" @@ -3326,6 +4915,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "winreg" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +dependencies = [ + "winapi 0.3.9", +] + [[package]] name = "winreg" version = "0.7.0" @@ -3353,9 +4951,9 @@ checksum = "b07db065a5cf61a7e4ba64f29e67db906fb1787316516c4e6e5ff0fea1efcd8a" [[package]] name = "yaml-rust" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39f0c922f1a334134dc2f7a8b67dc5d25f0735263feec974345ff706bcf20b0d" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" dependencies = [ "linked-hash-map", ] @@ -3366,7 +4964,7 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe81055b36d926da70850a7c1a9bc72eff184659da29e008ce9f2af4bfee7c89" dependencies = [ - "log 0.4.8", + "log", "xml-rs", ] diff --git a/Cargo.toml b/Cargo.toml index 066472964..3bc9e6347 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,23 @@ # Patch to allow invalid authority headers provided by grpc-go / kubelet # Issue to track: https://github.com/grpc/grpc-go/issues/2628 [patch.crates-io] -h2 = { git = "https://github.com/kate-goldenring/h2", branch = "master"} +h2 = { git = "https://github.com/kate-goldenring/h2", branch = "master" } [workspace] -members = ["shared", "controller", "agent", "samples/brokers/udev-video-broker"] +members = [ + "shared", + "agent", + "controller", + "samples/brokers/udev-video-broker", + "webhooks/validating/configuration", + "discovery-utils", + "discovery-handlers/debug-echo", + "discovery-handlers/onvif", + "discovery-handlers/opcua", + "discovery-handlers/udev", + "discovery-handler-modules/debug-echo-discovery-handler", + "discovery-handler-modules/onvif-discovery-handler", + "discovery-handler-modules/opcua-discovery-handler", + "discovery-handler-modules/udev-discovery-handler", + "discovery-handler-modules/http-discovery-handler", +] diff --git a/Cross.toml b/Cross.toml index ec196e59d..dae228924 100644 --- a/Cross.toml +++ b/Cross.toml @@ -2,6 +2,7 @@ passthrough = [ "PKG_CONFIG_ALLOW_CROSS", "RUST_LOG", + "CARGO_INCREMENTAL", ] [target.x86_64-unknown-linux-gnu] diff --git a/Makefile b/Makefile index 1598f5afc..56ebc34d2 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,27 @@ BUILD_AMD64 ?= 1 BUILD_ARM32 ?= 1 BUILD_ARM64 ?= 1 +# Specify flag to build optimized release version of rust components. +# Set to be empty to use debug builds. 
+BUILD_RELEASE_FLAG ?= 1 + +# Space-separated list of Rust packages to exclude from the build, such as the following to skip building +# the udev discovery handler library or module: "akri-udev udev-discovery-handler" +PACKAGES_TO_EXCLUDE ?= + +# Incremental compilation causes rustc to save additional information to disk which will be +# reused when recompiling the crate, improving re-compile times. +# The additional information is stored in the target directory. +# By default for cargo builds, it is enabled in debug mode and disabled in release mode. +CARGO_INCREMENTAL ?= 0 + +BUILD_SLIM_AGENT ?= 1 +FULL_AGENT_EXECUTABLE_NAME ?= agent-full +# Specify which features of the Agent to build, namely which Discovery Handlers +# should be embedded, if any. The "agent-full" feature must be enabled to use the embedded +# Discovery Handlers, e.g. AGENT_FEATURES="agent-full onvif-feat opcua-feat udev-feat" +AGENT_FEATURES ?= + REGISTRY ?= devcaptest.azurecr.io UNIQUE_ID ?= $(USER) @@ -9,8 +30,13 @@ INTERMEDIATE_DOCKERFILE_DIR ?= build/containers/intermediate DOCKERFILE_DIR ?= build/containers PREFIX ?= $(REGISTRY)/$(UNIQUE_ID) -VERSION=$(shell cat version.txt) -VERSION_LABEL=v$(VERSION) + +# Evaluate VERSION and TIMESTAMP immediately to avoid +# any lazy evaluation change in the values +VERSION := $(shell cat version.txt) +TIMESTAMP := $(shell date +"%Y%m%d_%H%M%S") + +VERSION_LABEL=v$(VERSION)-$(TIMESTAMP) LABEL_PREFIX ?= $(VERSION_LABEL) CACHE_OPTION ?= @@ -29,4 +55,4 @@ ARM64V8_TARGET = aarch64-unknown-linux-gnu include build/intermediate-containers.mk # Akri container defines -include build/akri-containers.mk \ No newline at end of file +include build/akri-containers.mk diff --git a/README.md b/README.md index 1ae5ca7c8..870984d07 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,8 @@ -# Akri +
[Akri Logo image]
+ [![Slack channel #akri](https://img.shields.io/badge/slack-akri-blueviolet.svg?logo=slack)](https://kubernetes.slack.com/messages/akri) -[![Rust Version](https://img.shields.io/badge/rustc-1.41.0-blue.svg)](https://blog.rust-lang.org/2020/01/30/Rust-1.41.0.html) -[![Kubernetes Version](https://img.shields.io/badge/kubernetes-≥%201.16-blue.svg)](https://v1-16.docs.kubernetes.io/) +[![Rust Version](https://img.shields.io/badge/rustc-1.51.0-blue.svg)](https://blog.rust-lang.org/2021/03/25/Rust-1.51.0.html) +[![Kubernetes Version](https://img.shields.io/badge/kubernetes-≥%201.16-blue.svg)](https://kubernetes.io/) [![codecov](https://codecov.io/gh/deislabs/akri/branch/main/graph/badge.svg?token=V468HO7CDE)](https://codecov.io/gh/deislabs/akri) [![Check Rust](https://github.com/deislabs/akri/workflows/Check%20Rust/badge.svg?branch=main&event=push)](https://github.com/deislabs/akri/actions?query=workflow%3A%22Check+Rust%22) @@ -10,35 +11,40 @@ [![Build Agent](https://github.com/deislabs/akri/workflows/Build%20Agent/badge.svg?branch=main&event=push)](https://github.com/deislabs/akri/actions?query=workflow%3A%22Build+Agent%22) [![Test K3s, Kubernetes, and MicroK8s](https://github.com/deislabs/akri/workflows/Test%20K3s,%20Kubernetes,%20and%20MicroK8s/badge.svg?branch=main&event=push)](https://github.com/deislabs/akri/actions?query=workflow%3A%22Test+K3s%2C+Kubernetes%2C+and+MicroK8s%22) + +---- Akri lets you easily expose heterogeneous leaf devices (such as IP cameras and USB devices) as resources in a Kubernetes cluster, while also supporting the exposure of embedded hardware resources such as GPUs and FPGAs. Akri continually detects nodes that have access to these devices and schedules workloads based on them. Simply put: you name it, Akri finds it, you use it. + +---- ## Why Akri At the edge, there are a variety of sensors, controllers, and MCU class devices that are producing data and performing actions. For Kubernetes to be a viable edge computing solution, these heterogeneous “leaf devices” need to be easily utilized by Kubernetes clusters. However, many of these leaf devices are too small to run Kubernetes themselves. Akri is an open source project that exposes these leaf devices as resources in a Kubernetes cluster. It leverages and extends the Kubernetes [device plugin framework](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/), which was created with the cloud in mind and focuses on advertising static resources such as GPUs and other system hardware. Akri took this framework and applied it to the edge, where there is a diverse set of leaf devices with unique communication protocols and intermittent availability. -Akri is made for the edge, **handling the dynamic appearance and disappearance of leaf devices**. Akri provides an abstraction layer similar to [CNI](https://github.com/containernetworking/cni), but instead of abstracting the underlying network details, it is removing the work of finding, utilizing, and monitoring the availability of the leaf device. An operator simply has to apply a Akri Configuration to a cluster, specifying the discovery protocol (say ONVIF) and the pod that should be deployed upon discovery (say a video frame server). Then, Akri does the rest. An operator can also allow multiple nodes to utilize a leaf device, thereby **providing high availability** in the case where a node goes offline. 
Furthermore, Akri will automatically create a Kubernetes service for each type of leaf device (or Akri Configuration), removing the need for an application to track the state of pods or nodes. +Akri is made for the edge, **handling the dynamic appearance and disappearance of leaf devices**. Akri provides an abstraction layer similar to [CNI](https://github.com/containernetworking/cni), but instead of abstracting the underlying network details, it removes the work of finding, utilizing, and monitoring the availability of the leaf device. An operator simply has to apply an Akri Configuration to a cluster, specifying the Discovery Handler (say ONVIF) that should be used to discover the devices and the Pod that should be deployed upon discovery (say a video frame server). Then, Akri does the rest. An operator can also allow multiple nodes to utilize a leaf device, thereby **providing high availability** in the case where a node goes offline. Furthermore, Akri will automatically create a Kubernetes service for each type of leaf device (or Akri Configuration), removing the need for an application to track the state of pods or nodes. -Most importantly, Akri **was built to be extensible**. We currently have ONVIF, udev, and OPC UA discovery handlers, but more can be easily added by community members like you. The more protocols Akri can support, the wider an array of leaf devices Akri can discover. We are excited to work with you to build a more connected edge. +Most importantly, Akri **was built to be extensible**. Akri currently supports ONVIF, udev, and OPC UA Discovery Handlers, but more can be easily added by community members like you. The more protocols Akri can support, the wider an array of leaf devices Akri can discover. We are excited to work with you to build a more connected edge. ## How Akri Works -Akri’s architecture is made up of four key components: two custom resources, a device plugin implementation, and a custom controller. The first custom resource, the Akri Configuration, is where **you name it**. This tells Akri what kind of device it should look for. At this point, **Akri finds it**! Akri's device plugin implementation looks for the device and tracks its availability using Akri's second custom resource, the Akri Instance. Having found your device, the Akri Controller helps **you use it**. It sees each Akri Instance (which represents a leaf device) and deploys a ("broker") pod that knows how to connect to the resource and utilize it. +Akri’s architecture is made up of five key components: two custom resources, Discovery Handlers, an Agent (device plugin implementation), and a custom Controller. The first custom resource, the Akri Configuration, is where **you name it**. This tells Akri what kind of device it should look for. At this point, **Akri finds it**! Akri's Discovery Handlers look for the device and inform the Agent of discovered devices. The Agent then creates Akri's second custom resource, the Akri Instance, to track the availability and usage of the device. Having found your device, the Akri Controller helps **you use it**. It sees each Akri Instance (which represents a leaf device) and deploys a ("broker") Pod that knows how to connect to the resource and utilize it. -Akri ONVIF Flow ## Quick Start with a Demo Try the [end to end demo](./docs/end-to-end-demo.md) of Akri to see Akri discover mock video cameras and a streaming app display the footage from those cameras. It includes instructions on K8s cluster setup.
If you would like to perform the demo on a cluster of Raspberry Pi 4's, see the [Raspberry Pi 4 demo](./docs/end-to-end-demo-rpi4.md). ## Documentation -- [Running Akri using our currently supported protocols](./docs/user-guide.md) +- [User guide for deploying Akri using Helm](./docs/user-guide.md) - [Akri architecture in depth](./docs/architecture.md) - [How to build Akri](./docs/development.md) -- [How to extend Akri for protocols that haven't been supported yet](./docs/extensibility.md). -- Proposals for enhancements such as new protocol implementations can be found in the [proposals folder](./docs/proposals) +- [How to extend Akri for protocols that haven't been supported yet](./docs/discovery-handler-development.md). +- [How to create a broker to leverage discovered devices](./docs/broker-development.md). +- Proposals for enhancements such as new Discovery Handler implementations can be found in the [proposals folder](./docs/proposals) ## Roadmap -Akri was built to be extensible. We currently have ONVIF, udev, OPC UA discovery protocols, but as a community, we hope to continuously support more protocols. We have created a [protocol implementation roadmap](./docs/roadmap.md#implement-additional-protocol-handlers) in order to prioritize development of protocols. If there is a protocol you feel we should prioritize, please [create an issue](https://github.com/deislabs/akri/issues/new/choose), or better yet, contribute the implementation! We are excited to work with you to build a more connected edge. +Akri was built to be extensible. We currently have ONVIF, udev, and OPC UA Discovery Handlers, but as a community, we hope to continuously support more protocols. We have created a [Discovery Handler implementation roadmap](./docs/roadmap.md#implement-additional-discovery-handlers) in order to prioritize development of Discovery Handlers. If there is a protocol you feel we should prioritize, please [create an issue](https://github.com/deislabs/akri/issues/new/choose), or better yet, contribute the implementation! ## Contributing This project welcomes contributions, whether by [creating new issues](https://github.com/deislabs/akri/issues/new/choose) or pull requests. See our [contributing document](./docs/contributing.md) on how to get started.
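The agent changes that follow replace the old in-process `DiscoveryHandler` trait with a gRPC-based discovery API (see the agent/README.md hunk below). To make the shape of that API concrete, here is a minimal, hypothetical sketch of an external Discovery Handler; the module path `akri_discovery_utils::discovery::v0` and the `Device` field names are assumptions read off discovery-utils/proto/discovery.proto, not code taken from this PR:

```Rust
// Illustrative only: a minimal external Discovery Handler against the new gRPC
// discovery API. Module path and type/field names (DiscoverRequest, DiscoverResponse,
// Device, mounts, device_specs) are assumed from discovery-utils/proto/discovery.proto.
use akri_discovery_utils::discovery::v0::{
    discovery_handler_server::DiscoveryHandler, Device, DiscoverRequest, DiscoverResponse,
};
use std::collections::HashMap;
use tokio::sync::mpsc;
use tonic::{Request, Response, Status};

pub struct SampleDiscoveryHandler;

#[tonic::async_trait]
impl DiscoveryHandler for SampleDiscoveryHandler {
    // Discovery results are streamed back to the Agent rather than returned once.
    type DiscoverStream = mpsc::Receiver<Result<DiscoverResponse, Status>>;

    async fn discover(
        &self,
        _request: Request<DiscoverRequest>,
    ) -> Result<Response<Self::DiscoverStream>, Status> {
        let (mut tx, rx) = mpsc::channel(4);
        // Report one mock device; a real handler would keep the stream open and
        // send a new DiscoverResponse whenever its view of the devices changes.
        let device = Device {
            id: "sample-device-0".to_string(),
            properties: HashMap::new(),
            mounts: Vec::new(),
            device_specs: Vec::new(),
        };
        tx.send(Ok(DiscoverResponse { devices: vec![device] }))
            .await
            .map_err(|e| Status::internal(e.to_string()))?;
        Ok(Response::new(rx))
    }
}
```

A handler along these lines would serve this service (typically over a unix socket) and register itself with the Agent's new registration server (`run_registration_server` in agent/src/main.rs below), which is how the slim Agent learns about external Discovery Handlers at runtime.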
diff --git a/agent/Cargo.toml b/agent/Cargo.toml index 599c944c0..ea7a73b5d 100644 --- a/agent/Cargo.toml +++ b/agent/Cargo.toml @@ -1,53 +1,68 @@ [package] name = "agent" -version = "0.1.5" +version = "0.6.5" authors = ["Kate Goldenring ", ""] edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +akri-debug-echo = { path = "../discovery-handlers/debug-echo" } +akri-discovery-utils = { path = "../discovery-utils" } +akri-onvif = { path = "../discovery-handlers/onvif", optional = true } +akri-opcua = { path = "../discovery-handlers/opcua", optional = true } +akri-udev = { path = "../discovery-handlers/udev", optional = true } +akri-shared = { path = "../shared" } +anyhow = "1.0.38" async-stream = "0.2" async-trait = "0.1.0" -blake2 = "0.8.0" +blake2 = "0.9.0" chrono = "0.4.10" -cfg-if = "0.1" -env_logger = "0.6.1" -failure = "0.1.5" +cfg-if = "1.0.0" +env_logger = "0.8.3" futures = { version = "0.3.1", package = "futures" } futures-core = "0.3" futures-util = "0.3" -futures-old = { version = "0.1", package = "futures" } -hyper = "0.11" +hyper = "0.13.10" +h2 = "=0.2.6" kube = { version = "0.23.0", features = ["openapi"] } k8s-openapi = { version = "0.6.0", features = ["v1_16"] } +lazy_static = "1.4" log = "0.4" -mime = "0.3" -mockall = "0.6.0" -opcua-client = "0.7.0" -pest = "2.0" -pest_derive = "2.0" +mockall_double = "0.2.0" +prometheus = { version = "0.12.0", features = ["process"] } prost = "0.6" -rand = "0.7" -regex = "1" +rand = "0.8.3" serde = "1.0.104" serde_json = "1.0.45" serde_yaml = "0.8.11" serde_derive = "1.0.104" -akri-shared = { path = "../shared" } -sxd-document = "0.3.0" -sxd-xpath = "0.4.0" -tempfile = "3.1.0" tokio = { version = "0.2", features = ["full"] } tokio-core = "0.1" tonic = "0.1" tower = "0.3" -udev = "0.4" -url = "2.1.0" +url = "2.2.0" uuid = { version = "0.8.1", features = ["v4"] } -xml-rs = "0.8.0" -yaserde = "0.3.13" -yaserde_derive = "0.3.13" [build-dependencies] tonic-build = "0.1.1" + +[dev-dependencies] +# for testing using a simple discovery handler +akri-discovery-utils = { path = "../discovery-utils", features = ["mock-discovery-handler"] } +akri-onvif = { path = "../discovery-handlers/onvif" } +akri-opcua = { path = "../discovery-handlers/opcua" } +akri-udev = { path = "../discovery-handlers/udev"} +mockall = "0.9.0" +mock_instant = { version = "0.2", features = ["sync"] } +tempfile = "3.1.0" + +[features] +# To embed discovery handlers, add the desired discovery handler features to default and "agent-full". +# Or set the features when building the agent with +# `cargo build --manifest-path agent/Cargo.toml --features "agent-full onvif-feat opcua-feat udev-feat"` +default = [] +onvif-feat = [ "akri-onvif"] +opcua-feat = ["akri-opcua"] +udev-feat = ["akri-udev"] +agent-full = [] \ No newline at end of file diff --git a/agent/README.md b/agent/README.md index b086152a9..d9f30c3b7 100644 --- a/agent/README.md +++ b/agent/README.md @@ -6,12 +6,14 @@ This is the Akri Agent project. It is an implementation of a [Kubernetes device ## Traits ### Public -* **DiscoveryHandler** - This provides an abstraction to allow protocol specific code to handle discovery and provide details for Instance creation. Planned implementations of this trait include `OnvifDiscoveryHandler`, `UdevDiscoveryHandler`, `OpcuaDiscoveryHandler`, and `DebugEchoDiscoveryHandler`. 
+* **DiscoveryHandler** - This provides an abstraction to allow protocol-specific code to handle discovery and provide details for Instance creation. The trait is defined by Akri's [discovery API](../discovery-utils/proto/discovery.proto). Implementations of this trait can be found in the [discovery handlers directory](../discovery-handlers). ```Rust #[async_trait] pub trait DiscoveryHandler { - async fn discover(&self) -> Result<Vec<DiscoveryResult>, Error>; - fn are_shared(&self) -> Result<bool, Error>; + async fn discover( + &self, + request: tonic::Request<DiscoverRequest>, + ) -> Result<tonic::Response<Self::DiscoverStream>, tonic::Status>; } ``` diff --git a/agent/src/main.rs b/agent/src/main.rs index 849dcefad..358f38553 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -1,29 +1,40 @@ +extern crate hyper; #[macro_use] -extern crate log; +extern crate lazy_static; #[macro_use] -extern crate yaserde_derive; +extern crate log; #[macro_use] extern crate serde_derive; - -extern crate pest; -#[macro_use] -extern crate pest_derive; - -extern crate hyper; extern crate tokio_core; - -mod protocols; mod util; -use akri_shared::akri::API_NAMESPACE; -use env_logger; +use akri_shared::akri::{metrics::run_metrics_server, API_NAMESPACE}; use log::{info, trace}; -use std::time::Duration; +use prometheus::{HistogramVec, IntGaugeVec}; +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, + time::Duration, +}; +use tokio::sync::broadcast; +#[cfg(feature = "agent-full")] +use util::registration::register_embedded_discovery_handlers; use util::{ - config_action, constants::SLOT_RECONCILIATION_SLOT_GRACE_PERIOD_SECS, + config_action, + constants::{ + NEW_DISCOVERY_HANDLER_CHANNEL_CAPACITY, SLOT_RECONCILIATION_SLOT_GRACE_PERIOD_SECS, + }, + registration::{run_registration_server, DiscoveryHandlerName}, slot_reconciliation::periodic_slot_reconciliation, }; +lazy_static! { + // Reports the number of Instances visible to this node, grouped by Configuration and whether it is shared + pub static ref INSTANCE_COUNT_METRIC: IntGaugeVec = prometheus::register_int_gauge_vec!("akri_instance_count", "Akri Instance Count", &["configuration", "is_shared"]).unwrap(); + // Reports the time to get discovery results, grouped by Configuration + pub static ref DISCOVERY_RESPONSE_TIME_METRIC: HistogramVec = prometheus::register_histogram_vec!("akri_discovery_response_time", "Akri Discovery Response Time", &["configuration"]).unwrap(); +} + /// This is the entry point for the Akri Agent. /// It must be built on unix systems, since the underlying libraries for the `DevicePluginService` unix socket connection are unix only.
#[cfg(unix)] @@ -43,6 +54,28 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> { let mut tasks = Vec::new(); + // Start server for Prometheus metrics + tasks.push(tokio::spawn(async move { + run_metrics_server().await.unwrap(); + })); + + let discovery_handler_map = Arc::new(Mutex::new(HashMap::new())); + let discovery_handler_map_clone = discovery_handler_map.clone(); + let (new_discovery_handler_sender, _): ( + broadcast::Sender<DiscoveryHandlerName>, + broadcast::Receiver<DiscoveryHandlerName>, + ) = broadcast::channel(NEW_DISCOVERY_HANDLER_CHANNEL_CAPACITY); + let new_discovery_handler_sender_clone = new_discovery_handler_sender.clone(); + #[cfg(feature = "agent-full")] + register_embedded_discovery_handlers(discovery_handler_map_clone.clone())?; + + // Start registration service for registering `DiscoveryHandlers` + tasks.push(tokio::spawn(async move { + run_registration_server(discovery_handler_map_clone, new_discovery_handler_sender) + .await + .unwrap(); + })); + tasks.push(tokio::spawn(async move { + let slot_grace_period = Duration::from_secs(SLOT_RECONCILIATION_SLOT_GRACE_PERIOD_SECS); + periodic_slot_reconciliation(slot_grace_period) @@ -51,7 +84,9 @@ })); tasks.push(tokio::spawn(async move { - config_action::do_config_watch().await.unwrap() + config_action::do_config_watch(discovery_handler_map, new_discovery_handler_sender_clone) + .await + .unwrap() })); futures::future::try_join_all(tasks).await?; diff --git a/agent/src/protocols/debug_echo/discovery_handler.rs b/agent/src/protocols/debug_echo/discovery_handler.rs deleted file mode 100644 index 017e100ad..000000000 --- a/agent/src/protocols/debug_echo/discovery_handler.rs +++ /dev/null @@ -1,59 +0,0 @@ -use super::super::{DiscoveryHandler, DiscoveryResult}; -use akri_shared::akri::configuration::DebugEchoDiscoveryHandlerConfig; -use async_trait::async_trait; -use failure::Error; -use std::{collections::HashMap, fs}; - -/// File acting as an environment variable for testing discovery. -/// To mimic an instance going offline, kubectl exec into one of the akri-agent-daemonset pods -/// and echo "OFFLINE" > /tmp/debug-echo-availability.txt -/// To mimic a device coming back online, remove the word "OFFLINE" from the file -/// ie: echo "" > /tmp/debug-echo-availability.txt -pub const DEBUG_ECHO_AVAILABILITY_CHECK_PATH: &str = "/tmp/debug-echo-availability.txt"; -/// String to write into DEBUG_ECHO_AVAILABILITY_CHECK_PATH to make DebugEcho devices undiscoverable -pub const OFFLINE: &str = "OFFLINE"; - -/// `DebugEchoDiscoveryHandler` contains a `DebugEchoDiscoveryHandlerConfig` which has a -/// list of mock instances (`discovery_handler_config.descriptions`) and their sharability. -/// It mocks discovering the instances by inspecting the contents of the file at `DEBUG_ECHO_AVAILABILITY_CHECK_PATH`. -/// If the file contains "OFFLINE", it won't discover any of the instances, else it discovers them all. -#[derive(Debug)] -pub struct DebugEchoDiscoveryHandler { - discovery_handler_config: DebugEchoDiscoveryHandlerConfig, -} - -impl DebugEchoDiscoveryHandler { - pub fn new(discovery_handler_config: &DebugEchoDiscoveryHandlerConfig) -> Self { - DebugEchoDiscoveryHandler { - discovery_handler_config: discovery_handler_config.clone(), - } - } -} - -#[async_trait] -impl DiscoveryHandler for DebugEchoDiscoveryHandler { - async fn discover(&self) -> Result<Vec<DiscoveryResult>, Error> { - let availability = - fs::read_to_string(DEBUG_ECHO_AVAILABILITY_CHECK_PATH).unwrap_or_default(); - trace!( - "discover -- DebugEcho capabilities visible? 
{}", - !availability.contains(OFFLINE) - ); - // If the device is offline, return an empty list of instance info - if availability.contains(OFFLINE) { - Ok(Vec::new()) - } else { - Ok(self - .discovery_handler_config - .descriptions - .iter() - .map(|description| { - DiscoveryResult::new(description, HashMap::new(), self.are_shared().unwrap()) - }) - .collect::>()) - } - } - fn are_shared(&self) -> Result { - Ok(self.discovery_handler_config.shared) - } -} diff --git a/agent/src/protocols/debug_echo/mod.rs b/agent/src/protocols/debug_echo/mod.rs deleted file mode 100644 index edff99faf..000000000 --- a/agent/src/protocols/debug_echo/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -mod discovery_handler; -pub use self::discovery_handler::{ - DebugEchoDiscoveryHandler, DEBUG_ECHO_AVAILABILITY_CHECK_PATH, OFFLINE, -}; diff --git a/agent/src/protocols/mod.rs b/agent/src/protocols/mod.rs deleted file mode 100644 index 17db42d72..000000000 --- a/agent/src/protocols/mod.rs +++ /dev/null @@ -1,201 +0,0 @@ -use akri_shared::{ - akri::configuration::ProtocolHandler, - os::env_var::{ActualEnvVarQuery, EnvVarQuery}, -}; -use async_trait::async_trait; -use blake2::digest::{Input, VariableOutput}; -use blake2::VarBlake2b; -use failure::Error; -use std::collections::HashMap; - -#[derive(Debug, Clone, PartialEq)] -pub struct DiscoveryResult { - pub digest: String, - pub properties: HashMap, -} -impl DiscoveryResult { - fn new(id_to_digest: &str, properties: HashMap, shared: bool) -> Self { - let mut id_to_digest = id_to_digest.to_string(); - // For unshared devices, include node hostname in id_to_digest so instances have unique names - if !shared { - id_to_digest = format!( - "{}{}", - &id_to_digest, - std::env::var("AGENT_NODE_NAME").unwrap() - ); - } - let mut hasher = VarBlake2b::new(3).unwrap(); - hasher.input(id_to_digest); - let digest = hasher - .vec_result() - .iter() - .map(|num| format!("{:02x}", num)) - .collect::>() - .join(""); - DiscoveryResult { digest, properties } - } -} - -/// DiscoveryHandler describes anything that can find available instances and define -/// whether they are shared. -/// -/// DiscoveryHandler provides an abstraction to help in Instance -/// creation: search/find for instances, specify whether the instance -/// should be shared, etc. 
-/// -/// # Examples -/// -/// ``` -/// pub struct SampleDiscoveryHandler {} -/// #[async_trait] -/// impl DiscoveryHandler for SampleDiscoveryHandler { -/// async fn discover(&self) -> Result<Vec<DiscoveryResult>, failure::Error> { -/// Ok(Vec::new()) -/// } -/// fn are_shared(&self) -> Result<bool, failure::Error> { -/// Ok(true) -/// } -/// } -/// ``` -#[async_trait] -pub trait DiscoveryHandler { - async fn discover(&self) -> Result<Vec<DiscoveryResult>, Error>; - fn are_shared(&self) -> Result<bool, Error>; -} - -pub mod debug_echo; -mod onvif; -mod opcua; -mod udev; - -pub fn get_discovery_handler( - discovery_handler_config: &ProtocolHandler, -) -> Result<Box<dyn DiscoveryHandler + Sync + Send>, Error> { - let query_var_set = ActualEnvVarQuery {}; - inner_get_discovery_handler(discovery_handler_config, &query_var_set) -} - -fn inner_get_discovery_handler( - discovery_handler_config: &ProtocolHandler, - query: &impl EnvVarQuery, -) -> Result<Box<dyn DiscoveryHandler + Sync + Send>, Error> { - match discovery_handler_config { - ProtocolHandler::onvif(onvif) => Ok(Box::new(onvif::OnvifDiscoveryHandler::new(&onvif))), - ProtocolHandler::udev(udev) => Ok(Box::new(udev::UdevDiscoveryHandler::new(&udev))), - ProtocolHandler::opcua(opcua) => Ok(Box::new(opcua::OpcuaDiscoveryHandler::new(&opcua))), - ProtocolHandler::debugEcho(dbg) => match query.get_env_var("ENABLE_DEBUG_ECHO") { - Ok(_) => Ok(Box::new(debug_echo::DebugEchoDiscoveryHandler::new(dbg))), - _ => Err(failure::format_err!("No protocol configured")), - }, - } -} - -#[cfg(test)] -mod test { - use super::*; - use akri_shared::{ - akri::configuration::{Configuration, ProtocolHandler}, - os::env_var::MockEnvVarQuery, - }; - use std::env::VarError; - - #[tokio::test] - async fn test_inner_get_discovery_handler() { - let mock_query = MockEnvVarQuery::new(); - - let onvif_json = r#"{"onvif":{}}"#; - let deserialized: ProtocolHandler = serde_json::from_str(onvif_json).unwrap(); - assert!(inner_get_discovery_handler(&deserialized, &mock_query).is_ok()); - - let udev_json = r#"{"udev":{"udevRules":[]}}"#; - let deserialized: ProtocolHandler = serde_json::from_str(udev_json).unwrap(); - assert!(inner_get_discovery_handler(&deserialized, &mock_query).is_ok()); - - let opcua_json = r#"{"opcua":{"opcuaDiscoveryMethod":{"standard":{}}}}"#; - let deserialized: ProtocolHandler = serde_json::from_str(opcua_json).unwrap(); - assert!(inner_get_discovery_handler(&deserialized, &mock_query).is_ok()); - - let json = r#"{}"#; - assert!(serde_json::from_str::<ProtocolHandler>(json).is_err()); - } - - #[tokio::test] - async fn test_udev_discover_no_rules() { - let mock_query = MockEnvVarQuery::new(); - - let json = r#"{"udev":{"udevRules":[]}}"#; - let deserialized: ProtocolHandler = serde_json::from_str(json).unwrap(); - let discovery_handler = inner_get_discovery_handler(&deserialized, &mock_query).unwrap(); - assert_eq!(discovery_handler.discover().await.unwrap().len(), 0); - } - - #[tokio::test] - async fn test_factory_for_debug_echo_when_no_env_var_set() { - let json = r#"{"protocol":{"debugEcho":{"descriptions":["foo1"],"shared":true}}}"#; - let deserialized: Configuration = serde_json::from_str(json).unwrap(); - - let mut mock_query_without_var_set = MockEnvVarQuery::new(); - mock_query_without_var_set - .expect_get_env_var() - .returning(|_| Err(VarError::NotPresent)); - if inner_get_discovery_handler(&deserialized.protocol, &mock_query_without_var_set).is_ok() - { - panic!("protocol configuration as debugEcho should return error when 'ENABLE_DEBUG_ECHO' env var is not set") - } - - let mut mock_query_with_var_set = MockEnvVarQuery::new(); - mock_query_with_var_set - .expect_get_env_var() - .returning(|_| 
Ok("1".to_string())); - let pi = DiscoveryResult::new(&"foo1".to_string(), HashMap::new(), true); - let debug_echo_discovery_handler = - inner_get_discovery_handler(&deserialized.protocol, &mock_query_with_var_set).unwrap(); - assert_eq!(true, debug_echo_discovery_handler.are_shared().unwrap()); - assert_eq!( - 1, - debug_echo_discovery_handler.discover().await.unwrap().len() - ); - assert_eq!( - pi.digest, - debug_echo_discovery_handler - .discover() - .await - .unwrap() - .get(0) - .unwrap() - .digest - ); - } - - #[tokio::test] - async fn test_discovery_result_partialeq() { - let left = DiscoveryResult::new(&"foo1".to_string(), HashMap::new(), true); - let right = DiscoveryResult::new(&"foo1".to_string(), HashMap::new(), true); - assert_eq!(left, right); - } - - #[tokio::test] - async fn test_discovery_result_partialeq_false() { - { - let left = DiscoveryResult::new(&"foo1".to_string(), HashMap::new(), true); - let right = DiscoveryResult::new(&"foo2".to_string(), HashMap::new(), true); - assert_ne!(left, right); - } - - // TODO 201217: Needs work on `DiscoveryResult::new` to enable test (https://github.com/deislabs/akri/pull/176#discussion_r544703968) - // { - // std::env::set_var("AGENT_NODE_NAME", "something"); - // let left = DiscoveryResult::new(&"foo1".to_string(), HashMap::new(), true); - // let right = DiscoveryResult::new(&"foo1".to_string(), HashMap::new(), false); - // assert_ne!(left, right); - // } - - { - let mut nonempty: HashMap = HashMap::new(); - nonempty.insert("one".to_string(), "two".to_string()); - let left = DiscoveryResult::new(&"foo1".to_string(), nonempty, true); - let right = DiscoveryResult::new(&"foo1".to_string(), HashMap::new(), true); - assert_ne!(left, right); - } - } -} diff --git a/agent/src/protocols/onvif/discovery_handler.rs b/agent/src/protocols/onvif/discovery_handler.rs deleted file mode 100644 index b46b1b8a3..000000000 --- a/agent/src/protocols/onvif/discovery_handler.rs +++ /dev/null @@ -1,511 +0,0 @@ -use super::super::{DiscoveryHandler, DiscoveryResult}; -use super::discovery_impl::util; -use akri_shared::akri::configuration::{FilterList, FilterType, OnvifDiscoveryHandlerConfig}; -use akri_shared::onvif::device_info::{ - OnvifQuery, OnvifQueryImpl, ONVIF_DEVICE_IP_ADDRESS_LABEL_ID, - ONVIF_DEVICE_MAC_ADDRESS_LABEL_ID, ONVIF_DEVICE_SERVICE_URL_LABEL_ID, -}; -use async_trait::async_trait; -use failure::Error; -use std::{collections::HashMap, time::Duration}; - -/// `OnvifDiscoveryHandler` discovers the onvif instances as described by the filters `discover_handler_config.ip_addresses`, -/// `discover_handler_config.mac_addresses`, and `discover_handler_config.scopes`. -/// The instances it discovers are always shared. 
-#[derive(Debug)] -pub struct OnvifDiscoveryHandler { - discovery_handler_config: OnvifDiscoveryHandlerConfig, -} - -impl OnvifDiscoveryHandler { - pub fn new(discovery_handler_config: &OnvifDiscoveryHandlerConfig) -> Self { - OnvifDiscoveryHandler { - discovery_handler_config: discovery_handler_config.clone(), - } - } - - fn execute_filter(filter_list: Option<&FilterList>, filter_against: &[String]) -> bool { - if filter_list.is_none() { - return false; - } - let filter_action = filter_list.as_ref().unwrap().action.clone(); - let filter_count = filter_list - .unwrap() - .items - .iter() - .filter(|pattern| { - filter_against - .iter() - .filter(|filter_against_item| filter_against_item.contains(*pattern)) - .count() - > 0 - }) - .count(); - - if FilterType::Include == filter_action { - filter_count == 0 - } else { - filter_count != 0 - } - } - - async fn apply_filters( - &self, - device_service_uris: Vec, - onvif_query: &impl OnvifQuery, - ) -> Result, failure::Error> { - let mut result = Vec::new(); - for device_service_url in device_service_uris.iter() { - trace!("apply_filters - device service url {}", &device_service_url); - let (ip_address, mac_address) = match onvif_query - .get_device_ip_and_mac_address(&device_service_url) - .await - { - Ok(ip_and_mac) => ip_and_mac, - Err(e) => { - error!("apply_filters - error getting ip and mac address: {}", e); - continue; - } - }; - - // Evaluate camera ip address against ip filter if provided - let ip_address_as_vec = vec![ip_address.clone()]; - if OnvifDiscoveryHandler::execute_filter( - self.discovery_handler_config.ip_addresses.as_ref(), - &ip_address_as_vec, - ) { - continue; - } - - // Evaluate camera mac address against mac filter if provided - let mac_address_as_vec = vec![mac_address.clone()]; - if OnvifDiscoveryHandler::execute_filter( - self.discovery_handler_config.mac_addresses.as_ref(), - &mac_address_as_vec, - ) { - continue; - } - - let ip_and_mac_joined = format!("{}-{}", &ip_address, &mac_address); - - // Evaluate camera scopes against scopes filter if provided - let device_scopes = match onvif_query.get_device_scopes(&device_service_url).await { - Ok(scopes) => scopes, - Err(e) => { - error!("apply_filters - error getting scopes: {}", e); - continue; - } - }; - if OnvifDiscoveryHandler::execute_filter( - self.discovery_handler_config.scopes.as_ref(), - &device_scopes, - ) { - continue; - } - - let mut properties = HashMap::new(); - properties.insert( - ONVIF_DEVICE_SERVICE_URL_LABEL_ID.to_string(), - device_service_url.to_string(), - ); - properties.insert(ONVIF_DEVICE_IP_ADDRESS_LABEL_ID.into(), ip_address); - properties.insert(ONVIF_DEVICE_MAC_ADDRESS_LABEL_ID.into(), mac_address); - - trace!( - "apply_filters - returns DiscoveryResult ip/mac: {:?}, props: {:?}", - &ip_and_mac_joined, - &properties - ); - result.push(DiscoveryResult::new( - &ip_and_mac_joined, - properties, - self.are_shared().unwrap(), - )) - } - Ok(result) - } -} - -#[async_trait] -impl DiscoveryHandler for OnvifDiscoveryHandler { - async fn discover(&self) -> Result, failure::Error> { - let onvif_query = OnvifQueryImpl {}; - - info!("discover - filters:{:?}", &self.discovery_handler_config,); - let discovered_onvif_cameras = util::simple_onvif_discover(Duration::from_secs( - self.discovery_handler_config.discovery_timeout_seconds as u64, - )) - .await?; - info!("discover - discovered:{:?}", &discovered_onvif_cameras,); - let filtered_onvif_cameras = self - .apply_filters(discovered_onvif_cameras, &onvif_query) - .await; - info!("discover - 
filtered:{:?}", &filtered_onvif_cameras); - filtered_onvif_cameras - } - fn are_shared(&self) -> Result { - Ok(true) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use akri_shared::onvif::device_info::test_onvif::MockOnvifQueryImpl; - - struct IpAndMac { - mock_uri: &'static str, - mock_ip: &'static str, - mock_mac: &'static str, - } - - struct Scope { - mock_uri: &'static str, - mock_scope: &'static str, - } - - fn configure_scenario( - mock: &mut MockOnvifQueryImpl, - ip_and_mac: Option, - scope: Option, - ) { - if let Some(ip_and_mac_) = ip_and_mac { - configure_get_device_ip_and_mac_address( - mock, - &ip_and_mac_.mock_uri, - &ip_and_mac_.mock_ip, - &ip_and_mac_.mock_mac, - ) - } - if let Some(scope_) = scope { - configure_get_device_scopes(mock, &scope_.mock_uri, &scope_.mock_scope) - } - } - - fn configure_get_device_ip_and_mac_address( - mock: &mut MockOnvifQueryImpl, - uri: &'static str, - ip: &'static str, - mac: &'static str, - ) { - mock.expect_get_device_ip_and_mac_address() - .times(1) - .withf(move |u| u == uri) - .returning(move |_| Ok((ip.to_string(), mac.to_string()))); - } - - fn configure_get_device_scopes( - mock: &mut MockOnvifQueryImpl, - uri: &'static str, - scope: &'static str, - ) { - mock.expect_get_device_scopes() - .times(1) - .withf(move |u| u == uri) - .returning(move |_| Ok(vec![scope.to_string()])); - } - - #[tokio::test] - async fn test_apply_filters_no_filters() { - let mock_uri = "device_uri"; - - let mut mock = MockOnvifQueryImpl::new(); - configure_scenario( - &mut mock, - Some(IpAndMac { - mock_uri: "device_uri", - mock_ip: "mock.ip", - mock_mac: "mock:mac", - }), - Some(Scope { - mock_uri: "device_uri", - mock_scope: "mock.scope", - }), - ); - - let onvif = OnvifDiscoveryHandler::new(&OnvifDiscoveryHandlerConfig { - ip_addresses: None, - mac_addresses: None, - scopes: None, - discovery_timeout_seconds: 1, - }); - let instances = onvif - .apply_filters(vec![mock_uri.to_string()], &mock) - .await - .unwrap(); - - assert_eq!(1, instances.len()); - } - - #[tokio::test] - async fn test_apply_filters_include_ip_exist() { - let mock_uri = "device_uri"; - let mock_ip = "mock.ip"; - - let mut mock = MockOnvifQueryImpl::new(); - configure_scenario( - &mut mock, - Some(IpAndMac { - mock_uri, - mock_ip, - mock_mac: "mock:mac", - }), - Some(Scope { - mock_uri, - mock_scope: "mock.scope", - }), - ); - - let onvif = OnvifDiscoveryHandler::new(&OnvifDiscoveryHandlerConfig { - ip_addresses: Some(FilterList { - action: FilterType::Include, - items: vec![mock_ip.to_string()], - }), - mac_addresses: None, - scopes: None, - discovery_timeout_seconds: 1, - }); - let instances = onvif - .apply_filters(vec![mock_uri.to_string()], &mock) - .await - .unwrap(); - - assert_eq!(1, instances.len()); - } - - #[tokio::test] - async fn test_apply_filters_include_ip_nonexist() { - let mock_uri = "device_uri"; - - let mut mock = MockOnvifQueryImpl::new(); - configure_scenario( - &mut mock, - Some(IpAndMac { - mock_uri, - mock_ip: "mock.ip", - mock_mac: "mock:mac", - }), - None, - ); - - let onvif = OnvifDiscoveryHandler::new(&OnvifDiscoveryHandlerConfig { - ip_addresses: Some(FilterList { - action: FilterType::Include, - items: vec!["nonexist.ip".to_string()], - }), - mac_addresses: None, - scopes: None, - discovery_timeout_seconds: 1, - }); - let instances = onvif - .apply_filters(vec![mock_uri.to_string()], &mock) - .await - .unwrap(); - - assert_eq!(0, instances.len()); - } - - #[tokio::test] - async fn test_apply_filters_exclude_ip_nonexist() { - let mock_uri = "device_uri"; 
- - let mut mock = MockOnvifQueryImpl::new(); - configure_scenario( - &mut mock, - Some(IpAndMac { - mock_uri, - mock_ip: "mock.ip", - mock_mac: "mock:mac", - }), - Some(Scope { - mock_uri, - mock_scope: "mock.scope", - }), - ); - - let onvif = OnvifDiscoveryHandler::new(&OnvifDiscoveryHandlerConfig { - ip_addresses: Some(FilterList { - action: FilterType::Exclude, - items: vec!["nonexist.ip".to_string()], - }), - mac_addresses: None, - scopes: None, - discovery_timeout_seconds: 1, - }); - let instances = onvif - .apply_filters(vec![mock_uri.to_string()], &mock) - .await - .unwrap(); - - assert_eq!(1, instances.len()); - } - - #[tokio::test] - async fn test_apply_filters_exclude_ip_exist() { - let mock_uri = "device_uri"; - let mock_ip = "mock.ip"; - - let mut mock = MockOnvifQueryImpl::new(); - configure_scenario( - &mut mock, - Some(IpAndMac { - mock_uri, - mock_ip, - mock_mac: "mock:mac", - }), - None, - ); - - let onvif = OnvifDiscoveryHandler::new(&OnvifDiscoveryHandlerConfig { - ip_addresses: Some(FilterList { - action: FilterType::Exclude, - items: vec![mock_ip.to_string()], - }), - mac_addresses: None, - scopes: None, - discovery_timeout_seconds: 1, - }); - let instances = onvif - .apply_filters(vec![mock_uri.to_string()], &mock) - .await - .unwrap(); - - assert_eq!(0, instances.len()); - } - - #[tokio::test] - async fn test_apply_filters_include_mac_exist() { - let mock_uri = "device_uri"; - let mock_mac = "mock:mac"; - - let mut mock = MockOnvifQueryImpl::new(); - configure_scenario( - &mut mock, - Some(IpAndMac { - mock_uri, - mock_ip: "mock.ip", - mock_mac, - }), - Some(Scope { - mock_uri, - mock_scope: "mock.scope", - }), - ); - - let onvif = OnvifDiscoveryHandler::new(&OnvifDiscoveryHandlerConfig { - ip_addresses: None, - mac_addresses: Some(FilterList { - action: FilterType::Include, - items: vec![mock_mac.to_string()], - }), - scopes: None, - discovery_timeout_seconds: 1, - }); - let instances = onvif - .apply_filters(vec![mock_uri.to_string()], &mock) - .await - .unwrap(); - - assert_eq!(1, instances.len()); - } - - #[tokio::test] - async fn test_apply_filters_include_mac_nonexist() { - let mock_uri = "device_uri"; - - let mut mock = MockOnvifQueryImpl::new(); - configure_scenario( - &mut mock, - Some(IpAndMac { - mock_uri, - mock_ip: "mock.ip", - mock_mac: "mock:mac", - }), - None, - ); - - let onvif = OnvifDiscoveryHandler::new(&OnvifDiscoveryHandlerConfig { - ip_addresses: None, - mac_addresses: Some(FilterList { - action: FilterType::Include, - items: vec!["nonexist:mac".to_string()], - }), - scopes: None, - discovery_timeout_seconds: 1, - }); - let instances = onvif - .apply_filters(vec![mock_uri.to_string()], &mock) - .await - .unwrap(); - - assert_eq!(0, instances.len()); - } - - #[tokio::test] - async fn test_apply_filters_exclude_mac_nonexist() { - let mock_uri = "device_uri"; - - let mut mock = MockOnvifQueryImpl::new(); - configure_scenario( - &mut mock, - Some(IpAndMac { - mock_uri, - mock_ip: "mock.ip", - mock_mac: "mock:mac", - }), - Some(Scope { - mock_uri, - mock_scope: "mock.scope", - }), - ); - - let onvif = OnvifDiscoveryHandler::new(&OnvifDiscoveryHandlerConfig { - ip_addresses: None, - mac_addresses: Some(FilterList { - action: FilterType::Exclude, - items: vec!["nonexist:mac".to_string()], - }), - scopes: None, - discovery_timeout_seconds: 1, - }); - let instances = onvif - .apply_filters(vec![mock_uri.to_string()], &mock) - .await - .unwrap(); - - assert_eq!(1, instances.len()); - } - - #[tokio::test] - async fn test_apply_filters_exclude_mac_exist() 
{ - let mock_uri = "device_uri"; - let mock_mac = "mock:mac"; - - let mut mock = MockOnvifQueryImpl::new(); - configure_scenario( - &mut mock, - Some(IpAndMac { - mock_uri, - mock_ip: "mock.ip", - mock_mac, - }), - None, - ); - - let onvif = OnvifDiscoveryHandler::new(&OnvifDiscoveryHandlerConfig { - ip_addresses: None, - mac_addresses: Some(FilterList { - action: FilterType::Exclude, - items: vec![mock_mac.to_string()], - }), - scopes: None, - discovery_timeout_seconds: 1, - }); - let instances = onvif - .apply_filters(vec![mock_uri.to_string()], &mock) - .await - .unwrap(); - - assert_eq!(0, instances.len()); - } -} diff --git a/agent/src/protocols/onvif/mod.rs b/agent/src/protocols/onvif/mod.rs deleted file mode 100644 index 5d3a2ee8f..000000000 --- a/agent/src/protocols/onvif/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod discovery_handler; -mod discovery_impl; -pub use self::discovery_handler::OnvifDiscoveryHandler; diff --git a/agent/src/protocols/opcua/discovery_handler.rs b/agent/src/protocols/opcua/discovery_handler.rs deleted file mode 100644 index e0cbb1ad7..000000000 --- a/agent/src/protocols/opcua/discovery_handler.rs +++ /dev/null @@ -1,51 +0,0 @@ -use super::super::{DiscoveryHandler, DiscoveryResult}; -use super::{discovery_impl::do_standard_discovery, OPCUA_DISCOVERY_URL_LABEL}; -use akri_shared::akri::configuration::{OpcuaDiscoveryHandlerConfig, OpcuaDiscoveryMethod}; -use async_trait::async_trait; -use failure::Error; - -/// `OpcuaDiscoveryHandler` discovers the OPC UA server instances as described by the `discovery_handler_config.opcua_discovery_method` -/// and the filter `discover_handler_config.application_names`. The instances it discovers are always shared. -#[derive(Debug)] -pub struct OpcuaDiscoveryHandler { - discovery_handler_config: OpcuaDiscoveryHandlerConfig, -} - -impl OpcuaDiscoveryHandler { - pub fn new(discovery_handler_config: &OpcuaDiscoveryHandlerConfig) -> Self { - OpcuaDiscoveryHandler { - discovery_handler_config: discovery_handler_config.clone(), - } - } -} - -#[async_trait] -impl DiscoveryHandler for OpcuaDiscoveryHandler { - async fn discover(&self) -> Result, Error> { - let discovery_urls: Vec = - match &self.discovery_handler_config.opcua_discovery_method { - OpcuaDiscoveryMethod::standard(standard_opcua_discovery) => do_standard_discovery( - standard_opcua_discovery.discovery_urls.clone(), - self.discovery_handler_config.application_names.clone(), - ), - // No other discovery methods implemented yet - }; - - // Build DiscoveryResult for each server discovered - Ok(discovery_urls - .into_iter() - .map(|discovery_url| { - let mut properties = std::collections::HashMap::new(); - trace!( - "discover - found OPC UA server at DiscoveryURL {}", - discovery_url - ); - properties.insert(OPCUA_DISCOVERY_URL_LABEL.to_string(), discovery_url.clone()); - DiscoveryResult::new(&discovery_url, properties, self.are_shared().unwrap()) - }) - .collect::>()) - } - fn are_shared(&self) -> Result { - Ok(true) - } -} diff --git a/agent/src/protocols/udev/discovery_handler.rs b/agent/src/protocols/udev/discovery_handler.rs deleted file mode 100644 index 422d64f76..000000000 --- a/agent/src/protocols/udev/discovery_handler.rs +++ /dev/null @@ -1,57 +0,0 @@ -use super::super::{DiscoveryHandler, DiscoveryResult}; -use super::{discovery_impl, udev_enumerator, UDEV_DEVNODE_LABEL_ID}; -use akri_shared::akri::configuration::UdevDiscoveryHandlerConfig; -use async_trait::async_trait; -use failure::Error; -use std::collections::HashSet; - -/// `UdevDiscoveryHandler` discovers udev 
instances by parsing the udev rules in `discovery_handler_config.udev_rules`. -/// The instances it discovers are always unshared. -#[derive(Debug)] -pub struct UdevDiscoveryHandler { - discovery_handler_config: UdevDiscoveryHandlerConfig, -} - -impl UdevDiscoveryHandler { - pub fn new(discovery_handler_config: &UdevDiscoveryHandlerConfig) -> Self { - UdevDiscoveryHandler { - discovery_handler_config: discovery_handler_config.clone(), - } - } -} - -#[async_trait] -impl DiscoveryHandler for UdevDiscoveryHandler { - async fn discover(&self) -> Result, Error> { - let udev_rules = self.discovery_handler_config.udev_rules.clone(); - trace!("discover - for udev rules {:?}", udev_rules); - let mut devpaths: HashSet = HashSet::new(); - udev_rules - .iter() - .map(|rule| { - let enumerator = udev_enumerator::create_enumerator(); - let paths = discovery_impl::do_parse_and_find(enumerator, &rule)?; - paths.into_iter().for_each(|path| { - devpaths.insert(path); - }); - Ok(()) - }) - .collect::>()?; - trace!( - "discover - mapping and returning devices at devpaths {:?}", - devpaths - ); - Ok(devpaths - .into_iter() - .map(|path| { - let mut properties = std::collections::HashMap::new(); - properties.insert(UDEV_DEVNODE_LABEL_ID.to_string(), path.clone()); - DiscoveryResult::new(&path, properties, self.are_shared().unwrap()) - }) - .collect::>()) - } - - fn are_shared(&self) -> Result { - Ok(false) - } -} diff --git a/agent/src/util/config_action.rs b/agent/src/util/config_action.rs index ac01bbfa0..89a8ac798 100644 --- a/agent/src/util/config_action.rs +++ b/agent/src/util/config_action.rs @@ -1,33 +1,23 @@ -use super::super::protocols; use super::{ constants::{ - DEVICE_PLUGIN_PATH, DISCOVERY_DELAY_SECS, SHARED_INSTANCE_OFFLINE_GRACE_PERIOD_SECS, + DISCOVERY_OPERATOR_FINISHED_DISCOVERY_CHANNEL_CAPACITY, + DISCOVERY_OPERATOR_STOP_DISCOVERY_CHANNEL_CAPACITY, }, device_plugin_service, - device_plugin_service::{ - get_device_instance_name, ConnectivityStatus, InstanceInfo, InstanceMap, - }, + device_plugin_service::InstanceMap, + discovery_operator::start_discovery::{start_discovery, DiscoveryOperator}, + registration::RegisteredDiscoveryHandlerMap, }; use akri_shared::{ - akri::{ - configuration::{Configuration, KubeAkriConfig, ProtocolHandler}, - API_CONFIGURATIONS, API_NAMESPACE, API_VERSION, - }, + akri::{configuration::KubeAkriConfig, API_CONFIGURATIONS, API_NAMESPACE, API_VERSION}, k8s, - k8s::KubeInterface, + k8s::{try_delete_instance, KubeInterface}, }; use futures::StreamExt; use kube::api::{Informer, RawApi, WatchEvent}; use log::{info, trace}; -use std::{ - collections::HashMap, - sync::Arc, - time::{Duration, Instant}, -}; -use tokio::{ - sync::{broadcast, mpsc, Mutex}, - time::timeout, -}; +use std::{collections::HashMap, sync::Arc}; +use tokio::sync::{broadcast, mpsc, Mutex}; type ConfigMap = Arc>>; @@ -35,13 +25,22 @@ type ConfigMap = Arc>>; /// and senders for ceasing to discover instances upon Configuration deletion. #[derive(Debug)] pub struct ConfigInfo { + /// Map of all of a Configuration's Instances instance_map: InstanceMap, - stop_discovery_sender: mpsc::Sender<()>, - finished_discovery_sender: broadcast::Sender<()>, + /// Sends notification to a `DiscoveryOperator` that it should stop all discovery for its Configuration. + /// This signals it to tell each of its subtasks to stop discovery. + /// A broadcast channel is used so both the sending and receiving ends can be cloned. 
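The cloneability noted in the comment above is the whole reason a broadcast channel is used here. A minimal standalone sketch (illustrative only, not part of this patch) of how one stop signal fans out to every cloned receiver, which an mpsc channel could deliver to only a single subtask:

    use tokio::sync::broadcast;

    #[tokio::main]
    async fn main() {
        let (stop_tx, _) = broadcast::channel::<()>(1);
        let mut handles = Vec::new();
        for i in 0..3 {
            // Each discovery subtask subscribes its own receiving end.
            let mut stop_rx = stop_tx.subscribe();
            handles.push(tokio::spawn(async move {
                stop_rx.recv().await.unwrap();
                println!("subtask {} stopping discovery", i);
            }));
        }
        // A single send reaches all subscribed subtasks.
        stop_tx.send(()).unwrap();
        for handle in handles {
            handle.await.unwrap();
        }
    }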
+ stop_discovery_sender: broadcast::Sender<()>, + /// Receives notification that all `DiscoveryOperators` threads have completed and a Configuration's Instances + /// can be safely deleted and the associated `DevicePluginServices` terminated. + finished_discovery_receiver: mpsc::Receiver<()>, } /// This handles pre-existing Configurations and invokes an internal method that watches for Configuration events. -pub async fn do_config_watch() -> Result<(), Box> { +pub async fn do_config_watch( + discovery_handler_map: RegisteredDiscoveryHandlerMap, + new_discovery_handler_sender: broadcast::Sender, +) -> Result<(), Box> { info!("do_config_watch - enter"); let config_map: ConfigMap = Arc::new(Mutex::new(HashMap::new())); let kube_interface = k8s::create_kube_interface(); @@ -51,16 +50,31 @@ pub async fn do_config_watch() -> Result<(), Box Result<(), Box, ) -> Result<(), Box> { trace!("watch_for_config_changes - start"); let akri_config_type = RawApi::customResource(API_CONFIGURATIONS) @@ -86,7 +102,15 @@ async fn watch_for_config_changes( // Currently, this does not handle None except to break the // while. while let Some(event) = configs.next().await { - handle_config(kube_interface, event?, config_map.clone()).await? + let new_discovery_handler_sender = new_discovery_handler_sender.clone(); + handle_config( + kube_interface, + event?, + config_map.clone(), + discovery_handler_map.clone(), + new_discovery_handler_sender, + ) + .await? } } } @@ -97,6 +121,8 @@ async fn handle_config( kube_interface: &impl KubeInterface, event: WatchEvent, config_map: ConfigMap, + discovery_handler_map: RegisteredDiscoveryHandlerMap, + new_discovery_handler_sender: broadcast::Sender, ) -> Result<(), Box> { trace!("handle_config - something happened to a configuration"); match event { @@ -106,7 +132,15 @@ async fn handle_config( config.metadata.name ); tokio::spawn(async move { - handle_config_add(&config, config_map).await.unwrap(); + handle_config_add( + Arc::new(Box::new(k8s::create_kube_interface())), + &config, + config_map, + discovery_handler_map, + new_discovery_handler_sender, + ) + .await + .unwrap(); }); Ok(()) } @@ -126,7 +160,15 @@ async fn handle_config( ); handle_config_delete(kube_interface, &config, config_map.clone()).await?; tokio::spawn(async move { - handle_config_add(&config, config_map).await.unwrap(); + handle_config_add( + Arc::new(Box::new(k8s::create_kube_interface())), + &config, + config_map, + discovery_handler_map, + new_discovery_handler_sender, + ) + .await + .unwrap(); }); Ok(()) } @@ -140,65 +182,52 @@ async fn handle_config( /// This handles added Configuration by creating a new ConfigInfo for it and adding it to the ConfigMap. /// Then calls a function to continually observe the availability of instances associated with the Configuration. 
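The `handle_config_add` that follows parks one stop sender per Configuration in the shared ConfigMap so that a later delete event can signal the discovery task it spawns. Reduced to a hedged sketch with simplified stand-in types (not Akri's actual signatures):

    use std::{collections::HashMap, sync::Arc};
    use tokio::sync::{broadcast, Mutex};

    type StopMap = Arc<Mutex<HashMap<String, broadcast::Sender<()>>>>;

    async fn add_config(name: &str, stops: StopMap) {
        let (stop_tx, mut stop_rx) = broadcast::channel::<()>(1);
        stops.lock().await.insert(name.to_string(), stop_tx);
        tokio::spawn(async move {
            // A real discovery loop would run here until signaled to stop.
            let _ = stop_rx.recv().await;
        });
    }

    async fn delete_config(name: &str, stops: StopMap) {
        // Removing the sender from the map and signaling ends the spawned task.
        if let Some(stop_tx) = stops.lock().await.remove(name) {
            let _ = stop_tx.send(());
        }
    }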
 async fn handle_config_add(
+    kube_interface: Arc<Box<dyn KubeInterface>>,
     config: &KubeAkriConfig,
     config_map: ConfigMap,
+    discovery_handler_map: RegisteredDiscoveryHandlerMap,
+    new_discovery_handler_sender: broadcast::Sender<String>,
 ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
-    let config_protocol = config.spec.protocol.clone();
-    let discovery_handler = protocols::get_discovery_handler(&config_protocol)?;
-    let discovery_results = discovery_handler.discover().await?;
     let config_name = config.metadata.name.clone();
-    let config_uid = config.metadata.uid.as_ref().unwrap().clone();
-    let config_namespace = config.metadata.namespace.as_ref().unwrap().clone();
-    info!(
-        "handle_config_add - entered for Configuration {} with visible_instances={:?}",
-        config.metadata.name, &discovery_results
-    );
     // Create a new instance map for this config and add it to the config map
     let instance_map: InstanceMap = Arc::new(Mutex::new(HashMap::new()));
-    // Channel capacity: should only ever be sent once upon config deletion
-    let (stop_discovery_sender, stop_discovery_receiver) = mpsc::channel(1);
-    // Channel capacity: should only ever be sent once upon receiving stop watching message
-    let (finished_discovery_sender, _) = broadcast::channel(1);
+    let (stop_discovery_sender, _): (broadcast::Sender<()>, broadcast::Receiver<()>) =
+        broadcast::channel(DISCOVERY_OPERATOR_STOP_DISCOVERY_CHANNEL_CAPACITY);
+    let (mut finished_discovery_sender, finished_discovery_receiver) =
+        mpsc::channel(DISCOVERY_OPERATOR_FINISHED_DISCOVERY_CHANNEL_CAPACITY);
     let config_info = ConfigInfo {
         instance_map: instance_map.clone(),
-        stop_discovery_sender,
-        finished_discovery_sender: finished_discovery_sender.clone(),
+        stop_discovery_sender: stop_discovery_sender.clone(),
+        finished_discovery_receiver,
     };
     config_map
         .lock()
         .await
         .insert(config_name.clone(), config_info);
-    let kube_interface = k8s::create_kube_interface();
-    let config_spec = config.spec.clone();
+    let config = config.clone();
     // Keep discovering instances until the config is deleted, signaled by a message from handle_config_delete
     tokio::spawn(async move {
-        let periodic_discovery = PeriodicDiscovery {
-            config_name,
-            config_uid,
-            config_namespace,
-            config_spec,
-            config_protocol,
-            instance_map,
-        };
-        periodic_discovery
-            .do_periodic_discovery(
-                &kube_interface,
-                stop_discovery_receiver,
-                finished_discovery_sender,
-                DEVICE_PLUGIN_PATH,
-            )
-            .await
-            .unwrap();
+        let discovery_operator =
+            DiscoveryOperator::new(discovery_handler_map, config, instance_map);
+        start_discovery(
+            discovery_operator,
+            new_discovery_handler_sender,
+            stop_discovery_sender,
+            &mut finished_discovery_sender,
+            kube_interface,
+        )
+        .await
+        .unwrap();
     })
     .await?;
     Ok(())
 }
 
-/// This handles a deleted Congfiguration. First, it ceases to discover instances associated with the Configuration.
+/// This handles a deleted Configuration. First, it ceases to discover instances associated with the Configuration.
 /// Then, for each of the Configuration's Instances, it signals the DevicePluginService to shutdown,
 /// and deletes the Instance CRD.
-pub async fn handle_config_delete(
+async fn handle_config_delete(
     kube_interface: &impl KubeInterface,
     config: &KubeAkriConfig,
     config_map: ConfigMap,
@@ -216,17 +245,17 @@ pub async fn handle_config_delete(
         .stop_discovery_sender
         .clone()
         .send(())
-        .await
         .is_ok()
     {
-        let mut finished_discovery_receiver = config_map
+        config_map
             .lock()
             .await
-            .get(&config.metadata.name)
+            .get_mut(&config.metadata.name)
             .unwrap()
-            .finished_discovery_sender
-            .subscribe();
-        finished_discovery_receiver.recv().await.unwrap();
+            .finished_discovery_receiver
+            .recv()
+            .await
+            .unwrap();
         trace!(
             "handle_config_delete - for config {} received message that do_periodic_discovery ended",
             config.metadata.name
         );
@@ -249,8 +278,16 @@ pub async fn handle_config_delete(
             .clone();
         config_map_locked.remove(&config.metadata.name);
     }
+    delete_all_instances_in_map(kube_interface, instance_map, config).await?;
+    Ok(())
+}
 
-    // Shutdown Instances' DevicePluginServices and delete the Instances
+/// This shuts down all of a Configuration's Instances and terminates the associated Device Plugins
+pub async fn delete_all_instances_in_map(
+    kube_interface: &impl k8s::KubeInterface,
+    instance_map: InstanceMap,
+    config: &KubeAkriConfig,
+) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
     let mut instance_map_locked = instance_map.lock().await;
     let instances_to_delete_map = instance_map_locked.clone();
     let namespace = config.metadata.namespace.as_ref().unwrap();
@@ -267,305 +304,49 @@ pub async fn handle_config_delete(
         instance_map_locked.remove(&instance_name);
         try_delete_instance(kube_interface, &instance_name, &namespace).await?;
     }
-    Ok(())
 }
 
-/// This deletes an Instance unless it has already been deleted by another node
-async fn try_delete_instance(
-    kube_interface: &impl KubeInterface,
-    instance_name: &str,
-    instance_namespace: &str,
-) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
-    match kube_interface
-        .delete_instance(instance_name, &instance_namespace)
-        .await
-    {
-        Ok(()) => {
-            trace!("delete_instance - deleted Instance {}", instance_name);
-            Ok(())
-        }
-        Err(e) => {
-            // Check if already was deleted else return error
-            if let Err(_e) = kube_interface
-                .find_instance(&instance_name, &instance_namespace)
-                .await
-            {
-                trace!(
-                    "delete_instance - discovered Instance {} already deleted",
-                    instance_name
-                );
-                Ok(())
-            } else {
-                Err(e)
-            }
-        }
-    }
-}
-
-/// Information required for periodic discovery
-struct PeriodicDiscovery {
-    config_name: String,
-    config_uid: String,
-    config_namespace: String,
-    config_spec: Configuration,
-    config_protocol: ProtocolHandler,
-    instance_map: InstanceMap,
-}
-
-impl PeriodicDiscovery {
-    /// This is spawned as a task for each Configuration and continues to periodically run
-    /// until the Config is deleted, at which point, this function is signaled to stop.
-    /// Looks up which instances are currently visible to the node. Passes this list to a function that
-    /// updates the ConnectivityStatus of the Configuration's Instances or deletes Instance CRDs if needed.
-    /// If a new instance becomes visible that isn't in the Configuration's InstanceMap,
-    /// a DevicePluginService and Instance CRD are created for it, and it is added to the InstanceMap.
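The try_delete_instance shown above (now provided by akri_shared::k8s) encodes a race-tolerant delete. The same idea in isolation, with generic futures standing in for the kube calls (a sketch, not the crate's API):

    // Treat "already gone" as success: if the delete fails but a follow-up
    // lookup also fails, another node won the race and the outcome is the same.
    async fn delete_if_present<E>(
        delete: impl std::future::Future<Output = Result<(), E>>,
        find: impl std::future::Future<Output = Result<(), E>>,
    ) -> Result<(), E> {
        match delete.await {
            Ok(()) => Ok(()),
            Err(e) => {
                if find.await.is_err() {
                    Ok(())
                } else {
                    Err(e)
                }
            }
        }
    }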
- async fn do_periodic_discovery( - &self, - kube_interface: &impl KubeInterface, - mut stop_discovery_receiver: mpsc::Receiver<()>, - finished_discovery_sender: broadcast::Sender<()>, - device_plugin_path: &str, - ) -> Result<(), Box> { - trace!( - "do_periodic_discovery - start for config {}", - self.config_name - ); - let protocol = protocols::get_discovery_handler(&self.config_protocol)?; - let shared = protocol.are_shared()?; - loop { - trace!( - "do_periodic_discovery - loop iteration for config {}", - &self.config_name - ); - let discovery_results = protocol.discover().await?; - let config_name = self.config_name.clone(); - let currently_visible_instances: HashMap = - discovery_results - .iter() - .map(|discovery_result| { - let instance_name = - get_device_instance_name(&discovery_result.digest, &config_name); - (instance_name, discovery_result.clone()) - }) - .collect(); - - // Update the connectivity status of instances and return list of visible instances that don't have Instance CRDs - let new_discovery_results = self - .update_connectivity_status(kube_interface, ¤tly_visible_instances, shared) - .await?; - - // If there are newly visible instances associated with a Config, make a device plugin and Instance CRD for them - if !new_discovery_results.is_empty() { - for discovery_result in new_discovery_results { - let config_name = config_name.clone(); - let instance_name = - get_device_instance_name(&discovery_result.digest, &config_name); - trace!( - "do_periodic_discovery - new instance {} came online", - instance_name - ); - let instance_properties = discovery_result.properties.clone(); - let config_spec = self.config_spec.clone(); - let instance_map = self.instance_map.clone(); - if let Err(e) = device_plugin_service::build_device_plugin( - instance_name, - config_name, - self.config_uid.clone(), - self.config_namespace.clone(), - config_spec, - shared, - instance_properties, - instance_map, - device_plugin_path, - ) - .await - { - error!("do_periodic_discovery - error {} building device plugin ... trying again on next iteration", e); - } - } - } - if timeout( - Duration::from_secs(DISCOVERY_DELAY_SECS), - stop_discovery_receiver.recv(), - ) - .await - .is_ok() - { - trace!("do_periodic_discovery - for config {} received message to end ... sending message that finished and returning Ok", config_name); - finished_discovery_sender.send(()).unwrap(); - return Ok(()); - }; - } - } - - /// Takes in a list of currently visible instances and either updates an Instance's ConnectivityStatus or deletes an Instance. - /// If an instance is no longer visible then it's ConnectivityStatus is changed to Offline(time now). - /// The associated DevicePluginService checks its ConnectivityStatus before sending a response back to kubelet - /// and will send all unhealthy devices if its status is Offline, preventing kubelet from allocating any more pods to it. - /// An Instance CRD is deleted and it's DevicePluginService shutdown if its: - /// (A) shared instance is still not visible after 5 minutes or (B) unshared instance is still not visible on the next visibility check. 
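The reclamation rule described in the comment above reduces to a single predicate; a standalone sketch, assuming the same grace-period semantics:

    use std::time::{Duration, Instant};

    // A shared instance is only reclaimed after the grace period elapses;
    // an unshared instance is reclaimed at the very next visibility check.
    fn should_terminate(shared: bool, offline_since: Instant, grace_period: Duration) -> bool {
        !shared || offline_since.elapsed() >= grace_period
    }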
- /// An unshared instance will be offline for between DISCOVERY_DELAY_SECS - 2 x DISCOVERY_DELAY_SECS - async fn update_connectivity_status( - &self, - kube_interface: &impl KubeInterface, - currently_visible_instances: &HashMap, - shared: bool, - ) -> Result, Box> - { - let instance_map_clone = self.instance_map.lock().await.clone(); - // Find all visible instances that do not have Instance CRDs yet - let new_discovery_results: Vec = currently_visible_instances - .iter() - .filter(|(name, _)| !instance_map_clone.contains_key(*name)) - .map(|(_, p)| p.clone()) - .collect(); - - for (instance, instance_info) in instance_map_clone { - if currently_visible_instances.contains_key(&instance) { - let connectivity_status = instance_info.connectivity_status; - // If instance is visible, make sure connectivity status is (updated to be) Online - if let ConnectivityStatus::Offline(_instant) = connectivity_status { - trace!( - "update_connectivity_status - instance {} that was temporarily offline is back online", - instance - ); - let list_and_watch_message_sender = instance_info.list_and_watch_message_sender; - let updated_instance_info = InstanceInfo { - connectivity_status: ConnectivityStatus::Online, - list_and_watch_message_sender: list_and_watch_message_sender.clone(), - }; - self.instance_map - .lock() - .await - .insert(instance.clone(), updated_instance_info); - list_and_watch_message_sender - .send(device_plugin_service::ListAndWatchMessageKind::Continue) - .unwrap(); - } - trace!( - "update_connectivity_status - instance {} still online", - instance - ); - } else { - // If the instance is not visible: - // // If the instance has not already been labeled offline, label it - // // If the instance has already been labeled offline - // // // shared - remove instance from map if grace period has elaspsed without the instance coming back online - // // // unshared - remove instance from map - match instance_info.connectivity_status { - ConnectivityStatus::Online => { - let sender = instance_info.list_and_watch_message_sender.clone(); - let updated_instance_info = InstanceInfo { - connectivity_status: ConnectivityStatus::Offline(Instant::now()), - list_and_watch_message_sender: instance_info - .list_and_watch_message_sender, - }; - self.instance_map - .lock() - .await - .insert(instance.clone(), updated_instance_info); - trace!( - "update_connectivity_status - instance {} went offline ... starting timer and forcing list_and_watch to continue", - instance - ); - sender - .send(device_plugin_service::ListAndWatchMessageKind::Continue) - .unwrap(); - } - ConnectivityStatus::Offline(instant) => { - let time_offline = instant.elapsed().as_secs(); - // If instance has been offline for longer than the grace period or it is unshared, terminate the associated device plugin - if !shared || time_offline >= SHARED_INSTANCE_OFFLINE_GRACE_PERIOD_SECS { - trace!("update_connectivity_status - instance {} has been offline too long ... 
terminating DevicePluginService", instance); - device_plugin_service::terminate_device_plugin_service( - &instance, - self.instance_map.clone(), - ) - .await?; - try_delete_instance(kube_interface, &instance, &self.config_namespace) - .await?; - } - } - } - } - } - Ok(new_discovery_results) - } -} - #[cfg(test)] mod config_action_tests { + use super::super::{ + device_plugin_service, + device_plugin_service::{InstanceConnectivityStatus, InstanceMap}, + discovery_operator::tests::{add_discovery_handler_to_map, build_instance_map}, + registration::{DiscoveryHandlerEndpoint, DiscoveryHandlerStatus}, + }; use super::*; - use akri_shared::k8s::test_kube::MockKubeImpl; - use protocols::debug_echo::{DEBUG_ECHO_AVAILABILITY_CHECK_PATH, OFFLINE}; - use std::{env, fs}; - use tempfile::Builder; - use tokio::sync::broadcast; - - async fn build_instance_map( - config: &KubeAkriConfig, - visibile_discovery_results: &mut Vec, - list_and_watch_message_receivers: &mut Vec< - broadcast::Receiver, - >, - connectivity_status: ConnectivityStatus, - ) -> InstanceMap { - // Set env vars for getting instances - env::set_var("AGENT_NODE_NAME", "node-a"); - env::set_var("ENABLE_DEBUG_ECHO", "yes"); - let protocol = config.spec.protocol.clone(); - let discovery_handler = protocols::get_discovery_handler(&protocol).unwrap(); - let discovery_results = discovery_handler.discover().await.unwrap(); - *visibile_discovery_results = discovery_results.clone(); - let instance_map: InstanceMap = Arc::new(Mutex::new( - discovery_results - .iter() - .map(|instance_info| { - let (list_and_watch_message_sender, list_and_watch_message_receiver) = - broadcast::channel(2); - list_and_watch_message_receivers.push(list_and_watch_message_receiver); - let instance_name = - get_device_instance_name(&instance_info.digest, &config.metadata.name); - ( - instance_name, - InstanceInfo { - list_and_watch_message_sender, - connectivity_status: connectivity_status.clone(), - }, - ) - }) - .collect(), - )); - instance_map - } + use akri_discovery_utils::discovery::{mock_discovery_handler, v0::Device}; + use akri_shared::{akri::configuration::KubeAkriConfig, k8s::MockKubeInterface}; + use std::{collections::HashMap, fs, sync::Arc}; + use tokio::sync::{broadcast, Mutex}; #[tokio::test] async fn test_handle_config_delete() { let _ = env_logger::builder().is_test(true).try_init(); - let path_to_config = "../test/json/config-a.json"; - let dcc_json = fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); + let path_to_config = "../test/yaml/config-a.yaml"; + let config_yaml = fs::read_to_string(path_to_config).expect("Unable to read file"); + let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); let config_name = config.metadata.name.clone(); let mut list_and_watch_message_receivers = Vec::new(); let mut visible_discovery_results = Vec::new(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); let instance_map: InstanceMap = build_instance_map( &config, &mut visible_discovery_results, &mut list_and_watch_message_receivers, - ConnectivityStatus::Online, + InstanceConnectivityStatus::Online, ) .await; - let (stop_discovery_sender, mut stop_discovery_receiver) = mpsc::channel(2); - let (finished_discovery_sender, _) = broadcast::channel(2); + let (stop_discovery_sender, mut stop_discovery_receiver) = broadcast::channel(2); + let (mut finished_discovery_sender, finished_discovery_receiver) = mpsc::channel(2); let mut map: 
HashMap = HashMap::new(); map.insert( config_name.clone(), ConfigInfo { stop_discovery_sender, instance_map: instance_map.clone(), - finished_discovery_sender: finished_discovery_sender.clone(), + finished_discovery_receiver, }, ); let config_map: ConfigMap = Arc::new(Mutex::new(map)); @@ -581,10 +362,10 @@ mod config_action_tests { assert!(!config_map.lock().await.contains_key(&config_name)); }); - // Assert that handle_config_delete tells do_periodic_discovery to end - assert!(stop_discovery_receiver.recv().await.is_some()); + // Assert that handle_config_delete tells start_discovery to end + assert!(stop_discovery_receiver.recv().await.is_ok()); // Mimic do_periodic_discovery's response - finished_discovery_sender.send(()).unwrap(); + finished_discovery_sender.send(()).await.unwrap(); // Assert list_and_watch is signaled to end for every instance associated with a config let mut tasks = Vec::new(); @@ -602,209 +383,175 @@ mod config_action_tests { assert_eq!(instance_map.lock().await.len(), 0); } - // 1: ConnectivityStatus of all instances that go offline is changed from Online to Offline - // 2: ConnectivityStatus of shared instances that come back online in under 5 minutes is changed from Offline to Online - // 3: ConnectivityStatus of unshared instances that come back online before next periodic discovery is changed from Offline to Online - #[tokio::test] - async fn test_update_connectivity_status() { - let _ = env_logger::builder().is_test(true).try_init(); - let path_to_config = "../test/json/config-a.json"; - let dcc_json = fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); - let config_name = config.metadata.name.clone(); - let mut list_and_watch_message_receivers = Vec::new(); - let mut visible_discovery_results = Vec::new(); - let mock = MockKubeImpl::new(); - - // - // 1: Assert that ConnectivityStatus of instance that are no longer visible is changed to Offline - // - let instance_map: InstanceMap = build_instance_map( - &config, - &mut visible_discovery_results, - &mut list_and_watch_message_receivers, - ConnectivityStatus::Online, - ) - .await; - let shared = true; - // discover returns an empty vector when instances are offline - let no_visible_instances: HashMap = HashMap::new(); - let periodic_dicovery = PeriodicDiscovery { - config_name: config_name.clone(), - config_uid: config.metadata.uid.as_ref().unwrap().clone(), - config_namespace: config.metadata.namespace.as_ref().unwrap().clone(), - config_spec: config.spec.clone(), - config_protocol: config.spec.protocol.clone(), - instance_map: instance_map.clone(), - }; - periodic_dicovery - .update_connectivity_status(&mock, &no_visible_instances, shared) + async fn run_and_test_handle_config_add( + discovery_handler_map: RegisteredDiscoveryHandlerMap, + config_map: ConfigMap, + config: KubeAkriConfig, + dh_endpoint: &DiscoveryHandlerEndpoint, + dh_name: &str, + ) -> tokio::task::JoinHandle<()> { + let (new_discovery_handler_sender, _) = broadcast::channel(1); + let mut mock_kube_interface = MockKubeInterface::new(); + mock_kube_interface + .expect_create_instance() + .times(1) + .returning(move |_, _, _, _, _| Ok(())); + let arc_mock_kube_interface: Arc> = + Arc::new(Box::new(mock_kube_interface)); + let config_add_config = config.clone(); + let config_add_config_map = config_map.clone(); + let config_add_discovery_handler_map = discovery_handler_map.clone(); + let handle = tokio::spawn(async move { + handle_config_add( + 
arc_mock_kube_interface, + &config_add_config, + config_add_config_map, + config_add_discovery_handler_map, + new_discovery_handler_sender, + ) .await .unwrap(); - let unwrapped_instance_map = instance_map.lock().await.clone(); - for (_, instance_info) in unwrapped_instance_map { - assert_ne!( - instance_info.connectivity_status, - ConnectivityStatus::Online - ); - } + }); - // - // 2: Assert that ConnectivityStatus of shared instances that come back online in <5 mins is changed to Online - // - let instance_map: InstanceMap = build_instance_map( - &config, - &mut visible_discovery_results, - &mut list_and_watch_message_receivers, - ConnectivityStatus::Offline(Instant::now()), + // Loop until the Configuration and single discovered Instance are added to the ConfigMap + let mut x: i8 = 0; + while x < 5 { + tokio::time::delay_for(std::time::Duration::from_millis(200)).await; + if let Some(config_info) = config_map.lock().await.get(&config.metadata.name) { + if config_info.instance_map.lock().await.len() == 1 { + break; + } + } + x += 1; + } + assert_ne!(x, 4); + // Assert that Discovery Handler is marked as Active + check_discovery_handler_status( + discovery_handler_map, + dh_name, + dh_endpoint, + DiscoveryHandlerStatus::Active, ) .await; - let shared = true; - let currently_visible_instances: HashMap = - visible_discovery_results - .iter() - .map(|instance_info| { - let instance_name = - get_device_instance_name(&instance_info.digest, &config_name); - (instance_name, instance_info.clone()) - }) - .collect(); - let periodic_dicovery = PeriodicDiscovery { - config_name: config_name.clone(), - config_uid: config.metadata.uid.as_ref().unwrap().clone(), - config_namespace: config.metadata.namespace.as_ref().unwrap().clone(), - config_spec: config.spec.clone(), - config_protocol: config.spec.protocol.clone(), - instance_map: instance_map.clone(), - }; - periodic_dicovery - .update_connectivity_status(&mock, ¤tly_visible_instances, shared) - .await - .unwrap(); - let unwrapped_instance_map = instance_map.lock().await.clone(); - for (_, instance_info) in unwrapped_instance_map { - assert_eq!( - instance_info.connectivity_status, - ConnectivityStatus::Online - ); + handle + } + + async fn check_discovery_handler_status( + discovery_handler_map: RegisteredDiscoveryHandlerMap, + dh_name: &str, + dh_endpoint: &DiscoveryHandlerEndpoint, + dh_status: DiscoveryHandlerStatus, + ) { + let mut x: i8 = 0; + while x < 5 { + tokio::time::delay_for(std::time::Duration::from_millis(200)).await; + let dh_map = discovery_handler_map.lock().unwrap(); + if let Some(dh_details_map) = dh_map.get(dh_name) { + if dh_details_map.get(dh_endpoint).unwrap().connectivity_status == dh_status { + break; + } + } + x += 1; } + assert_ne!(x, 4); + } - // - // 3: Assert that ConnectivityStatus of unshared instances that come back online before next visibility check is changed to Online - // - let instance_map: InstanceMap = build_instance_map( - &config, - &mut visible_discovery_results, - &mut list_and_watch_message_receivers, - ConnectivityStatus::Offline(Instant::now()), + // Tests that when a Configuration is added, deleted, and added again, + // instances are created, deleted and recreated, + // and the Discovery Handler is marked as Active, Waiting, Active, and Waiting. + // Also asserts that all threads are successfully terminated. 
+ #[tokio::test] + async fn test_handle_config_add_delete_add() { + let _ = env_logger::builder().is_test(true).try_init(); + + // Set up Discovery Handler + // Start a mock DH, specifying that it should NOT return an error + let return_error = false; + let (endpoint_dir, endpoint) = + mock_discovery_handler::get_mock_discovery_handler_dir_and_endpoint("mock.sock"); + let dh_endpoint = DiscoveryHandlerEndpoint::Uds(endpoint.to_string()); + let device_id = "device_id"; + let _dh_server_thread_handle = mock_discovery_handler::run_mock_discovery_handler( + &endpoint_dir, + &endpoint, + return_error, + vec![Device { + id: device_id.to_string(), + properties: HashMap::new(), + mounts: Vec::default(), + device_specs: Vec::default(), + }], ) .await; - let shared = false; - let periodic_dicovery = PeriodicDiscovery { - config_name: config_name.clone(), - config_uid: config.metadata.uid.as_ref().unwrap().clone(), - config_namespace: config.metadata.namespace.as_ref().unwrap().clone(), - config_spec: config.spec.clone(), - config_protocol: config.spec.protocol.clone(), - instance_map: instance_map.clone(), - }; - periodic_dicovery - .update_connectivity_status(&mock, ¤tly_visible_instances, shared) + // Make sure registration server has started + akri_shared::uds::unix_stream::try_connect(&endpoint) .await .unwrap(); - let unwrapped_instance_map = instance_map.lock().await.clone(); - for (_, instance_info) in unwrapped_instance_map { - assert_eq!( - instance_info.connectivity_status, - ConnectivityStatus::Online - ); - } - } - /// Checks the termination case for when an unshared instance is still offline upon the second periodic discovery - /// Must be run independently since writing "OFFLINE" to DEBUG_ECHO_AVAILABILITY_CHECK_PATH in order to emulate - /// offline devices can clobber other tests run in parallel that are looking for online devices. 
- /// Run with: cargo test -- test_do_periodic_discovery --ignored - #[tokio::test] - #[ignore] - async fn test_do_periodic_discovery() { - let _ = env_logger::builder().is_test(true).try_init(); - // Set env vars - env::set_var("AGENT_NODE_NAME", "node-a"); - env::set_var("ENABLE_DEBUG_ECHO", "yes"); - // Make each get_instances check return an empty list of instances - let path_to_config = "../test/json/config-a.json"; - let dcc_json = fs::read_to_string(path_to_config).expect("Unable to read file"); - let config: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); - let config_name = config.metadata.name.clone(); - let config_uid = config.metadata.uid.as_ref().unwrap().clone(); - let config_namespace = config.metadata.namespace.as_ref().unwrap().clone(); - let protocol = config.spec.protocol.clone(); - let mut visible_discovery_results = Vec::new(); - let mut list_and_watch_message_receivers = Vec::new(); - let (mut watch_periph_tx, watch_periph_rx) = mpsc::channel(2); - let (finished_watching_tx, mut finished_watching_rx) = broadcast::channel(2); - let mut mock = MockKubeImpl::new(); + // Add Discovery Handler to map + let dh_name = "debugEcho"; + let discovery_handler_map = Arc::new(std::sync::Mutex::new(HashMap::new())); + add_discovery_handler_to_map(dh_name, &dh_endpoint, false, discovery_handler_map.clone()); - // Set ConnectivityStatus of all instances in InstanceMap initially to Offline - let instance_map: InstanceMap = build_instance_map( - &config, - &mut visible_discovery_results, - &mut list_and_watch_message_receivers, - ConnectivityStatus::Offline(Instant::now()), + // Set up, run, and test handle_config_add + // Discovery Handler should create an instance and be marked as Active + let path_to_config = "../test/yaml/config-a.yaml"; + let config_yaml = fs::read_to_string(path_to_config).expect("Unable to read file"); + let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let config_name = config.metadata.name.clone(); + let config_map: ConfigMap = Arc::new(Mutex::new(HashMap::new())); + let first_add_handle = run_and_test_handle_config_add( + discovery_handler_map.clone(), + config_map.clone(), + config.clone(), + &dh_endpoint, + dh_name, ) .await; - // - // Assert that when an unshared instance is already offline it is terminated - // - mock.expect_delete_instance() - .times(2) - .returning(move |_, _| Ok(())); - let instance_map_clone = instance_map.clone(); - // Change instances to be offline - fs::write(DEBUG_ECHO_AVAILABILITY_CHECK_PATH, OFFLINE).unwrap(); - tokio::spawn(async move { - let periodic_dicovery = PeriodicDiscovery { - config_name: config_name.clone(), - config_uid: config_uid.clone(), - config_namespace: config_namespace.clone(), - config_spec: config.spec, - config_protocol: protocol, - instance_map: instance_map_clone, - }; - let device_plugin_temp_dir = - Builder::new().prefix("device-plugins-").tempdir().unwrap(); - let device_plugin_temp_dir_path = device_plugin_temp_dir.path().to_str().unwrap(); - periodic_dicovery - .do_periodic_discovery( - &mock, - watch_periph_rx, - finished_watching_tx, - device_plugin_temp_dir_path, - ) - .await - .unwrap(); - }); - let mut tasks = Vec::new(); - for mut receiver in list_and_watch_message_receivers { - tasks.push(tokio::spawn(async move { - assert_eq!( - receiver.recv().await.unwrap(), - device_plugin_service::ListAndWatchMessageKind::End - ); - })); - } - futures::future::join_all(tasks).await; + let config_delete_config = config.clone(); + let config_delete_config_map = 
config_map.clone();
+        handle_config_delete(
+            &MockKubeInterface::new(),
+            &config_delete_config,
+            config_delete_config_map.clone(),
+        )
+        .await
+        .unwrap();
 
-        // Assert that all instances have been removed from the instance map
-        assert_eq!(instance_map.lock().await.len(), 0);
+        // Assert that config is removed from map after it has been deleted
+        assert!(!config_delete_config_map
+            .lock()
+            .await
+            .contains_key(&config_name));
+
+        // Assert that Discovery Handler is marked as Waiting
+        check_discovery_handler_status(
+            discovery_handler_map.clone(),
+            dh_name,
+            &dh_endpoint,
+            DiscoveryHandlerStatus::Waiting,
+        )
+        .await;
 
-        watch_periph_tx.send(()).await.unwrap();
-        // Assert that replies saying finished watching
-        assert!(finished_watching_rx.recv().await.is_ok());
+        let second_add_handle = run_and_test_handle_config_add(
+            discovery_handler_map.clone(),
+            config_map.clone(),
+            config.clone(),
+            &dh_endpoint,
+            dh_name,
+        )
+        .await;
+
+        // Assert that Discovery Handler is marked as Waiting
+        check_discovery_handler_status(
+            discovery_handler_map.clone(),
+            dh_name,
+            &dh_endpoint,
+            DiscoveryHandlerStatus::Waiting,
+        )
+        .await;
 
-        // Reset file to be online
-        fs::write(DEBUG_ECHO_AVAILABILITY_CHECK_PATH, "ONLINE").unwrap();
+        futures::future::join_all(vec![first_add_handle, second_add_handle]).await;
     }
 }
diff --git a/agent/src/util/constants.rs b/agent/src/util/constants.rs
index 4cf5f6d95..1cfd651b0 100644
--- a/agent/src/util/constants.rs
+++ b/agent/src/util/constants.rs
@@ -1,9 +1,9 @@
-/// For unshared devices, Healthy means the device is discoverable.
-/// For shared devices, Healthy means the device is either unused or used by this node.
+/// For unshared devices, Healthy means the device is discoverable. For shared devices, Healthy means the device is
+/// either unused or used by this node.
 pub const HEALTHY: &str = "Healthy";
 
-/// For unshared devices, Unhealthy means the device is not discoverable.
-/// For shared devices, UnHealthy means that the device shared and used already by another node.
+/// For unshared devices, Unhealthy means the device is not discoverable. For shared devices, Unhealthy means that the
+/// device is already shared and used by another node.
 pub const UNHEALTHY: &str = "Unhealthy";
 
 /// Current version of the API supported by kubelet.
@@ -18,14 +18,62 @@ pub const KUBELET_SOCKET: &str = "/var/lib/kubelet/device-plugins/kubelet.sock";
 /// Maximum length of time `list_and_watch` will sleep before sending kubelet another list of virtual devices
 pub const LIST_AND_WATCH_SLEEP_SECS: u64 = 60;
 
-/// Length of time to sleep between instance discovery checks
-pub const DISCOVERY_DELAY_SECS: u64 = 10;
-
 /// Length of time a shared instance can be offline before it's `DevicePluginService` is shutdown.
 pub const SHARED_INSTANCE_OFFLINE_GRACE_PERIOD_SECS: u64 = 300;
 
 /// Length of time to sleep between slot reconciliation checks
 pub const SLOT_RECONCILIATION_CHECK_DELAY_SECS: u64 = 10;
 
-/// Length of time a slot can be unused before slot reconciliation relaims it
+/// Length of time a slot can be unused before slot reconciliation reclaims it
 pub const SLOT_RECONCILIATION_SLOT_GRACE_PERIOD_SECS: u64 = 300;
+
+/// Label of environment variable that, when set, enables the embedded debug echo discovery handler
+pub const ENABLE_DEBUG_ECHO_LABEL: &str = "ENABLE_DEBUG_ECHO";
+
+/// Maximum amount of time that may pass without being able to connect to a Discovery Handler before it is removed
+/// from the map of registered Discovery Handlers.
+pub const DISCOVERY_HANDLER_OFFLINE_GRACE_PERIOD_SECS: u64 = 300;
+
+/// Capacity of channel over which `DevicePluginService::list_and_watch` sends updates to kubelet about "virtual" device
+/// health of an instance. The kubelet Device Plugin manager should receive each message instantly; however, some
+/// buffer is provided just in case.
+pub const KUBELET_UPDATE_CHANNEL_CAPACITY: usize = 4;
+
+/// Capacity of channel over which the Agent Registration updates `DiscoveryOperators` when new `DiscoveryHandlers`
+/// register. Tokio does not provide an unbounded broadcast channel in order to prevent the channel from growing
+/// infinitely due to a "slow receiver". It is hard to determine an appropriate channel size, since the number of
+/// `DiscoveryOperator` receivers (equivalent to the number of applied Akri Configurations) and the frequency of sends
+/// (equivalent to the number of registering `DiscoveryHandlers`) are unpredictable. Therefore, a large size is chosen
+/// out of caution.
+pub const NEW_DISCOVERY_HANDLER_CHANNEL_CAPACITY: usize = 15;
+
+/// Capacity of channel over which `DevicePluginService::list_and_watch` receives messages to
+/// `ListAndWatchMessageKind::Continue` (prematurely send updates to kubelet) or `ListAndWatchMessageKind::End`
+/// (terminate itself). `list_and_watch` receives messages asynchronously from `DevicePluginService.allocate`,
+/// `DiscoveryOperator.update_connectivity_status`, and `handle_config_delete`. Messages are sent in response to a
+/// variety of events, such as an Instance going offline/online, a Configuration being deleted, or a slot being
+/// requested via allocate that is already taken, making it hard to determine the appropriate size of the channel. If a
+/// new message is put in the channel after capacity is already met, the oldest message is dropped, and dropping a
+/// `ListAndWatchMessageKind::End` would likely be unrecoverable. Tokio does not provide an unbounded broadcast channel
+/// in order to prevent the channel from growing infinitely due to a "slow receiver", so a large channel size is chosen
+/// out of caution.
+pub const LIST_AND_WATCH_MESSAGE_CHANNEL_CAPACITY: usize = 15;
+
+/// Capacity of channel over which a `DevicePluginService` receives a shutdown signal. This is either sent by
+/// `DevicePluginBuilder::register` or `DevicePluginService::list_and_watch`. Capacity is set to handle the worst-case
+/// scenario, in which both send a message at the same time.
+pub const DEVICE_PLUGIN_SERVER_ENDER_CHANNEL_CAPACITY: usize = 2;
+
+/// Capacity of channel over which a `DiscoveryOperator` is notified to stop discovery for its Configuration. This
+/// signals it to tell each of its subtasks to stop discovery. The message is only sent once, upon Configuration deletion.
+pub const DISCOVERY_OPERATOR_STOP_DISCOVERY_CHANNEL_CAPACITY: usize = 1;
+
+/// Capacity of channel over which a `DiscoveryOperator` signals that it has stopped discovery and that a Configuration's
+/// Instances and associated `DevicePluginServices` can safely be deleted/terminated. There is only one sender
+/// (the `DiscoveryOperator`), which sends a single message.
+pub const DISCOVERY_OPERATOR_FINISHED_DISCOVERY_CHANNEL_CAPACITY: usize = 1;
+
+/// Capacity of channel over which a `DiscoveryOperator` is notified to stop (trying to make) a connection with a
+/// `DiscoveryHandler`. Sent once by the Agent Registration service when a `DiscoveryHandler` re-registers with a
+/// different registration request (edge case).
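The overflow behavior the capacity comments above guard against can be observed directly; a standalone sketch using the tokio 1.x broadcast API:

    use tokio::sync::broadcast;

    #[tokio::main]
    async fn main() {
        let (tx, mut rx) = broadcast::channel::<u8>(1);
        tx.send(1).unwrap(); // fills the single slot
        tx.send(2).unwrap(); // overwrites message 1 while rx is "slow"
        // The receiver is told it missed a message rather than receiving it.
        assert!(matches!(
            rx.recv().await,
            Err(broadcast::error::RecvError::Lagged(1))
        ));
        assert_eq!(rx.recv().await.unwrap(), 2);
    }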
+pub const CLOSE_DISCOVERY_HANDLER_CONNECTION_CHANNEL_CAPACITY: usize = 1;
diff --git a/agent/src/util/device_plugin_builder.rs b/agent/src/util/device_plugin_builder.rs
new file mode 100644
index 000000000..ed31124b2
--- /dev/null
+++ b/agent/src/util/device_plugin_builder.rs
@@ -0,0 +1,361 @@
+use super::{
+    constants::{
+        DEVICE_PLUGIN_PATH, DEVICE_PLUGIN_SERVER_ENDER_CHANNEL_CAPACITY, K8S_DEVICE_PLUGIN_VERSION,
+        KUBELET_SOCKET, LIST_AND_WATCH_MESSAGE_CHANNEL_CAPACITY,
+    },
+    device_plugin_service::{DevicePluginService, InstanceMap},
+    v1beta1,
+    v1beta1::{device_plugin_server::DevicePluginServer, registration_client, DevicePluginOptions},
+};
+use akri_discovery_utils::discovery::v0::Device;
+use akri_shared::{
+    akri::{configuration::KubeAkriConfig, AKRI_PREFIX},
+    uds::unix_stream,
+};
+use async_trait::async_trait;
+use futures::stream::TryStreamExt;
+use log::{info, trace};
+#[cfg(test)]
+use mockall::{automock, predicate::*};
+use std::{convert::TryFrom, env, path::Path, time::SystemTime};
+use tokio::{
+    net::UnixListener,
+    net::UnixStream,
+    sync::{broadcast, mpsc},
+    task,
+};
+use tonic::transport::{Endpoint, Server, Uri};
+use tower::service_fn;
+
+#[cfg_attr(test, automock)]
+#[async_trait]
+pub trait DevicePluginBuilderInterface: Send + Sync {
+    async fn build_device_plugin(
+        &self,
+        instance_name: String,
+        config: &KubeAkriConfig,
+        shared: bool,
+        instance_map: InstanceMap,
+        device: Device,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>>;
+
+    async fn serve(
+        &self,
+        device_plugin_service: DevicePluginService,
+        socket_path: String,
+        server_ender_receiver: mpsc::Receiver<()>,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>>;
+
+    async fn register(
+        &self,
+        capability_id: &str,
+        socket_name: &str,
+        instance_name: &str,
+        mut server_ender_sender: mpsc::Sender<()>,
+        kubelet_socket: &str,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>>;
+}
+
+/// For each Instance, builds a Device Plugin, registers it with the kubelet, and serves it over UDS.
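A hypothetical call site for the trait above, to show the intended flow; the instance name and `shared` flag here are invented for illustration:

    async fn example(
        builder: &dyn DevicePluginBuilderInterface,
        config: &KubeAkriConfig,
        instance_map: InstanceMap,
        device: Device,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
        // One device plugin is built, registered, and served per discovered Instance.
        builder
            .build_device_plugin("akri-instance-0".to_string(), config, true, instance_map, device)
            .await
    }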
+pub struct DevicePluginBuilder {} + +#[async_trait] +impl DevicePluginBuilderInterface for DevicePluginBuilder { + /// This creates a new DevicePluginService for an instance and registers it with the kubelet + async fn build_device_plugin( + &self, + instance_name: String, + config: &KubeAkriConfig, + shared: bool, + instance_map: InstanceMap, + device: Device, + ) -> Result<(), Box> { + info!("build_device_plugin - entered for device {}", instance_name); + let capability_id: String = format!("{}/{}", AKRI_PREFIX, instance_name); + let unique_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?; + let device_endpoint: String = format!("{}-{}.sock", instance_name, unique_time.as_secs()); + let socket_path: String = Path::new(DEVICE_PLUGIN_PATH) + .join(device_endpoint.clone()) + .to_str() + .unwrap() + .to_string(); + let (list_and_watch_message_sender, _) = + broadcast::channel(LIST_AND_WATCH_MESSAGE_CHANNEL_CAPACITY); + let (server_ender_sender, server_ender_receiver) = + mpsc::channel(DEVICE_PLUGIN_SERVER_ENDER_CHANNEL_CAPACITY); + let device_plugin_service = DevicePluginService { + instance_name: instance_name.clone(), + endpoint: device_endpoint.clone(), + config: config.spec.clone(), + config_name: config.metadata.name.clone(), + config_uid: config.metadata.uid.as_ref().unwrap().clone(), + config_namespace: config.metadata.namespace.as_ref().unwrap().clone(), + shared, + node_name: env::var("AGENT_NODE_NAME")?, + instance_map, + list_and_watch_message_sender, + server_ender_sender: server_ender_sender.clone(), + device, + }; + + self.serve( + device_plugin_service, + socket_path.clone(), + server_ender_receiver, + ) + .await?; + + self.register( + &capability_id, + &device_endpoint, + &instance_name, + server_ender_sender, + KUBELET_SOCKET, + ) + .await?; + + Ok(()) + } + + // This starts a DevicePluginServer + async fn serve( + &self, + device_plugin_service: DevicePluginService, + socket_path: String, + server_ender_receiver: mpsc::Receiver<()>, + ) -> Result<(), Box> { + info!( + "serve - creating a device plugin server that will listen at: {}", + socket_path + ); + tokio::fs::create_dir_all(Path::new(&socket_path[..]).parent().unwrap()) + .await + .expect("Failed to create dir at socket path"); + let mut uds = + UnixListener::bind(socket_path.clone()).expect("Failed to bind to socket path"); + let service = DevicePluginServer::new(device_plugin_service); + let socket_path_to_delete = socket_path.clone(); + task::spawn(async move { + Server::builder() + .add_service(service) + .serve_with_incoming_shutdown( + uds.incoming().map_ok(unix_stream::UnixStream), + shutdown_signal(server_ender_receiver), + ) + .await + .unwrap(); + trace!( + "serve - gracefully shutdown ... deleting socket {}", + socket_path_to_delete + ); + // Socket may already be deleted in the case of the kubelet restart + std::fs::remove_file(socket_path_to_delete).unwrap_or(()); + }); + + akri_shared::uds::unix_stream::try_connect(&socket_path).await?; + Ok(()) + } + + /// This registers DevicePlugin with the kubelet. + /// During registration, the device plugin must send + /// (1) name of unix socket, + /// (2) Device-Plugin API it was built against (v1beta1), + /// (3) resource name akri.sh/device_id. + /// If registration request to the kubelet fails, terminates DevicePluginService. 
+ async fn register( + &self, + capability_id: &str, + socket_name: &str, + instance_name: &str, + mut server_ender_sender: mpsc::Sender<()>, + kubelet_socket: &str, + ) -> Result<(), Box> { + info!( + "register - entered for Instance {} and socket_name: {}", + capability_id, socket_name + ); + let op = DevicePluginOptions { + pre_start_required: false, + }; + + // We will ignore this dummy uri because UDS does not use it. + let kubelet_socket_closure = kubelet_socket.to_string(); + let channel = Endpoint::try_from("dummy://[::]:50051")? + .connect_with_connector(service_fn(move |_: Uri| { + UnixStream::connect(kubelet_socket_closure.clone()) + })) + .await?; + let mut registration_client = registration_client::RegistrationClient::new(channel); + + let register_request = tonic::Request::new(v1beta1::RegisterRequest { + version: K8S_DEVICE_PLUGIN_VERSION.into(), + endpoint: socket_name.to_string(), + resource_name: capability_id.to_string(), + options: Some(op), + }); + trace!( + "register - before call to register with the kubelet at socket {}", + kubelet_socket + ); + + // If fail to register with the kubelet, terminate device plugin + if registration_client + .register(register_request) + .await + .is_err() + { + trace!( + "register - failed to register Instance {} with the kubelet ... terminating device plugin", + instance_name + ); + server_ender_sender.send(()).await?; + } + Ok(()) + } +} + +/// This acts as a signal future to gracefully shutdown DevicePluginServer upon its completion. +/// Ends when it receives message from `list_and_watch`. +async fn shutdown_signal(mut server_ender_receiver: mpsc::Receiver<()>) { + match server_ender_receiver.recv().await { + Some(_) => trace!( + "shutdown_signal - received signal ... device plugin service gracefully shutting down" + ), + None => trace!("shutdown_signal - connection to server_ender_sender closed ... error"), + } +} + +#[cfg(test)] +pub mod tests { + use super::super::v1beta1::{ + registration_server::{Registration, RegistrationServer}, + Empty, RegisterRequest, + }; + use super::*; + use tempfile::Builder; + + struct MockRegistration { + pub return_error: bool, + } + + // Mock implementation of kubelet's registration service for tests. + // Can be configured with its `return_error` field to return an error. 
+ #[async_trait] + impl Registration for MockRegistration { + async fn register( + &self, + _request: tonic::Request, + ) -> Result, tonic::Status> { + if self.return_error { + Err(tonic::Status::invalid_argument( + "mock discovery handler error", + )) + } else { + Ok(tonic::Response::new(Empty {})) + } + } + } + + #[tokio::test] + async fn test_register() { + let device_plugins_dirs = Builder::new().prefix("device-plugins").tempdir().unwrap(); + let kubelet_socket = device_plugins_dirs + .path() + .join("kubelet.sock") + .to_str() + .unwrap() + .to_string(); + + // Start kubelet registration server + let mut uds = + UnixListener::bind(kubelet_socket.clone()).expect("Failed to bind to socket path"); + + let registration = MockRegistration { + return_error: false, + }; + let service = RegistrationServer::new(registration); + task::spawn(async move { + Server::builder() + .add_service(service) + .serve_with_incoming(uds.incoming().map_ok(unix_stream::UnixStream)) + .await + .unwrap(); + }); + + // Make sure registration server has started + akri_shared::uds::unix_stream::try_connect(&kubelet_socket) + .await + .unwrap(); + + let device_plugin_builder = DevicePluginBuilder {}; + let (server_ender_sender, _) = mpsc::channel(1); + // Test successful registration + assert!(device_plugin_builder + .register( + "random_instance_id", + "socket.sock", + "random_instance", + server_ender_sender, + &kubelet_socket + ) + .await + .is_ok()); + } + + #[tokio::test] + async fn test_register_error() { + let device_plugin_builder = DevicePluginBuilder {}; + let (server_ender_sender, mut server_ender_receiver) = mpsc::channel(1); + let device_plugins_dirs = Builder::new().prefix("device-plugins").tempdir().unwrap(); + let kubelet_socket = device_plugins_dirs + .path() + .join("kubelet.sock") + .to_str() + .unwrap() + .to_string(); + + // Try to register when no registration service exists + assert!(device_plugin_builder + .register( + "random_instance_id", + "socket.sock", + "random_instance", + server_ender_sender.clone(), + &kubelet_socket + ) + .await + .is_err()); + + // Start kubelet registration server + let mut uds = + UnixListener::bind(kubelet_socket.clone()).expect("Failed to bind to socket path"); + let registration = MockRegistration { return_error: true }; + let service = RegistrationServer::new(registration); + task::spawn(async move { + Server::builder() + .add_service(service) + .serve_with_incoming(uds.incoming().map_ok(unix_stream::UnixStream)) + .await + .unwrap(); + }); + + // Make sure registration server has started + akri_shared::uds::unix_stream::try_connect(&kubelet_socket) + .await + .unwrap(); + + // Test that when registration fails, no error is thrown but the DevicePluginService is signaled to shutdown + assert!(device_plugin_builder + .register( + "random_instance_id", + "socket.sock", + "random_instance", + server_ender_sender, + &kubelet_socket + ) + .await + .is_ok()); + // Make sure DevicePluginService is signaled to shutdown + server_ender_receiver.recv().await.unwrap(); + } +} diff --git a/agent/src/util/device_plugin_service.rs b/agent/src/util/device_plugin_service.rs index 6d9440f21..c0566f702 100644 --- a/agent/src/util/device_plugin_service.rs +++ b/agent/src/util/device_plugin_service.rs @@ -1,44 +1,34 @@ use super::constants::{ - HEALTHY, K8S_DEVICE_PLUGIN_VERSION, KUBELET_SOCKET, LIST_AND_WATCH_SLEEP_SECS, UNHEALTHY, + HEALTHY, KUBELET_UPDATE_CHANNEL_CAPACITY, LIST_AND_WATCH_SLEEP_SECS, UNHEALTHY, }; use super::v1beta1; use super::v1beta1::{ - 
device_plugin_server::{DevicePlugin, DevicePluginServer}, - registration_client, AllocateRequest, AllocateResponse, DevicePluginOptions, Empty, - ListAndWatchResponse, PreStartContainerRequest, PreStartContainerResponse, + device_plugin_server::DevicePlugin, AllocateRequest, AllocateResponse, DevicePluginOptions, + DeviceSpec, Empty, ListAndWatchResponse, Mount, PreStartContainerRequest, + PreStartContainerResponse, }; +use akri_discovery_utils::discovery::v0::Device; use akri_shared::{ akri::{ - configuration::{Configuration, ProtocolHandler}, + configuration::Configuration, instance::Instance, retry::{random_delay, MAX_INSTANCE_UPDATE_TRIES}, - AKRI_PREFIX, AKRI_SLOT_ANNOTATION_NAME, + AKRI_SLOT_ANNOTATION_NAME, }, k8s, k8s::KubeInterface, }; -use futures::stream::TryStreamExt; use log::{error, info, trace}; -use std::{ - collections::HashMap, - convert::TryFrom, - env, - path::Path, - sync::Arc, - time::{Duration, Instant, SystemTime, UNIX_EPOCH}, -}; +#[cfg(test)] +use mock_instant::Instant; +#[cfg(not(test))] +use std::time::Instant; +use std::{collections::HashMap, sync::Arc, time::Duration}; use tokio::{ - net::UnixListener, - net::UnixStream, sync::{broadcast, mpsc, Mutex}, - task, - time::{delay_for, timeout}, -}; -use tonic::{ - transport::{Endpoint, Server, Uri}, - Code, Request, Response, Status, + time::timeout, }; -use tower::service_fn; +use tonic::{Code, Request, Response, Status}; /// Message sent in channel to `list_and_watch`. /// Dictates what action `list_and_watch` should take upon being awoken. @@ -50,9 +40,9 @@ pub enum ListAndWatchMessageKind { End, } -/// Describes the discoverability of an instance for this node +/// Describes whether an instance was discovered or the time at which it was no longer discovered. #[derive(PartialEq, Debug, Clone)] -pub enum ConnectivityStatus { +pub enum InstanceConnectivityStatus { /// Was discovered Online, /// Could not be discovered. Instant contains time at which it was no longer discovered. @@ -64,8 +54,8 @@ pub enum ConnectivityStatus { pub struct InstanceInfo { /// Sender to tell `list_and_watch` to either prematurely continue looping or end pub list_and_watch_message_sender: broadcast::Sender, - /// Instance's `ConnectivityStatus` - pub connectivity_status: ConnectivityStatus, + /// Instance's `InstanceConnectivityStatus` + pub connectivity_status: InstanceConnectivityStatus, } pub type InstanceMap = Arc>>; @@ -81,32 +71,34 @@ pub type InstanceMap = Arc>>; #[derive(Clone)] pub struct DevicePluginService { /// Instance CRD name - instance_name: String, + pub instance_name: String, /// Socket endpoint - endpoint: String, + pub endpoint: String, /// Instance's Configuration - config: Configuration, + pub config: Configuration, /// Name of Instance's Configuration CRD - config_name: String, + pub config_name: String, /// UID of Instance's Configuration CRD - config_uid: String, + pub config_uid: String, /// Namespace of Instance's Configuration CRD - config_namespace: String, + pub config_namespace: String, /// Instance is [not]shared - shared: bool, + pub shared: bool, /// Hostname of node this Device Plugin is running on - node_name: String, - /// Information that must be communicated with broker. Stored in Instance CRD as metadata. 
- instance_properties: HashMap, + pub node_name: String, /// Map of all Instances that have the same Configuration CRD as this one - instance_map: InstanceMap, + pub instance_map: InstanceMap, /// Receiver for list_and_watch continue or end messages /// Note: since the tonic grpc generated list_and_watch definition takes in &self, /// using broadcast sender instead of mpsc receiver /// Can clone broadcast sender and subscribe receiver to use in spawned thread in list_and_watch - list_and_watch_message_sender: broadcast::Sender, + pub list_and_watch_message_sender: broadcast::Sender, /// Upon send, terminates function that acts as the shutdown signal for this service - server_ender_sender: mpsc::Sender<()>, + pub server_ender_sender: mpsc::Sender<()>, + /// Device that the instance represents. + /// Contains information about environment variables and volumes that should be mounted + /// into requesting Pods. + pub device: Device, } #[tonic::async_trait] @@ -144,7 +136,8 @@ impl DevicePlugin for DevicePluginService { let mut list_and_watch_message_receiver = self.list_and_watch_message_sender.subscribe(); // Create a channel that list_and_watch can periodically send updates to kubelet on - let (mut kubelet_update_sender, kubelet_update_receiver) = mpsc::channel(4); + let (mut kubelet_update_sender, kubelet_update_receiver) = + mpsc::channel(KUBELET_UPDATE_CHANNEL_CAPACITY); // Spawn thread so can send kubelet the receiving end of the channel to listen on tokio::spawn(async move { let mut keep_looping = true; @@ -323,10 +316,12 @@ impl DevicePluginService { } // Successfully reserved device_usage_slot[s] for this node. // Add response to list of responses + let broker_properties = + get_all_broker_properties(&self.config.broker_properties, &self.device.properties); let response = build_container_allocate_response( + broker_properties, akri_annotations, - &self.instance_properties, - &self.config.protocol, + &self.device, ); container_responses.push(response); } @@ -357,7 +352,7 @@ fn get_slot_value( instance: &Instance, ) -> Result { if let Some(allocated_node) = instance.device_usage.get(device_usage_id) { - if allocated_node == "" { + if allocated_node.is_empty() { Ok(node_name.to_string()) } else if allocated_node == node_name { Ok("".to_string()) @@ -445,36 +440,40 @@ async fn try_update_instance_device_usage( Ok(()) } -/// This sets the volume mounts and environment variables according to the instance's protocol. +/// This sets the volume mounts and environment variables according to the instance's `DiscoveryHandler`. 
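+/// As an illustrative sketch (values are hypothetical): a `Device` whose `mounts` contain
+/// `{container_path: "/dev/video0", host_path: "/dev/video0", read_only: true}` produces a
+/// `ContainerAllocateResponse` carrying the equivalent v1beta1 `Mount`, with `envs` set to
+/// the aggregated broker properties.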
fn build_container_allocate_response( + broker_properties: HashMap, annotations: HashMap, - instance_properties: &HashMap, - protocol: &ProtocolHandler, + device: &Device, ) -> v1beta1::ContainerAllocateResponse { - let mut mounts: Vec = Vec::new(); - - // Set mounts according to protocol - match protocol { - ProtocolHandler::udev(_handler_config) => { - trace!("get_volumes_and_mounts - setting volumes and mounts for udev protocol"); - mounts = instance_properties - .iter() - .map(|(_id, devpath)| v1beta1::Mount { - container_path: devpath.clone(), - host_path: devpath.clone(), - read_only: true, - }) - .collect(); - } - _ => trace!("get_volumes_and_mounts - no mounts or volumes required by this protocol"), - } + // Cast v0 discovery Mount and DeviceSpec types to v1beta1 DevicePlugin types + let mounts: Vec = device + .mounts + .clone() + .into_iter() + .map(|mount| Mount { + container_path: mount.container_path, + host_path: mount.host_path, + read_only: mount.read_only, + }) + .collect(); + let device_specs: Vec = device + .device_specs + .clone() + .into_iter() + .map(|device_spec| DeviceSpec { + container_path: device_spec.container_path, + host_path: device_spec.host_path, + permissions: device_spec.permissions, + }) + .collect(); - // Create response, setting environment variables to be an instance's properties (specified by protocol) + // Create response, setting environment variables to be an instance's properties. v1beta1::ContainerAllocateResponse { annotations, mounts, - envs: instance_properties.clone(), - ..Default::default() + devices: device_specs, + envs: broker_properties, } } @@ -506,8 +505,10 @@ async fn try_create_instance( shared: dps.shared, nodes: vec![dps.node_name.clone()], device_usage, - metadata: dps.instance_properties.clone(), - rbac: "rbac".to_string(), + broker_properties: get_all_broker_properties( + &dps.config.broker_properties, + &dps.device.properties, + ), }; // Try up to MAX_INSTANCE_UPDATE_TRIES to create or update instance, breaking on success @@ -588,7 +589,7 @@ async fn try_create_instance( dps.instance_name.clone(), InstanceInfo { list_and_watch_message_sender: dps.list_and_watch_message_sender.clone(), - connectivity_status: ConnectivityStatus::Online, + connectivity_status: InstanceConnectivityStatus::Online, }, ); @@ -627,7 +628,7 @@ async fn build_list_and_watch_response( .get(&dps.instance_name) .unwrap() .connectivity_status - != ConnectivityStatus::Online + != InstanceConnectivityStatus::Online { trace!("build_list_and_watch_response - device for Instance {} is offline ... 
returning unhealthy devices", dps.instance_name); return Ok(build_unhealthy_virtual_devices( @@ -689,12 +690,12 @@ fn build_virtual_devices( let mut devices: Vec = Vec::new(); for (device_name, allocated_node) in device_usage { // Throw error if unshared resource is reserved by another node - if !shared && allocated_node != "" && allocated_node != node_name { + if !shared && !allocated_node.is_empty() && allocated_node != node_name { panic!("build_virtual_devices - unshared device reserved by a different node"); } // Advertise the device as Unhealthy if it is // USED by !this_node && SHARED - let unhealthy = shared && allocated_node != "" && allocated_node != node_name; + let unhealthy = shared && !allocated_node.is_empty() && allocated_node != node_name; let health = if unhealthy { UNHEALTHY.to_string() } else { @@ -721,7 +722,7 @@ pub async fn terminate_device_plugin_service( instance_map: InstanceMap, ) -> Result<(), Box> { let mut instance_map = instance_map.lock().await; - trace!( + info!( "terminate_device_plugin_service -- forcing list_and_watch to end for Instance {}", instance_name ); @@ -740,198 +741,6 @@ pub async fn terminate_device_plugin_service( Ok(()) } -/// This creates a new DevicePluginService for an instance and registers it with kubelet -pub async fn build_device_plugin( - instance_name: String, - config_name: String, - config_uid: String, - config_namespace: String, - config: Configuration, - shared: bool, - instance_properties: HashMap, - instance_map: InstanceMap, - device_plugin_path: &str, -) -> Result<(), Box> { - info!("build_device_plugin - entered for device {}", instance_name); - let capability_id: String = format!("{}/{}", AKRI_PREFIX, instance_name); - let unique_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)?; - let device_endpoint: String = format!("{}-{}.sock", instance_name, unique_time.as_secs()); - let socket_path: String = Path::new(device_plugin_path) - .join(device_endpoint.clone()) - .to_str() - .unwrap() - .to_string(); - // Channel capacity set to 6 because 3 possible senders (allocate, update_connectivity_status, and handle_config_delete) - // and and receiver only periodically checks channel - let (list_and_watch_message_sender, _) = broadcast::channel(6); - // Channel capacity set to 2 because worst case both register and list_and_watch send messages at same time and receiver is always listening - let (server_ender_sender, server_ender_receiver) = mpsc::channel(2); - let device_plugin_service = DevicePluginService { - instance_name: instance_name.clone(), - endpoint: device_endpoint.clone(), - config, - config_name: config_name.clone(), - config_uid: config_uid.clone(), - config_namespace: config_namespace.clone(), - shared, - node_name: env::var("AGENT_NODE_NAME")?, - instance_properties, - instance_map: instance_map.clone(), - list_and_watch_message_sender: list_and_watch_message_sender.clone(), - server_ender_sender: server_ender_sender.clone(), - }; - - serve( - device_plugin_service, - socket_path.clone(), - server_ender_receiver, - ) - .await?; - - register( - capability_id, - device_endpoint, - &instance_name, - server_ender_sender, - ) - .await?; - - Ok(()) -} - -/// This acts as a signal future to gracefully shutdown DevicePluginServer upon its completion. -/// Ends when it receives message from `list_and_watch`. -async fn shutdown_signal(mut server_ender_receiver: mpsc::Receiver<()>) { - match server_ender_receiver.recv().await { - Some(_) => trace!( - "shutdown_signal - received signal ... 
device plugin service gracefully shutting down" - ), - None => trace!("shutdown_signal - connection to server_ender_sender closed ... error"), - } -} - -// This serves DevicePluginServer -async fn serve( - device_plugin_service: DevicePluginService, - socket_path: String, - server_ender_receiver: mpsc::Receiver<()>, -) -> Result<(), Box> { - info!( - "serve - creating a device plugin server that will listen at: {}", - socket_path - ); - tokio::fs::create_dir_all(Path::new(&socket_path[..]).parent().unwrap()) - .await - .expect("Failed to create dir at socket path"); - let mut uds = UnixListener::bind(socket_path.clone()).expect("Failed to bind to socket path"); - let service = DevicePluginServer::new(device_plugin_service); - let socket_path_to_delete = socket_path.clone(); - task::spawn(async move { - Server::builder() - .add_service(service) - .serve_with_incoming_shutdown( - uds.incoming().map_ok(unix::UnixStream), - shutdown_signal(server_ender_receiver), - ) - .await - .unwrap(); - trace!( - "serve - gracefully shutdown ... deleting socket {}", - socket_path_to_delete - ); - // Socket may already be deleted in the case of kubelet restart - std::fs::remove_file(socket_path_to_delete).unwrap_or(()); - }); - - // Test that server is running, trying for at most 10 seconds - // Similar to grpc.timeout, which is yet to be implemented for tonic - // See issue: https://github.com/hyperium/tonic/issues/75 - let mut connected = false; - let start = SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time went backwards") - .as_secs(); - let start_plus_10 = start + 10; - - while (SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Time went backwards") - .as_secs() - < start_plus_10) - && !connected - { - let path = socket_path.clone(); - if let Ok(_v) = Endpoint::try_from("lttp://[::]:50051")? - .connect_with_connector(service_fn(move |_: Uri| UnixStream::connect(path.clone()))) - .await - { - connected = true - } else { - delay_for(Duration::from_secs(1)).await - } - } - - if !connected { - error!( - "serve - could not connect to Device Plugin server on socket {}", - socket_path - ); - } - Ok(()) -} - -/// This registers DevicePlugin with kubelet. -/// During registration, the device plugin must send -/// (1) name of unix socket, -/// (2) Device-Plugin API it was built against (v1beta1), -/// (3) resource name akri.sh/device_id. -/// If registration request to kubelet fails, terminates DevicePluginService. -async fn register( - capability_id: String, - socket_name: String, - instance_name: &str, - mut server_ender_sender: mpsc::Sender<()>, -) -> Result<(), Box> { - info!( - "register - entered for Instance {} and socket_name: {}", - capability_id, socket_name - ); - let op = DevicePluginOptions { - pre_start_required: false, - }; - - // lttp://... is a fake uri that is unused (in service_fn) but necessary for uds connection - let channel = Endpoint::try_from("lttp://[::]:50051")? 
- .connect_with_connector(service_fn(|_: Uri| UnixStream::connect(KUBELET_SOCKET))) - .await?; - let mut registration_client = registration_client::RegistrationClient::new(channel); - - let register_request = tonic::Request::new(v1beta1::RegisterRequest { - version: K8S_DEVICE_PLUGIN_VERSION.into(), - endpoint: socket_name, - resource_name: capability_id, - options: Some(op), - }); - trace!( - "register - before call to register with Kubelet at socket {}", - KUBELET_SOCKET - ); - - // If fail to register with kubelet, terminate device plugin - if registration_client - .register(register_request) - .await - .is_err() - { - trace!( - "register - failed to register Instance {} with kubelet ... terminating device plugin", - instance_name - ); - server_ender_sender.send(()).await?; - } - Ok(()) -} - /// This creates an Instance's unique name pub fn get_device_instance_name(id: &str, config_name: &str) -> String { format!("{}-{}", config_name, &id) @@ -939,70 +748,40 @@ pub fn get_device_instance_name(id: &str, config_name: &str) -> String { .replace("/", "-") } -/// Module to enable UDS with tonic grpc. -/// This is unix only since the underlying UnixStream and UnixListener libraries are unix only. -#[cfg(unix)] -mod unix { - use std::{ - pin::Pin, - task::{Context, Poll}, - }; - - use tokio::io::{AsyncRead, AsyncWrite}; - use tonic::transport::server::Connected; - - #[derive(Debug)] - pub struct UnixStream(pub tokio::net::UnixStream); - - impl Connected for UnixStream {} - - impl AsyncRead for UnixStream { - fn poll_read( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &mut [u8], - ) -> Poll> { - Pin::new(&mut self.0).poll_read(cx, buf) - } - } - - impl AsyncWrite for UnixStream { - fn poll_write( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - buf: &[u8], - ) -> Poll> { - Pin::new(&mut self.0).poll_write(cx, buf) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(&mut self.0).poll_flush(cx) - } - - fn poll_shutdown( - mut self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - Pin::new(&mut self.0).poll_shutdown(cx) - } - } +// Aggregate a Configuration and Device's properties so they can be displayed in an Instance and injected into brokers as environment variables. 
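+// Because `device_properties` is chained after `configuration_properties`, a key present in
+// both maps resolves to the Device's value. Illustrative example (values hypothetical):
+// config {"OVERWRITE": "111"} merged with device {"OVERWRITE": "222"} yields {"OVERWRITE": "222"}.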
+pub fn get_all_broker_properties( + configuration_properties: &HashMap, + device_properties: &HashMap, +) -> HashMap { + configuration_properties + .clone() + .into_iter() + .chain(device_properties.clone()) + .collect::>() } #[cfg(test)] mod device_plugin_service_tests { - use super::super::v1beta1::device_plugin_client::DevicePluginClient; + use super::super::{ + device_plugin_builder::{DevicePluginBuilder, DevicePluginBuilderInterface}, + v1beta1::device_plugin_client::DevicePluginClient, + }; use super::*; use akri_shared::akri::configuration::KubeAkriConfig; use akri_shared::{ akri::instance::{Instance, KubeAkriInstance}, - k8s::test_kube::MockKubeImpl, + k8s::MockKubeInterface, }; - use mockall::predicate::*; use std::{ + convert::TryFrom, fs, io::{Error, ErrorKind}, + time::SystemTime, }; use tempfile::Builder; + use tokio::net::UnixStream; + use tonic::transport::{Endpoint, Uri}; + use tower::service_fn; enum NodeName { ThisNode, @@ -1016,11 +795,11 @@ mod device_plugin_service_tests { } fn configure_find_instance( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, result_file: &'static str, instance_name: String, instance_namespace: String, - device_usage_node: &'static str, + device_usage_node: String, node_name: NodeName, ) { let instance_name_clone = instance_name.clone(); @@ -1046,14 +825,14 @@ mod device_plugin_service_tests { } fn create_device_plugin_service( - connectivity_status: ConnectivityStatus, + connectivity_status: InstanceConnectivityStatus, add_to_instance_map: bool, ) -> (DevicePluginService, DevicePluginServiceReceivers) { - let path_to_config = "../test/json/config-a.json"; - let kube_akri_config_json = + let path_to_config = "../test/yaml/config-a.yaml"; + let kube_akri_config_yaml = fs::read_to_string(path_to_config).expect("Unable to read file"); let kube_akri_config: KubeAkriConfig = - serde_json::from_str(&kube_akri_config_json).unwrap(); + serde_yaml::from_str(&kube_akri_config_yaml).unwrap(); let device_instance_name = get_device_instance_name("b494b6", &kube_akri_config.metadata.name); let unique_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH); @@ -1075,7 +854,14 @@ mod device_plugin_service_tests { map.insert(device_instance_name.clone(), instance_info); } let instance_map: InstanceMap = Arc::new(Mutex::new(map)); - + let mut properties = HashMap::new(); + properties.insert("DEVICE_LOCATION_INFO".to_string(), "endpoint".to_string()); + let device = Device { + id: "n/a".to_string(), + properties, + mounts: Vec::new(), + device_specs: Vec::new(), + }; let dps = DevicePluginService { instance_name: device_instance_name, endpoint: device_endpoint, @@ -1085,10 +871,10 @@ mod device_plugin_service_tests { config_namespace: kube_akri_config.metadata.namespace.unwrap(), shared: false, node_name: "node-a".to_string(), - instance_properties: HashMap::new(), instance_map, list_and_watch_message_sender, server_ender_sender, + device, }; ( dps, @@ -1113,6 +899,16 @@ mod device_plugin_service_tests { } } + fn get_kube_not_found_error() -> kube::Error { + // Mock error thrown when instance not found + kube::Error::Api(kube::ErrorResponse { + status: "Failure".to_string(), + message: "instances.akri.sh \"akri-blah-901a7b\" not found".to_string(), + reason: "NotFound".to_string(), + code: k8s::ERROR_NOT_FOUND, + }) + } + // Tests that instance names are formatted correctly #[test] fn test_get_device_instance_name() { @@ -1128,8 +924,26 @@ mod device_plugin_service_tests { ); } + // Test that a Device and Configuration's properties are 
aggregated and that + // a Device property overwrites a Configuration's. + #[test] + fn test_get_all_broker_properties() { + let mut device_properties = HashMap::new(); + device_properties.insert("ENDPOINT".to_string(), "123".to_string()); + device_properties.insert("OVERWRITE".to_string(), "222".to_string()); + let mut configuration_properties = HashMap::new(); + configuration_properties.insert("USE HD".to_string(), "true".to_string()); + configuration_properties.insert("OVERWRITE".to_string(), "111".to_string()); + let all_properties = + get_all_broker_properties(&configuration_properties, &device_properties); + assert_eq!(all_properties.len(), 3); + assert_eq!(all_properties.get("ENDPOINT").unwrap(), "123"); + assert_eq!(all_properties.get("USE HD").unwrap(), "true"); + assert_eq!(all_properties.get("OVERWRITE").unwrap(), "222"); + } + fn configure_find_configuration( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, config_name: String, config_namespace: String, ) { @@ -1139,11 +953,11 @@ mod device_plugin_service_tests { namespace == config_namespace && name == config_name }) .returning(move |_, _| { - let path_to_config = "../test/json/config-a.json"; - let kube_akri_config_json = + let path_to_config = "../test/yaml/config-a.yaml"; + let kube_akri_config_yaml = fs::read_to_string(path_to_config).expect("Unable to read file"); let kube_akri_config: KubeAkriConfig = - serde_json::from_str(&kube_akri_config_json).unwrap(); + serde_yaml::from_str(&kube_akri_config_yaml).unwrap(); Ok(kube_akri_config) }); } @@ -1153,8 +967,8 @@ mod device_plugin_service_tests { async fn test_try_create_instance() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, _device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, false); - let mut mock = MockKubeImpl::new(); + create_device_plugin_service(InstanceConnectivityStatus::Online, false); + let mut mock = MockKubeInterface::new(); configure_find_configuration( &mut mock, device_plugin_service.config_name.clone(), @@ -1169,10 +983,7 @@ mod device_plugin_service_tests { .withf(move |name: &str, namespace: &str| { namespace == config_namespace && name == instance_name }) - .returning(move |_, _| { - let error = Error::new(ErrorKind::InvalidInput, "Configuration doesn't exist"); - Err(Box::new(error)) - }); + .returning(move |_, _| Err(get_kube_not_found_error())); let instance_name = device_plugin_service.instance_name.clone(); let config_namespace = device_plugin_service.config_namespace.clone(); mock.expect_create_instance() @@ -1201,8 +1012,8 @@ mod device_plugin_service_tests { async fn test_try_create_instance_already_created() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, _device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, false); - let mut mock = MockKubeImpl::new(); + create_device_plugin_service(InstanceConnectivityStatus::Online, false); + let mut mock = MockKubeInterface::new(); configure_find_configuration( &mut mock, device_plugin_service.config_name.clone(), @@ -1213,7 +1024,7 @@ mod device_plugin_service_tests { "../test/json/local-instance.json", device_plugin_service.instance_name.clone(), device_plugin_service.config_namespace.clone(), - "", + String::new(), NodeName::OtherNode, ); let instance_name = device_plugin_service.instance_name.clone(); @@ -1244,8 +1055,8 @@ mod device_plugin_service_tests { async fn test_try_create_instance_already_created_no_update() { let _ = 
env_logger::builder().is_test(true).try_init(); let (device_plugin_service, _device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, false); - let mut mock = MockKubeImpl::new(); + create_device_plugin_service(InstanceConnectivityStatus::Online, false); + let mut mock = MockKubeInterface::new(); configure_find_configuration( &mut mock, device_plugin_service.config_name.clone(), @@ -1256,7 +1067,7 @@ mod device_plugin_service_tests { "../test/json/local-instance.json", device_plugin_service.instance_name.clone(), device_plugin_service.config_namespace.clone(), - "", + String::new(), NodeName::ThisNode, ); let dps = Arc::new(device_plugin_service); @@ -1275,10 +1086,10 @@ mod device_plugin_service_tests { async fn test_try_create_instance_no_config() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, _device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, false); + create_device_plugin_service(InstanceConnectivityStatus::Online, false); let config_name = device_plugin_service.config_name.clone(); let config_namespace = device_plugin_service.config_namespace.clone(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); mock.expect_find_configuration() .times(1) .withf(move |name: &str, namespace: &str| { @@ -1300,8 +1111,8 @@ mod device_plugin_service_tests { async fn test_try_create_instance_error() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, _device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, false); - let mut mock = MockKubeImpl::new(); + create_device_plugin_service(InstanceConnectivityStatus::Online, false); + let mut mock = MockKubeInterface::new(); configure_find_configuration( &mut mock, device_plugin_service.config_name.clone(), @@ -1316,7 +1127,7 @@ mod device_plugin_service_tests { .withf(move |name: &str, namespace: &str| { namespace == config_namespace && name == instance_name }) - .returning(move |_, _| Err(None.ok_or("failure")?)); + .returning(move |_, _| Err(get_kube_not_found_error())); let instance_name = device_plugin_service.instance_name.clone(); let config_namespace = device_plugin_service.config_namespace.clone(); mock.expect_create_instance() @@ -1346,7 +1157,7 @@ mod device_plugin_service_tests { async fn test_list_and_watch() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, false); + create_device_plugin_service(InstanceConnectivityStatus::Online, false); let device_plugin_temp_dir = Builder::new().prefix("device-plugins-").tempdir().unwrap(); let socket_path: String = device_plugin_temp_dir .path() @@ -1357,14 +1168,16 @@ mod device_plugin_service_tests { let list_and_watch_message_sender = device_plugin_service.list_and_watch_message_sender.clone(); let instance_name = device_plugin_service.instance_name.clone(); - serve( - device_plugin_service, - socket_path.clone(), - device_plugin_service_receivers.server_ender_receiver, - ) - .await - .unwrap(); - let channel = Endpoint::try_from("lttp://[::]:50051") + let device_plugin_builder = DevicePluginBuilder {}; + device_plugin_builder + .serve( + device_plugin_service, + socket_path.clone(), + device_plugin_service_receivers.server_ender_receiver, + ) + .await + .unwrap(); + let channel = Endpoint::try_from("dummy://[::]:50051") .unwrap() 
.connect_with_connector(service_fn(move |_: Uri| { UnixStream::connect(socket_path.clone()) @@ -1445,13 +1258,13 @@ mod device_plugin_service_tests { assert!(result.is_err()); } - // Tests when ConnectivityStatus is offline and unhealthy devices are returned + // Tests when InstanceConnectivityStatus is offline and unhealthy devices are returned #[tokio::test] async fn test_build_list_and_watch_response_offline() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, _device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Offline(Instant::now()), true); - let mock = MockKubeImpl::new(); + create_device_plugin_service(InstanceConnectivityStatus::Offline(Instant::now()), true); + let mock = MockKubeInterface::new(); let devices = build_list_and_watch_response(Arc::new(device_plugin_service), Arc::new(mock)) .await @@ -1466,19 +1279,16 @@ mod device_plugin_service_tests { async fn test_build_list_and_watch_response_no_instance() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, _device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, true); + create_device_plugin_service(InstanceConnectivityStatus::Online, true); let instance_name = device_plugin_service.instance_name.clone(); let instance_namespace = device_plugin_service.config_namespace.clone(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); mock.expect_find_instance() .times(1) .withf(move |name: &str, namespace: &str| { namespace == instance_namespace && name == instance_name }) - .returning(move |_, _| { - let error = Error::new(ErrorKind::InvalidInput, "Instance doesn't exist"); - Err(Box::new(error)) - }); + .returning(move |_, _| Err(get_kube_not_found_error())); let devices = build_list_and_watch_response(Arc::new(device_plugin_service), Arc::new(mock)) .await @@ -1493,16 +1303,16 @@ mod device_plugin_service_tests { async fn test_build_list_and_watch_response_no_instance_update() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, _device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, true); + create_device_plugin_service(InstanceConnectivityStatus::Online, true); let instance_name = device_plugin_service.instance_name.clone(); let instance_namespace = device_plugin_service.config_namespace.clone(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_find_instance( &mut mock, "../test/json/local-instance.json", instance_name.clone(), instance_namespace.clone(), - "", + String::new(), NodeName::ThisNode, ); let devices = @@ -1512,23 +1322,20 @@ mod device_plugin_service_tests { check_devices(instance_name, devices); } - // Test when device_usage[id] == "" - // internal_allocate should set device_usage[id] = m.nodeName, return - #[tokio::test] - async fn test_internal_allocate_success() { - let _ = env_logger::builder().is_test(true).try_init(); - let (device_plugin_service, mut device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, true); + fn setup_internal_allocate_tests( + mock: &mut MockKubeInterface, + device_plugin_service: &DevicePluginService, + formerly_allocated_node: String, + newly_allocated_node: String, + ) -> Request { let device_usage_id_slot = format!("{}-0", device_plugin_service.instance_name); let device_usage_id_slot_2 = device_usage_id_slot.clone(); - let node_name = 
device_plugin_service.node_name.clone(); - let mut mock = MockKubeImpl::new(); configure_find_instance( - &mut mock, + mock, "../test/json/local-instance.json", device_plugin_service.instance_name.clone(), device_plugin_service.config_namespace.clone(), - "", + formerly_allocated_node, NodeName::ThisNode, ); mock.expect_update_instance() @@ -1538,14 +1345,64 @@ mod device_plugin_service_tests { .device_usage .get(&device_usage_id_slot) .unwrap() - == &node_name + == &newly_allocated_node }) .returning(move |_, _, _| Ok(())); let devices_i_ds = vec![device_usage_id_slot_2]; let container_requests = vec![v1beta1::ContainerAllocateRequest { devices_i_ds }]; - let requests = Request::new(AllocateRequest { container_requests }); + Request::new(AllocateRequest { container_requests }) + } + + // Test that environment variables set in a Configuration will be set in brokers + #[tokio::test] + async fn test_internal_allocate_env_vars() { + let _ = env_logger::builder().is_test(true).try_init(); + let (device_plugin_service, mut device_plugin_service_receivers) = + create_device_plugin_service(InstanceConnectivityStatus::Online, true); + let node_name = device_plugin_service.node_name.clone(); + let mut mock = MockKubeInterface::new(); + let request = setup_internal_allocate_tests( + &mut mock, + &device_plugin_service, + String::new(), + node_name, + ); + let broker_envs = device_plugin_service + .internal_allocate(request, Arc::new(mock)) + .await + .unwrap() + .into_inner() + .container_responses[0] + .envs + .clone(); + assert_eq!(broker_envs.get("RESOLUTION_WIDTH").unwrap(), "800"); + assert_eq!(broker_envs.get("RESOLUTION_HEIGHT").unwrap(), "600"); + // Check that Device properties are set as env vars by checking for + // property of device created in `create_device_plugin_service` + assert_eq!(broker_envs.get("DEVICE_LOCATION_INFO").unwrap(), "endpoint"); + assert!(device_plugin_service_receivers + .list_and_watch_message_receiver + .try_recv() + .is_err()); + } + + // Test when device_usage[id] == "" + // internal_allocate should set device_usage[id] = m.nodeName, return + #[tokio::test] + async fn test_internal_allocate_success() { + let _ = env_logger::builder().is_test(true).try_init(); + let (device_plugin_service, mut device_plugin_service_receivers) = + create_device_plugin_service(InstanceConnectivityStatus::Online, true); + let node_name = device_plugin_service.node_name.clone(); + let mut mock = MockKubeInterface::new(); + let request = setup_internal_allocate_tests( + &mut mock, + &device_plugin_service, + String::new(), + node_name, + ); assert!(device_plugin_service - .internal_allocate(requests, Arc::new(mock),) + .internal_allocate(request, Arc::new(mock),) .await .is_ok()); assert!(device_plugin_service_receivers @@ -1560,33 +1417,16 @@ mod device_plugin_service_tests { async fn test_internal_allocate_deallocate() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, mut device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, true); - let device_usage_id_slot = format!("{}-0", device_plugin_service.instance_name); - let device_usage_id_slot_2 = device_usage_id_slot.clone(); - let mut mock = MockKubeImpl::new(); - configure_find_instance( + create_device_plugin_service(InstanceConnectivityStatus::Online, true); + let mut mock = MockKubeInterface::new(); + let request = setup_internal_allocate_tests( &mut mock, - "../test/json/local-instance.json", - device_plugin_service.instance_name.clone(), - 
device_plugin_service.config_namespace.clone(), - "node-a", - NodeName::ThisNode, + &device_plugin_service, + "node-a".to_string(), + String::new(), ); - mock.expect_update_instance() - .times(1) - .withf(move |instance_to_update: &Instance, _, _| { - instance_to_update - .device_usage - .get(&device_usage_id_slot) - .unwrap() - == "" - }) - .returning(move |_, _, _| Ok(())); - let devices_i_ds = vec![device_usage_id_slot_2]; - let container_requests = vec![v1beta1::ContainerAllocateRequest { devices_i_ds }]; - let requests = Request::new(AllocateRequest { container_requests }); match device_plugin_service - .internal_allocate(requests, Arc::new(mock)) + .internal_allocate(request, Arc::new(mock)) .await { Ok(_) => { @@ -1613,15 +1453,15 @@ mod device_plugin_service_tests { async fn test_internal_allocate_taken() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, mut device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, true); + create_device_plugin_service(InstanceConnectivityStatus::Online, true); let device_usage_id_slot = format!("{}-0", device_plugin_service.instance_name); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_find_instance( &mut mock, "../test/json/local-instance.json", device_plugin_service.instance_name.clone(), device_plugin_service.config_namespace.clone(), - "other", + "other".to_string(), NodeName::ThisNode, ); let devices_i_ds = vec![device_usage_id_slot]; @@ -1652,15 +1492,15 @@ mod device_plugin_service_tests { async fn test_internal_allocate_no_id() { let _ = env_logger::builder().is_test(true).try_init(); let (device_plugin_service, mut device_plugin_service_receivers) = - create_device_plugin_service(ConnectivityStatus::Online, true); + create_device_plugin_service(InstanceConnectivityStatus::Online, true); let device_usage_id_slot = format!("{}-100", device_plugin_service.instance_name); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_find_instance( &mut mock, "../test/json/local-instance.json", device_plugin_service.instance_name.clone(), device_plugin_service.config_namespace.clone(), - "other", + "other".to_string(), NodeName::ThisNode, ); let devices_i_ds = vec![device_usage_id_slot]; diff --git a/agent/src/util/discovery_operator.rs b/agent/src/util/discovery_operator.rs new file mode 100644 index 000000000..d3241d9ae --- /dev/null +++ b/agent/src/util/discovery_operator.rs @@ -0,0 +1,1783 @@ +use super::super::INSTANCE_COUNT_METRIC; +use super::{ + constants::{ + DISCOVERY_HANDLER_OFFLINE_GRACE_PERIOD_SECS, SHARED_INSTANCE_OFFLINE_GRACE_PERIOD_SECS, + }, + device_plugin_builder::{DevicePluginBuilder, DevicePluginBuilderInterface}, + device_plugin_service, + device_plugin_service::{ + get_device_instance_name, InstanceConnectivityStatus, InstanceInfo, InstanceMap, + }, + embedded_discovery_handlers::get_discovery_handler, + registration::{ + DiscoveryDetails, DiscoveryHandlerEndpoint, DiscoveryHandlerStatus, + RegisteredDiscoveryHandlerMap, + }, + streaming_extension::StreamingExt, +}; +use akri_discovery_utils::discovery::v0::{ + discovery_handler_client::DiscoveryHandlerClient, Device, DiscoverRequest, DiscoverResponse, +}; +use akri_shared::{ + akri::configuration::KubeAkriConfig, + k8s, + os::env_var::{ActualEnvVarQuery, EnvVarQuery}, +}; +use blake2::{ + digest::{Update, VariableOutput}, + VarBlake2b, +}; +use log::{error, trace}; +#[cfg(test)] +use mock_instant::Instant; +#[cfg(test)] +use 
mockall::{automock, predicate::*};
+#[cfg(not(test))]
+use std::time::Instant;
+use std::{collections::HashMap, convert::TryFrom, sync::Arc};
+use tokio::sync::mpsc;
+use tonic::{
+    transport::{Endpoint, Uri},
+    Status,
+};
+
+/// StreamType provides a wrapper around the two different types of streams returned from embedded
+/// discovery handlers and ones running externally.
+pub enum StreamType {
+    Embedded(mpsc::Receiver<Result<DiscoverResponse, Status>>),
+    External(tonic::Streaming<DiscoverResponse>),
+}
+
+/// A DiscoveryOperator is created for each Configuration that is applied to the cluster.
+/// It handles discovery of the devices specified in a Configuration by calling `Discover` on
+/// all `DiscoveryHandlers` registered with name `Configuration.discovery_handler.name`.
+/// For each device discovered by the discovery handlers, it creates a device plugin.
+/// If a device disappears, it deletes the associated instance after a grace period (for non-local devices).
+/// Note: Since this structure is automocked, the compiler does not seem to be able to confirm that all the
+/// methods are being used. Therefore, #[allow(dead_code)] has been added to all methods that are not invoked or
+/// tested on a DiscoveryOperator.
+#[derive(Clone)]
+pub struct DiscoveryOperator {
+    /// Map of registered discovery handlers
+    discovery_handler_map: RegisteredDiscoveryHandlerMap,
+    /// The Akri Configuration associated with this `DiscoveryOperator`.
+    /// The Configuration tells the `DiscoveryOperator` what to look for.
+    config: KubeAkriConfig,
+    /// Map of Akri Instances discovered by this `DiscoveryOperator`
+    instance_map: InstanceMap,
+}
+
+#[cfg_attr(test, automock)]
+impl DiscoveryOperator {
+    pub fn new(
+        discovery_handler_map: RegisteredDiscoveryHandlerMap,
+        config: KubeAkriConfig,
+        instance_map: InstanceMap,
+    ) -> Self {
+        DiscoveryOperator {
+            discovery_handler_map,
+            config,
+            instance_map,
+        }
+    }
+    /// Returns the discovery_handler_map field. Allows the struct to be mocked.
+    #[allow(dead_code)]
+    pub fn get_discovery_handler_map(&self) -> RegisteredDiscoveryHandlerMap {
+        self.discovery_handler_map.clone()
+    }
+    /// Returns the config field. Allows the struct to be mocked.
+    #[allow(dead_code)]
+    pub fn get_config(&self) -> KubeAkriConfig {
+        self.config.clone()
+    }
+    /// Returns the instance_map field. Allows the struct to be mocked.
+    #[allow(dead_code)]
+    pub fn get_instance_map(&self) -> InstanceMap {
+        self.instance_map.clone()
+    }
+    #[allow(dead_code)]
+    pub async fn stop_all_discovery(&self) {
+        let mut discovery_handler_map = self.discovery_handler_map.lock().unwrap().clone();
+        if let Some(discovery_handler_details_map) =
+            discovery_handler_map.get_mut(&self.config.spec.discovery_handler.name)
+        {
+            for (endpoint, dh_details) in discovery_handler_details_map.clone() {
+                match dh_details.close_discovery_handler_connection.send(()) {
+                    Ok(_) => trace!("stop_all_discovery - discovery client for {} discovery handler at endpoint {:?} told to stop", self.config.spec.discovery_handler.name, endpoint),
+                    Err(e) => error!("stop_all_discovery - discovery client for {} discovery handler at endpoint {:?} could not receive stop message with error {:?}", self.config.spec.discovery_handler.name, endpoint, e)
+                }
+            }
+        }
+    }
+
+    /// Calls discover on the Discovery Handler at the given endpoint and returns the connection stream.
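+    /// Returns `None` if no connection can be made. A minimal sketch of a call site,
+    /// assuming a constructed `DiscoveryOperator` named `operator` (hypothetical):
+    /// ```ignore
+    /// if let Some(stream_type) = operator.get_stream(&DiscoveryHandlerEndpoint::Embedded).await {
+    ///     match stream_type {
+    ///         StreamType::Embedded(mut rx) => { /* rx.recv().await yields DiscoverResponse results */ }
+    ///         StreamType::External(mut streaming) => { /* streaming.message().await yields responses */ }
+    ///     }
+    /// }
+    /// ```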
+ pub async fn get_stream(&self, endpoint: &DiscoveryHandlerEndpoint) -> Option { + let discover_request = tonic::Request::new(DiscoverRequest { + discovery_details: self.config.spec.discovery_handler.discovery_details.clone(), + }); + trace!("get_stream - endpoint is {:?}", endpoint); + match endpoint { + DiscoveryHandlerEndpoint::Embedded => { + match get_discovery_handler(&self.config.spec.discovery_handler) { + Ok(discovery_handler) => { + trace!( + "get_stream - using embedded {} discovery handler", + self.config.spec.discovery_handler.name + ); + match discovery_handler.discover(discover_request).await { + Ok(device_update_receiver) => { + Some(StreamType::Embedded(device_update_receiver.into_inner())) + } + Err(e) => { + error!("get_stream - could not connect to DiscoveryHandler at endpoint {:?} with error {}", endpoint, e); + None + } + } + } + Err(e) => { + error!("get_stream - no embedded discovery handler found with name {} with error {:?}", self.config.spec.discovery_handler.name, e); + None + } + } + } + DiscoveryHandlerEndpoint::Uds(socket) => { + // Clone socket for closure which has static lifetime + let socket = socket.clone(); + // We will ignore this dummy uri because UDS does not use it. + match Endpoint::try_from("dummy://[::]:50051") + .unwrap() + .connect_with_connector(tower::service_fn(move |_: Uri| { + let endpoint = socket.clone(); + tokio::net::UnixStream::connect(endpoint) + })) + .await + { + Ok(channel) => { + trace!( + "get_stream - connecting to external {} discovery handler over UDS", + self.config.spec.discovery_handler.name + ); + let mut discovery_handler_client = DiscoveryHandlerClient::new(channel); + match discovery_handler_client.discover(discover_request).await { + Ok(device_update_receiver) => { + Some(StreamType::External(device_update_receiver.into_inner())) + } + Err(e) => { + error!("get_stream - could not connect to DiscoveryHandler at endpoint {:?} with error {}", endpoint, e); + None + } + } + } + Err(e) => { + error!("get_stream - failed to connect to {} discovery handler over UDS with error {}", self.config.spec.discovery_handler.name, e); + None + } + } + } + DiscoveryHandlerEndpoint::Network(addr) => { + match DiscoveryHandlerClient::connect(addr.clone()).await { + Ok(mut discovery_handler_client) => { + trace!( + "get_stream - connecting to external {} discovery handler over network", + self.config.spec.discovery_handler.name + ); + match discovery_handler_client.discover(discover_request).await { + Ok(device_update_receiver) => { + Some(StreamType::External(device_update_receiver.into_inner())) + } + Err(e) => { + error!("get_stream - could not connect to DiscoveryHandler at endpoint {:?} with error {}", endpoint, e); + None + } + } + } + Err(e) => { + error!("get_stream - failed to connect to {} discovery handler over network with error {}", self.config.spec.discovery_handler.name, e); + None + } + } + } + } + } + /// Listens for new discovery responses and calls a function to handle the new discovery results. + /// Runs until notified to stop discovery. 
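+    /// Internally this is a `tokio::select!` race between the stop-discovery broadcast
+    /// receiver and `stream.get_message()`; whichever future resolves first decides whether
+    /// the loop breaks or the new `DiscoverResponse` is passed to `handle_discovery_results`.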
+    #[allow(dead_code)]
+    pub async fn internal_do_discover<'a>(
+        &'a self,
+        kube_interface: Arc<Box<dyn k8s::KubeInterface>>,
+        dh_details: &'a DiscoveryDetails,
+        stream: &'a mut dyn StreamingExt,
+    ) -> Result<(), Status> {
+        // Clone objects for the thread
+        let discovery_operator = Arc::new(self.clone());
+        let stop_discovery_receiver: &mut tokio::sync::broadcast::Receiver<()> =
+            &mut dh_details.close_discovery_handler_connection.subscribe();
+        loop {
+            // Wait for either new discovery results or a message to stop discovery
+            tokio::select! {
+                _ = stop_discovery_receiver.recv() => {
+                    trace!("internal_do_discover - received message to stop discovery for endpoint {:?} serving protocol {}", dh_details.endpoint, discovery_operator.get_config().spec.discovery_handler.name);
+                    break;
+                },
+                result = stream.get_message() => {
+                    let message = result?;
+                    if let Some(response) = message {
+                        trace!("internal_do_discover - got discovery results {:?}", response.devices);
+                        self.handle_discovery_results(
+                            kube_interface.clone(),
+                            response.devices,
+                            dh_details.shared,
+                            Box::new(DevicePluginBuilder{}),
+                        )
+                        .await
+                        .unwrap();
+                    } else {
+                        error!("internal_do_discover - received result of type None. Should not happen.");
+                        break;
+                    }
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Sets the connectivity status of a discovery handler. If a discovery handler goes offline,
+    /// mark_offline_or_deregister_discovery_handler should be used instead.
+    pub fn set_discovery_handler_connectivity_status(
+        &self,
+        endpoint: &DiscoveryHandlerEndpoint,
+        connectivity_status: DiscoveryHandlerStatus,
+    ) {
+        trace!("set_discovery_handler_connectivity_status - set status of {:?} for {} discovery handler at endpoint {:?}", connectivity_status, self.config.spec.discovery_handler.name, endpoint);
+        let mut registered_dh_map = self.discovery_handler_map.lock().unwrap();
+        let discovery_handler_details_map = registered_dh_map
+            .get_mut(&self.config.spec.discovery_handler.name)
+            .unwrap();
+        let dh_details = discovery_handler_details_map.get_mut(endpoint).unwrap();
+        dh_details.connectivity_status = connectivity_status;
+    }
+
+    /// This is called when no connection can be made with a discovery handler at its endpoint.
+    /// It takes action based on a Discovery Handler's (DH's) current `DiscoveryHandlerStatus`.
+    /// If `DiscoveryHandlerStatus::Waiting`, the connectivity status is changed to Offline.
+    /// If `DiscoveryHandlerStatus::Offline`, the DH is removed from the `RegisteredDiscoveryHandlersMap`
+    /// if it has been offline for longer than the grace period.
+    /// If `DiscoveryHandlerStatus::Active`, the connection loss is unexpected and the DH is
+    /// likewise marked Offline.
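+    /// Sketched as a state transition (names as documented above):
+    /// `Waiting`/`Active` --connection failure--> `Offline(now)` --grace period elapses--> deregistered.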
+    pub async fn mark_offline_or_deregister_discovery_handler(
+        &self,
+        endpoint: &DiscoveryHandlerEndpoint,
+    ) -> Result<bool, Box<dyn std::error::Error + Send + Sync + 'static>> {
+        trace!("mark_offline_or_deregister_discovery_handler - {} discovery handler at endpoint {:?} is offline", self.config.spec.discovery_handler.name, endpoint);
+        let mut deregistered = false;
+        let mut registered_dh_map = self.discovery_handler_map.lock().unwrap();
+        let discovery_handler_details_map = registered_dh_map
+            .get_mut(&self.config.spec.discovery_handler.name)
+            .unwrap();
+        let dh_details = discovery_handler_details_map.get_mut(endpoint).unwrap();
+        match dh_details.connectivity_status {
+            DiscoveryHandlerStatus::Offline(instant) => {
+                if instant.elapsed().as_secs() > DISCOVERY_HANDLER_OFFLINE_GRACE_PERIOD_SECS {
+                    trace!("mark_offline_or_deregister_discovery_handler - de-registering {} discovery handler at endpoint {:?} since it has been offline for longer than the grace period", self.config.spec.discovery_handler.name, endpoint);
+                    // Remove the discovery handler from the map if it timed out
+                    discovery_handler_details_map.remove(endpoint).unwrap();
+                    deregistered = true;
+                }
+            }
+            DiscoveryHandlerStatus::Waiting | DiscoveryHandlerStatus::Active => {
+                dh_details.connectivity_status = DiscoveryHandlerStatus::Offline(Instant::now());
+            }
+        }
+        Ok(deregistered)
+    }
+
+    /// Checks if any of this DiscoveryOperator's Configuration's Instances have been offline for too long.
+    /// If a non-local device has not come back online before `SHARED_INSTANCE_OFFLINE_GRACE_PERIOD_SECS`,
+    /// the associated Device Plugin and Instance are terminated and deleted, respectively.
+    pub async fn delete_offline_instances(
+        &self,
+        kube_interface: Arc<Box<dyn k8s::KubeInterface>>,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+        trace!(
+            "delete_offline_instances - entered for configuration {}",
+            self.config.metadata.name
+        );
+        let kube_interface_clone = kube_interface.clone();
+        let instance_map = self.instance_map.lock().await.clone();
+        for (instance, instance_info) in instance_map.clone() {
+            if let InstanceConnectivityStatus::Offline(instant) = instance_info.connectivity_status
+            {
+                let time_offline = instant.elapsed().as_secs();
+                // If the instance has been offline for longer than the grace period, terminate the associated device plugin
+                // TODO: make the grace period configurable
+                if time_offline >= SHARED_INSTANCE_OFFLINE_GRACE_PERIOD_SECS {
+                    trace!("delete_offline_instances - instance {} has been offline too long ... terminating device plugin", instance);
+                    device_plugin_service::terminate_device_plugin_service(
+                        &instance,
+                        self.instance_map.clone(),
+                    )
+                    .await
+                    .unwrap();
+                    k8s::try_delete_instance(
+                        (*kube_interface_clone).as_ref(),
+                        &instance,
+                        self.config.metadata.namespace.as_ref().unwrap(),
+                    )
+                    .await?;
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// Takes in a list of discovered devices and determines if there are any new devices or no-longer-visible devices.
+    /// For each new device, it creates a DevicePluginService.
+    /// For each previously visible device that is no longer discovered, it calls a function that updates the
+    /// InstanceConnectivityStatus of the instance or deletes it if it is a local device.
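+    /// Sketch of the flow, using names from the body below: each discovered `Device` id is
+    /// hashed by `generate_instance_digest` and mapped to an instance name via
+    /// `get_device_instance_name`; names missing from the `InstanceMap` get a new device plugin,
+    /// while instances that were not rediscovered flow into `update_instance_connectivity_status`.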
+    pub async fn handle_discovery_results(
+        &self,
+        kube_interface: Arc<Box<dyn k8s::KubeInterface>>,
+        discovery_results: Vec<Device>,
+        shared: bool,
+        device_plugin_builder: Box<dyn DevicePluginBuilderInterface>,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+        trace!(
+            "handle_discovery_results - for config {} with discovery results {:?}",
+            self.config.metadata.name,
+            discovery_results
+        );
+        let currently_visible_instances: HashMap<String, Device> = discovery_results
+            .iter()
+            .map(|discovery_result| {
+                let id = generate_instance_digest(&discovery_result.id, shared);
+                let instance_name = get_device_instance_name(&id, &self.config.metadata.name);
+                (instance_name, discovery_result.clone())
+            })
+            .collect();
+        INSTANCE_COUNT_METRIC
+            .with_label_values(&[&self.config.metadata.name, &shared.to_string()])
+            .set(currently_visible_instances.len() as i64);
+        // Update the connectivity status of instances and return the list of visible instances that don't have Instance CRs
+        let instance_map = self.instance_map.lock().await.clone();
+        // Find all visible instances that do not have Instance CRDs yet
+        let new_discovery_results: Vec<Device> = currently_visible_instances
+            .iter()
+            .filter(|(name, _)| !instance_map.contains_key(*name))
+            .map(|(_, p)| p.clone())
+            .collect();
+        self.update_instance_connectivity_status(
+            kube_interface,
+            currently_visible_instances,
+            shared,
+        )
+        .await?;
+
+        // If there are newly visible instances associated with a Config, make a device plugin and Instance CR for them
+        if !new_discovery_results.is_empty() {
+            for discovery_result in new_discovery_results {
+                let id = generate_instance_digest(&discovery_result.id, shared);
+                let instance_name = get_device_instance_name(&id, &self.config.metadata.name);
+                trace!(
+                    "handle_discovery_results - new instance {} came online",
+                    instance_name
+                );
+                let instance_map = self.instance_map.clone();
+                if let Err(e) = device_plugin_builder
+                    .build_device_plugin(
+                        instance_name,
+                        &self.config,
+                        shared,
+                        instance_map,
+                        discovery_result.clone(),
+                    )
+                    .await
+                {
+                    error!("handle_discovery_results - error {} building device plugin ... trying again on next iteration", e);
+                }
+            }
+        }
+        Ok(())
+    }
+
+    /// Takes in a list of currently visible instances and either updates an Instance's InstanceConnectivityStatus or deletes an Instance.
+    /// If a non-local/network-based device is no longer visible, its InstanceConnectivityStatus is changed to Offline(time now).
+    /// The associated DevicePluginService checks its InstanceConnectivityStatus before sending a response back to kubelet
+    /// and will send all unhealthy devices if its status is Offline, preventing kubelet from allocating any more pods to it.
+    /// An Instance CRD is deleted and its DevicePluginService shut down if either:
+    /// (A) a non-local Instance is still not visible after 5 minutes or (B) a local Instance is no longer visible.
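+    /// As a decision table (visible = present in `currently_visible_instances`):
+    /// visible + `Offline` -> mark `Online` and wake `list_and_watch`; visible + `Online` -> no-op;
+    /// not visible + `Online` -> delete if unshared, else mark `Offline(now)`;
+    /// not visible + `Offline` past the grace period -> terminate the device plugin and delete the Instance.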
+ pub async fn update_instance_connectivity_status( + &self, + kube_interface: Arc>, + currently_visible_instances: HashMap, + shared: bool, + ) -> Result<(), Box> { + let instance_map = self.instance_map.lock().await.clone(); + for (instance, instance_info) in instance_map { + trace!( + "update_instance_connectivity_status - checking connectivity status of instance {}", + instance + ); + if currently_visible_instances.contains_key(&instance) { + let connectivity_status = instance_info.connectivity_status; + // If instance is visible, make sure connectivity status is (updated to be) Online + if let InstanceConnectivityStatus::Offline(_instant) = connectivity_status { + trace!( + "update_instance_connectivity_status - instance {} that was temporarily offline is back online", + instance + ); + let list_and_watch_message_sender = instance_info.list_and_watch_message_sender; + let updated_instance_info = InstanceInfo { + connectivity_status: InstanceConnectivityStatus::Online, + list_and_watch_message_sender: list_and_watch_message_sender.clone(), + }; + self.instance_map + .lock() + .await + .insert(instance.clone(), updated_instance_info); + // Signal list_and_watch to update kubelet that the devices are healthy. + list_and_watch_message_sender + .send(device_plugin_service::ListAndWatchMessageKind::Continue) + .unwrap(); + } else { + trace!( + "update_instance_connectivity_status - instance {} still online", + instance + ); + } + } else { + // If the instance is not visible: + // // If the instance is local, remove it + // // If the instance is not local + // // // If it has not already been labeled offline, label it + // // // If the instance has already been labeled offline + // // // remove instance from map if grace period has elapsed without the instance coming back online + let mut remove_instance = false; + match instance_info.connectivity_status { + InstanceConnectivityStatus::Online => { + if !shared { + remove_instance = true; + } else { + let sender = instance_info.list_and_watch_message_sender.clone(); + let updated_instance_info = InstanceInfo { + connectivity_status: InstanceConnectivityStatus::Offline( + Instant::now(), + ), + list_and_watch_message_sender: instance_info + .list_and_watch_message_sender + .clone(), + }; + self.instance_map + .lock() + .await + .insert(instance.clone(), updated_instance_info); + trace!( + "update_instance_connectivity_status - instance {} went offline ... starting timer and forcing list_and_watch to continue", + instance + ); + sender + .send(device_plugin_service::ListAndWatchMessageKind::Continue) + .unwrap(); + } + } + InstanceConnectivityStatus::Offline(instant) => { + let time_offline = instant.elapsed().as_secs(); + // If instance has been offline for longer than the grace period, terminate the associated device plugin + if time_offline >= SHARED_INSTANCE_OFFLINE_GRACE_PERIOD_SECS { + remove_instance = true; + } + } + } + if remove_instance { + trace!("update_instance_connectivity_status - instance {} has been offline too long ... 
terminating device plugin", instance);
+                    device_plugin_service::terminate_device_plugin_service(
+                        &instance,
+                        self.instance_map.clone(),
+                    )
+                    .await
+                    .unwrap();
+                    k8s::try_delete_instance(
+                        (*kube_interface).as_ref(),
+                        &instance,
+                        self.config.metadata.namespace.as_ref().unwrap(),
+                    )
+                    .await
+                    .unwrap();
+                }
+            }
+        }
+        Ok(())
+    }
+}
+
+pub mod start_discovery {
+    use super::super::registration::{
+        DiscoveryDetails, DiscoveryHandlerEndpoint, DiscoveryHandlerStatus,
+    };
+    // Use this `mockall` macro to automate importing a mock type in test mode, or a real type otherwise.
+    #[double]
+    pub use super::DiscoveryOperator;
+    use super::StreamType;
+    use akri_shared::k8s;
+    use mockall_double::double;
+    use std::{sync::Arc, time::Duration};
+    use tokio::sync::{broadcast, mpsc};
+
+    /// This is spawned as a task for each Configuration and continues to run
+    /// until the Configuration is deleted, at which point, this function is signaled to stop.
+    /// It consists of three subtasks:
+    /// 1) Initiates discovery on all already registered discovery handlers in the RegisteredDiscoveryHandlerMap
+    /// with the same discovery handler name as the Configuration (Configuration.discovery_handler.name).
+    /// 2) Listens for new discovery handlers to come online for this Configuration and initiates discovery.
+    /// 3) Checks whether Offline Instances have exceeded their grace period, in which case it
+    /// deletes the Instance.
+    pub async fn start_discovery(
+        discovery_operator: DiscoveryOperator,
+        new_discovery_handler_sender: broadcast::Sender<String>,
+        stop_all_discovery_sender: broadcast::Sender<()>,
+        finished_all_discovery_sender: &mut mpsc::Sender<()>,
+        kube_interface: Arc<Box<dyn k8s::KubeInterface>>,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+        let config = discovery_operator.get_config();
+        info!(
+            "start_discovery - entered for {} discovery handler",
+            config.spec.discovery_handler.name
+        );
+        let config_name = config.metadata.name.clone();
+        let mut tasks = Vec::new();
+        let discovery_operator = Arc::new(discovery_operator);
+
+        // Call discover on the already registered Discovery Handlers requested by this Configuration
+        let known_dh_discovery_operator = discovery_operator.clone();
+        let known_dh_kube_interface = kube_interface.clone();
+        tasks.push(tokio::spawn(async move {
+            do_discover(known_dh_discovery_operator, known_dh_kube_interface)
+                .await
+                .unwrap();
+        }));
+
+        // Listen for new discovery handlers to call discover on
+        let mut stop_all_discovery_receiver = stop_all_discovery_sender.subscribe();
+        let mut new_discovery_handler_receiver = new_discovery_handler_sender.subscribe();
+        let new_dh_discovery_operator = discovery_operator.clone();
+        tasks.push(tokio::spawn(async move {
+            listen_for_new_discovery_handlers(
+                new_dh_discovery_operator,
+                &mut new_discovery_handler_receiver,
+                &mut stop_all_discovery_receiver,
+            )
+            .await
+            .unwrap();
+        }));
+
+        // Non-local devices are only allowed to be offline for `SHARED_INSTANCE_OFFLINE_GRACE_PERIOD_SECS` seconds before being removed.
+        // This task periodically checks if devices have been offline for too long.
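+        // The loop below sweeps offline instances, then parks for up to 30 seconds inside
+        // `tokio::time::timeout` around `recv()`; receiving a stop-all message (or the channel
+        // closing) ends the task, while a timeout simply triggers the next sweep.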
+        let mut stop_all_discovery_receiver = stop_all_discovery_sender.subscribe();
+        let offline_dh_discovery_operator = discovery_operator.clone();
+        let offline_dh_kube_interface = kube_interface.clone();
+        tasks.push(tokio::spawn(async move {
+            loop {
+                offline_dh_discovery_operator
+                    .delete_offline_instances(offline_dh_kube_interface.clone())
+                    .await
+                    .unwrap();
+                if tokio::time::timeout(
+                    Duration::from_secs(30),
+                    stop_all_discovery_receiver.recv(),
+                )
+                .await
+                .is_ok()
+                {
+                    trace!("start_discovery - received message to stop checking connectivity status for configuration {}", config_name);
+                    break;
+                }
+            }
+        }));
+        futures::future::try_join_all(tasks).await?;
+        finished_all_discovery_sender.send(()).await?;
+        Ok(())
+    }
+
+    /// Waits to be notified of new discovery handlers. If the discovery handler does discovery for this Configuration,
+    /// discovery is kicked off.
+    async fn listen_for_new_discovery_handlers(
+        discovery_operator: Arc<DiscoveryOperator>,
+        new_discovery_handler_receiver: &mut broadcast::Receiver<String>,
+        stop_all_discovery_receiver: &mut broadcast::Receiver<()>,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+        let mut discovery_tasks = Vec::new();
+        loop {
+            tokio::select! {
+                _ = stop_all_discovery_receiver.recv() => {
+                    trace!("listen_for_new_discovery_handlers - received message to stop discovery for configuration {}", discovery_operator.get_config().metadata.name);
+                    discovery_operator.stop_all_discovery().await;
+                    break;
+                },
+                result = new_discovery_handler_receiver.recv() => {
+                    // Check if it is one of this Configuration's discovery handlers
+                    if let Ok(discovery_handler_name) = result {
+                        if discovery_handler_name == discovery_operator.get_config().spec.discovery_handler.name {
+                            trace!("listen_for_new_discovery_handlers - received new registered discovery handler for configuration {}", discovery_operator.get_config().metadata.name);
+                            let new_discovery_operator = discovery_operator.clone();
+                            discovery_tasks.push(tokio::spawn(async move {
+                                do_discover(new_discovery_operator, Arc::new(Box::new(k8s::create_kube_interface()))).await.unwrap();
+                            }));
+                        }
+                    }
+                }
+            }
+        }
+        // Wait for all discovery handlers to complete discovery
+        futures::future::try_join_all(discovery_tasks).await?;
+        Ok(())
+    }
+
+    /// A Configuration specifies the name of `DiscoveryHandlers` that should be utilized for discovery.
+    /// This tries to establish connection with each `DiscoveryHandler` registered under the requested
+    /// `DiscoveryHandler` name and spawns a discovery thread for each connection.
+    /// This function also manages the `DiscoveryHandlerStatus` of each `DiscoveryHandler` as follows:
+    /// /// `DiscoveryHandlerStatus::Active` if a connection is established via a call to get_stream
+    /// /// `DiscoveryHandlerStatus::Waiting` after a connection has finished due to either being signaled to stop connecting
+    /// /// or an error being returned from the discovery handler (that is not a broken pipe)
+    /// /// `DiscoveryHandlerStatus::Offline` if a connection cannot be established via a call to get_stream
+    /// If a connection cannot be established, continues to try, sleeping between iterations.
+    /// Removes the discovery handler from the `RegisteredDiscoveryHandlerMap` if it has been offline for longer than the grace period.
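As a reading aid for the status lifecycle described above, here is a minimal standalone model of the `Waiting`/`Active`/`Offline` transitions (simplified stand-in types, not the agent's actual definitions):

```rust
use std::time::Instant;

/// Simplified mirror of the agent's `DiscoveryHandlerStatus` (illustration only).
#[derive(Debug, PartialEq)]
enum Status {
    Active,
    Waiting,
    Offline(Instant),
}

/// What happened on the last attempt to talk to a discovery handler.
enum Event {
    Connected,         // get_stream returned a stream
    FinishedOrStopped, // discovery completed or was signaled to stop
    ConnectFailed,     // get_stream returned None
}

fn next_status(current: Status, event: Event) -> Status {
    match event {
        Event::Connected => Status::Active,
        Event::FinishedOrStopped => Status::Waiting,
        // The first failure starts the offline timer; later failures keep the
        // original timestamp so the grace period is measured from first failure.
        Event::ConnectFailed => match current {
            Status::Offline(since) => Status::Offline(since),
            _ => Status::Offline(Instant::now()),
        },
    }
}

fn main() {
    let s = next_status(Status::Waiting, Event::Connected);
    assert_eq!(s, Status::Active);
    let s = next_status(s, Event::FinishedOrStopped);
    assert_eq!(s, Status::Waiting);
}
```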
+    pub async fn do_discover(
+        discovery_operator: Arc<DiscoveryOperator>,
+        kube_interface: Arc<Box<dyn k8s::KubeInterface>>,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+        let mut discovery_tasks = Vec::new();
+        let config = discovery_operator.get_config();
+        trace!(
+            "do_discover - entered for {} discovery handler",
+            config.spec.discovery_handler.name
+        );
+        // get clone of map
+        let mut discovery_handler_map = discovery_operator
+            .get_discovery_handler_map()
+            .lock()
+            .unwrap()
+            .clone();
+        trace!(
+            "do_discover - discovery_handler_map is {:?}",
+            discovery_handler_map
+        );
+        if let Some(discovery_handler_details_map) =
+            discovery_handler_map.get_mut(&config.spec.discovery_handler.name)
+        {
+            for (endpoint, dh_details) in discovery_handler_details_map.clone() {
+                trace!(
+                    "do_discover - for {} discovery handler at endpoint {:?}",
+                    config.spec.discovery_handler.name,
+                    endpoint
+                );
+                // Only use DiscoveryHandler if it doesn't have a client yet
+                if dh_details.connectivity_status != DiscoveryHandlerStatus::Active {
+                    trace!(
+                        "do_discover - {} discovery handler at endpoint {:?} doesn't have client",
+                        config.spec.discovery_handler.name,
+                        endpoint
+                    );
+                    let discovery_operator = discovery_operator.clone();
+                    let kube_interface = kube_interface.clone();
+                    discovery_tasks.push(tokio::spawn(async move {
+                        do_discover_on_discovery_handler(
+                            discovery_operator.clone(),
+                            kube_interface.clone(),
+                            &endpoint,
+                            &dh_details,
+                        )
+                        .await
+                        .unwrap();
+                    }));
+                }
+            }
+        }
+        futures::future::try_join_all(discovery_tasks).await?;
+        Ok(())
+    }
+
+    /// Try to connect to discovery handler until connection has been established or grace period has passed
+    async fn do_discover_on_discovery_handler<'a>(
+        discovery_operator: Arc<DiscoveryOperator>,
+        kube_interface: Arc<Box<dyn k8s::KubeInterface>>,
+        endpoint: &'a DiscoveryHandlerEndpoint,
+        dh_details: &'a DiscoveryDetails,
+    ) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+        loop {
+            let deregistered;
+            match discovery_operator.get_stream(&endpoint).await {
+                Some(stream_type) => {
+                    // Since connection was established, be sure that the Discovery Handler is marked as having a client
+                    discovery_operator.set_discovery_handler_connectivity_status(
+                        &endpoint,
+                        DiscoveryHandlerStatus::Active,
+                    );
+                    match stream_type {
+                        StreamType::External(mut stream) => {
+                            match discovery_operator
+                                .internal_do_discover(
+                                    kube_interface.clone(),
+                                    &dh_details,
+                                    &mut stream,
+                                )
+                                .await
+                            {
+                                Ok(_) => {
+                                    discovery_operator.set_discovery_handler_connectivity_status(
+                                        &endpoint,
+                                        DiscoveryHandlerStatus::Waiting,
+                                    );
+                                    break;
+                                }
+                                Err(status) => {
+                                    if status.message().contains("broken pipe") {
+                                        // Mark all associated instances as offline
+                                        error!("do_discover_on_discovery_handler - connection with Discovery Handler dropped with status {:?}. Marking all instances offline.", status);
+                                        discovery_operator
+                                            .update_instance_connectivity_status(
+                                                kube_interface.clone(),
+                                                std::collections::HashMap::new(),
+                                                dh_details.shared,
+                                            )
+                                            .await?;
+                                        deregistered = discovery_operator
+                                            .mark_offline_or_deregister_discovery_handler(&endpoint)
+                                            .await
+                                            .unwrap();
+                                    } else {
+                                        trace!("do_discover_on_discovery_handler - Discovery Handler returned error status {}. 
Marking all instances offline.", status); + // TODO: Possibly mark config as invalid + // Mark all associated instances as offline by declaring no visible instances + discovery_operator + .update_instance_connectivity_status( + kube_interface.clone(), + std::collections::HashMap::new(), + dh_details.shared, + ) + .await?; + discovery_operator + .set_discovery_handler_connectivity_status( + &endpoint, + DiscoveryHandlerStatus::Waiting, + ); + break; + } + } + } + } + StreamType::Embedded(mut stream) => { + discovery_operator + .internal_do_discover( + kube_interface.clone(), + &dh_details, + &mut stream, + ) + .await + .unwrap(); + discovery_operator.set_discovery_handler_connectivity_status( + &endpoint, + DiscoveryHandlerStatus::Waiting, + ); + break; + } + } + } + None => { + deregistered = discovery_operator + .mark_offline_or_deregister_discovery_handler(&endpoint) + .await + .unwrap(); + } + } + if deregistered { + break; + } else { + // If a connection cannot be established with the Discovery Handler, it will sleep and try again. + // This continues until connection established, the Discovery Handler is deregistered due to grace period elapsing, + // or the Discovery Handler is told to stop discovery. + let mut stop_discovery_receiver = + dh_details.close_discovery_handler_connection.subscribe(); + let mut sleep_duration = Duration::from_secs(60); + if cfg!(test) { + sleep_duration = Duration::from_millis(100); + } + + if tokio::time::timeout(sleep_duration, stop_discovery_receiver.recv()) + .await + .is_ok() + { + trace!("do_discover_on_discovery_handler - received message to stop discovery for {} Discovery Handler at endpoint {:?}", dh_details.name, dh_details.endpoint); + break; + } + } + } + Ok(()) + } +} + +/// Generates an digest of an Instance's id. There should be a unique digest and Instance for each discovered device. +/// This means that the id of non-local devices that could be visible to multiple nodes should always resolve +/// to the same instance name (which is suffixed with this digest). +/// However, local devices' Instances should have unique hashes even if they have the same id. +/// To ensure this, the node's name is added to the id before it is hashed. 
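To make the salting rule above concrete, here is a toy digest that uses `std::hash` in place of the blake2 hash the real function uses. The `digest` function below is hypothetical; only the shared-versus-local salting behavior mirrors the code that follows:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Hypothetical stand-in for the blake2-based digest in the real code: the point
// is only that local (unshared) ids are salted with the node name before hashing.
fn digest(id: &str, shared: bool, node_name: &str) -> String {
    let mut salted = id.to_string();
    if !shared {
        salted.push_str(node_name);
    }
    let mut hasher = DefaultHasher::new();
    salted.hash(&mut hasher);
    // Keep 3 bytes (6 hex chars), matching the length of the real digest.
    format!("{:06x}", hasher.finish() & 0xff_ffff)
}

fn main() {
    // Shared devices resolve to the same Instance name on every node...
    assert_eq!(digest("video1", true, "node-a"), digest("video1", true, "node-b"));
    // ...while local devices get a per-node Instance name.
    assert_ne!(digest("video1", false, "node-a"), digest("video1", false, "node-b"));
}
```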
+pub fn generate_instance_digest(id_to_digest: &str, shared: bool) -> String { + let env_var_query = ActualEnvVarQuery {}; + inner_generate_instance_digest(id_to_digest, shared, &env_var_query) +} + +pub fn inner_generate_instance_digest( + id_to_digest: &str, + shared: bool, + query: &impl EnvVarQuery, +) -> String { + let mut id_to_digest = id_to_digest.to_string(); + // For local devices, include node hostname in id_to_digest so instances have unique names + if !shared { + id_to_digest = format!( + "{}{}", + &id_to_digest, + query.get_env_var("AGENT_NODE_NAME").unwrap() + ); + } + let mut digest = String::new(); + let mut hasher = VarBlake2b::new(3).unwrap(); + hasher.update(id_to_digest); + hasher.finalize_variable(|var| { + digest = var + .iter() + .map(|num| format!("{:02x}", num)) + .collect::>() + .join("") + }); + digest +} + +#[cfg(test)] +pub mod tests { + use super::super::{ + device_plugin_builder::MockDevicePluginBuilderInterface, + registration::{ + inner_register_embedded_discovery_handlers, DiscoveryDetails, DiscoveryHandlerStatus, + }, + }; + use super::*; + use akri_discovery_utils::discovery::mock_discovery_handler; + use akri_shared::{ + akri::configuration::KubeAkriConfig, k8s::MockKubeInterface, os::env_var::MockEnvVarQuery, + }; + use mock_instant::{Instant, MockClock}; + use mockall::Sequence; + use std::time::Duration; + use tokio::sync::broadcast; + + pub async fn build_instance_map( + config: &KubeAkriConfig, + visible_discovery_results: &mut Vec, + list_and_watch_message_receivers: &mut Vec< + broadcast::Receiver, + >, + connectivity_status: InstanceConnectivityStatus, + ) -> InstanceMap { + let device1 = Device { + id: "filter1".to_string(), + properties: HashMap::new(), + mounts: Vec::default(), + device_specs: Vec::default(), + }; + let device2 = Device { + id: "filter2".to_string(), + properties: HashMap::new(), + mounts: Vec::default(), + device_specs: Vec::default(), + }; + let discovery_results = vec![device1, device2]; + *visible_discovery_results = discovery_results.clone(); + generate_instance_map( + discovery_results, + list_and_watch_message_receivers, + connectivity_status, + &config.metadata.name, + ) + } + + fn generate_instance_map( + discovery_results: Vec, + list_and_watch_message_receivers: &mut Vec< + broadcast::Receiver, + >, + connectivity_status: InstanceConnectivityStatus, + config_name: &str, + ) -> InstanceMap { + Arc::new(tokio::sync::Mutex::new( + discovery_results + .iter() + .map(|device| { + let (list_and_watch_message_sender, list_and_watch_message_receiver) = + broadcast::channel(2); + list_and_watch_message_receivers.push(list_and_watch_message_receiver); + let instance_name = get_device_instance_name(&device.id, &config_name); + ( + instance_name, + InstanceInfo { + list_and_watch_message_sender, + connectivity_status: connectivity_status.clone(), + }, + ) + }) + .collect(), + )) + } + + fn create_mock_discovery_operator( + discovery_handler_map: RegisteredDiscoveryHandlerMap, + config: KubeAkriConfig, + instance_map: InstanceMap, + ) -> MockDiscoveryOperator { + let ctx = MockDiscoveryOperator::new_context(); + let discovery_handler_map_clone = discovery_handler_map.clone(); + let config_clone = config.clone(); + let instance_map_clone = instance_map.clone(); + ctx.expect().return_once(move |_, _, _| { + // let mut discovery_handler_status_seq = Sequence::new(); + let mut mock = MockDiscoveryOperator::default(); + mock.expect_get_discovery_handler_map() + .returning(move || discovery_handler_map_clone.clone()); + 
mock.expect_get_config() + .returning(move || config_clone.clone()); + mock.expect_get_instance_map() + .returning(move || instance_map_clone.clone()); + mock + }); + let mock = MockDiscoveryOperator::new(discovery_handler_map, config, instance_map); + mock + } + + // Creates a discovery handler with specified properties and adds it to the RegisteredDiscoveryHandlerMap. + pub fn add_discovery_handler_to_map( + dh_name: &str, + endpoint: &DiscoveryHandlerEndpoint, + shared: bool, + registered_dh_map: RegisteredDiscoveryHandlerMap, + ) { + let discovery_handler_details = + create_discovery_handler_details(dh_name, endpoint.clone(), shared); + // Add discovery handler to registered discovery handler map + let dh_details_map = match registered_dh_map.lock().unwrap().clone().get_mut(dh_name) { + Some(dh_details_map) => { + dh_details_map.insert(endpoint.clone(), discovery_handler_details); + dh_details_map.clone() + } + None => { + let mut dh_details_map = HashMap::new(); + dh_details_map.insert(endpoint.clone(), discovery_handler_details); + dh_details_map + } + }; + registered_dh_map + .lock() + .unwrap() + .insert(dh_name.to_string(), dh_details_map); + } + + fn create_discovery_handler_details( + name: &str, + endpoint: DiscoveryHandlerEndpoint, + shared: bool, + ) -> DiscoveryDetails { + let (close_discovery_handler_connection, _) = broadcast::channel(2); + DiscoveryDetails { + name: name.to_string(), + endpoint, + shared, + close_discovery_handler_connection: close_discovery_handler_connection.clone(), + connectivity_status: DiscoveryHandlerStatus::Waiting, + } + } + + fn setup_test_do_discover() -> (MockDiscoveryOperator, RegisteredDiscoveryHandlerMap) { + let discovery_handler_map = Arc::new(std::sync::Mutex::new(HashMap::new())); + add_discovery_handler_to_map( + "debugEcho", + &DiscoveryHandlerEndpoint::Uds("socket.sock".to_string()), + false, + discovery_handler_map.clone(), + ); + + // Build discovery operator + let path_to_config = "../test/yaml/config-a.yaml"; + let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); + let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let discovery_operator = create_mock_discovery_operator( + discovery_handler_map.clone(), + config, + Arc::new(tokio::sync::Mutex::new(HashMap::new())), + ); + (discovery_operator, discovery_handler_map) + } + + #[test] + fn test_generate_instance_digest() { + let mut mock_env_var_a = MockEnvVarQuery::new(); + mock_env_var_a + .expect_get_env_var() + .returning(|_| Ok("node-a".to_string())); + let id = "video1"; + let first_unshared_video_digest = + inner_generate_instance_digest(id, false, &mock_env_var_a); + let first_shared_video_digest = inner_generate_instance_digest(id, true, &mock_env_var_a); + let mut mock_env_var_b = MockEnvVarQuery::new(); + mock_env_var_b + .expect_get_env_var() + .returning(|_| Ok("node-b".to_string())); + let second_unshared_video_digest = + inner_generate_instance_digest(id, false, &mock_env_var_b); + let second_shared_video_digest = inner_generate_instance_digest(id, true, &mock_env_var_b); + // unshared instances visible to different nodes should NOT have the same digest + assert_ne!(first_unshared_video_digest, second_unshared_video_digest); + // shared instances visible to different nodes should have the same digest + assert_eq!(first_shared_video_digest, second_shared_video_digest); + } + + #[tokio::test] + async fn test_stop_all_discovery() { + let dh_name = "debugEcho"; + let discovery_handler_map = 
Arc::new(std::sync::Mutex::new(HashMap::new())); + let endpoint1 = DiscoveryHandlerEndpoint::Uds("socket.sock".to_string()); + add_discovery_handler_to_map(dh_name, &endpoint1, false, discovery_handler_map.clone()); + let mut close_discovery_handler_connection_receiver1 = discovery_handler_map + .lock() + .unwrap() + .get(dh_name) + .unwrap() + .get(&endpoint1) + .unwrap() + .close_discovery_handler_connection + .subscribe(); + let endpoint2 = DiscoveryHandlerEndpoint::Uds("socket2.sock".to_string()); + add_discovery_handler_to_map(dh_name, &endpoint2, false, discovery_handler_map.clone()); + let mut close_discovery_handler_connection_receiver2 = discovery_handler_map + .lock() + .unwrap() + .get(dh_name) + .unwrap() + .get(&endpoint2) + .unwrap() + .close_discovery_handler_connection + .subscribe(); + let path_to_config = "../test/yaml/config-a.yaml"; + let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); + let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let discovery_operator = Arc::new(DiscoveryOperator::new( + discovery_handler_map, + config, + Arc::new(tokio::sync::Mutex::new(HashMap::new())), + )); + tokio::spawn(async move { + discovery_operator.stop_all_discovery().await; + }); + assert!(close_discovery_handler_connection_receiver1 + .recv() + .await + .is_ok()); + assert!(close_discovery_handler_connection_receiver2 + .recv() + .await + .is_ok()); + } + + #[tokio::test] + async fn test_start_discovery_termination() { + let _ = env_logger::builder().is_test(true).try_init(); + let (mut mock_discovery_operator, discovery_handler_map) = setup_test_do_discover(); + let (marked_offline_sender, mut marked_offline_receiver) = + tokio::sync::broadcast::channel(1); + mock_discovery_operator + .expect_get_stream() + .returning(|_| None); + mock_discovery_operator + .expect_mark_offline_or_deregister_discovery_handler() + .withf(move |endpoint: &DiscoveryHandlerEndpoint| { + endpoint == &DiscoveryHandlerEndpoint::Uds("socket.sock".to_string()) + }) + .returning(move |_| { + marked_offline_sender.clone().send(()).unwrap(); + Ok(false) + }); + mock_discovery_operator + .expect_delete_offline_instances() + .times(1) + .returning(move |_| Ok(())); + let stop_dh_discovery_sender = discovery_handler_map + .lock() + .unwrap() + .get_mut("debugEcho") + .unwrap() + .clone() + .get(&DiscoveryHandlerEndpoint::Uds("socket.sock".to_string())) + .unwrap() + .clone() + .close_discovery_handler_connection + .clone(); + mock_discovery_operator + .expect_stop_all_discovery() + .times(1) + .returning(move || { + stop_dh_discovery_sender.clone().send(()).unwrap(); + }); + let (new_dh_sender, _) = broadcast::channel(2); + let (stop_all_discovery_sender, _) = broadcast::channel(2); + let (mut finished_discovery_sender, mut finished_discovery_receiver) = + tokio::sync::mpsc::channel(2); + let thread_new_dh_sender = new_dh_sender.clone(); + let thread_stop_all_discovery_sender = stop_all_discovery_sender.clone(); + let mock_kube_interface: Arc> = + Arc::new(Box::new(MockKubeInterface::new())); + let handle = tokio::spawn(async move { + start_discovery::start_discovery( + mock_discovery_operator, + thread_new_dh_sender, + thread_stop_all_discovery_sender, + &mut finished_discovery_sender, + mock_kube_interface, + ) + .await + .unwrap(); + }); + + // Wait until do_discovery has gotten to point the DH marked offline + marked_offline_receiver.recv().await.unwrap(); + stop_all_discovery_sender.send(()).unwrap(); + 
finished_discovery_receiver.recv().await.unwrap(); + // Make sure that all threads have finished + handle.await.unwrap(); + } + + // Test that DH is connected to on second try getting stream and + // that connectivity status is changed from Waiting -> Active -> Waiting again + // when a successful connection is made and completed. + #[tokio::test] + async fn test_do_discover_completed_internal_connection() { + let _ = env_logger::builder().is_test(true).try_init(); + let (mut mock_discovery_operator, _) = setup_test_do_discover(); + let mut get_stream_seq = Sequence::new(); + // First time cannot get stream and is marked offline + mock_discovery_operator + .expect_get_stream() + .times(1) + .returning(|_| None) + .in_sequence(&mut get_stream_seq); + mock_discovery_operator + .expect_mark_offline_or_deregister_discovery_handler() + .withf(move |endpoint: &DiscoveryHandlerEndpoint| { + endpoint == &DiscoveryHandlerEndpoint::Uds("socket.sock".to_string()) + }) + .times(1) + .returning(|_| Ok(false)); + // Second time successfully get stream + let (_, rx) = mpsc::channel(2); + let stream_type = Some(StreamType::Embedded(rx)); + mock_discovery_operator + .expect_get_stream() + .times(1) + .return_once(move |_| stream_type) + .in_sequence(&mut get_stream_seq); + // Make sure discovery handler is marked as Active + let mut discovery_handler_status_seq = Sequence::new(); + mock_discovery_operator + .expect_set_discovery_handler_connectivity_status() + .withf( + move |endpoint: &DiscoveryHandlerEndpoint, + connectivity_status: &DiscoveryHandlerStatus| { + endpoint == &DiscoveryHandlerEndpoint::Uds("socket.sock".to_string()) + && connectivity_status == &DiscoveryHandlerStatus::Active + }, + ) + .times(1) + .returning(|_, _| ()) + .in_sequence(&mut discovery_handler_status_seq); + // Discovery should be initiated + mock_discovery_operator + .expect_internal_do_discover() + .times(1) + .returning(|_, _, _| Ok(())); + // Make sure after discovery is complete that the DH is marked Online again + mock_discovery_operator + .expect_set_discovery_handler_connectivity_status() + .withf( + move |endpoint: &DiscoveryHandlerEndpoint, + connectivity_status: &DiscoveryHandlerStatus| { + endpoint == &DiscoveryHandlerEndpoint::Uds("socket.sock".to_string()) + && connectivity_status == &DiscoveryHandlerStatus::Waiting + }, + ) + .times(1) + .returning(|_, _| ()) + .in_sequence(&mut discovery_handler_status_seq); + let mock_kube_interface: Arc> = + Arc::new(Box::new(MockKubeInterface::new())); + start_discovery::do_discover(Arc::new(mock_discovery_operator), mock_kube_interface) + .await + .unwrap(); + } + + #[tokio::test] + async fn test_handle_discovery_results() { + let _ = env_logger::builder().is_test(true).try_init(); + // Set node name for generating instance id + std::env::set_var("AGENT_NODE_NAME", "node-a"); + let mock_kube_interface: Arc> = + Arc::new(Box::new(MockKubeInterface::new())); + let discovery_handler_map: RegisteredDiscoveryHandlerMap = + Arc::new(std::sync::Mutex::new(HashMap::new())); + let path_to_config = "../test/yaml/config-a.yaml"; + let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); + let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let config_name = config.metadata.name.clone(); + INSTANCE_COUNT_METRIC + .with_label_values(&[&config_name, "true"]) + .set(0); + let device1 = Device { + id: "device1".to_string(), + properties: HashMap::new(), + mounts: Vec::default(), + device_specs: Vec::default(), + }; + let device2 = 
Device { + id: "device2".to_string(), + properties: HashMap::new(), + mounts: Vec::default(), + device_specs: Vec::default(), + }; + let discovery_results: Vec = vec![device1, device2]; + let discovery_operator = Arc::new(DiscoveryOperator::new( + discovery_handler_map, + config, + Arc::new(tokio::sync::Mutex::new(HashMap::new())), + )); + let mut mock_device_plugin_builder = MockDevicePluginBuilderInterface::new(); + mock_device_plugin_builder + .expect_build_device_plugin() + .times(2) + .returning(move |_, _, _, _, _| Ok(())); + discovery_operator + .handle_discovery_results( + mock_kube_interface, + discovery_results, + true, + Box::new(mock_device_plugin_builder), + ) + .await + .unwrap(); + + assert_eq!( + INSTANCE_COUNT_METRIC + .with_label_values(&[&config_name, "true"]) + .get(), + 2 + ); + } + + // Checks either that InstanceConnectivityStatus changed to expected value until success or exceeded tries + // or that all instances have been deleted from map. + // Sleep between tries to give update_instance_connectivity_status the chance chance to grab mutex InstanceMap. + async fn check_status_or_empty_loop( + status: InstanceConnectivityStatus, + equality: bool, + instance_map: InstanceMap, + check_empty: bool, + ) { + let mut keep_looping = false; + let mut map_is_empty = false; + let tries: i8 = 5; + for _x in 0..tries { + println!("try number {}", _x); + keep_looping = false; + tokio::time::delay_for(Duration::from_millis(100)).await; + let unwrapped_instance_map = instance_map.lock().await.clone(); + if check_empty && unwrapped_instance_map.is_empty() { + map_is_empty = true; + break; + } + for (_, instance_info) in unwrapped_instance_map { + if instance_info.connectivity_status != status && equality { + keep_looping = true; + } + if instance_info.connectivity_status == status && !equality { + keep_looping = true; + } + } + if !keep_looping { + break; + } + } + if keep_looping { + panic!( + "failed to assert that all instances had status equal T/F: [{}] to status [{:?}]", + equality, status + ); + } + if check_empty && !map_is_empty { + panic!("instances were not cleared from map"); + } + } + + #[tokio::test] + async fn test_delete_offline_instances() { + let _ = env_logger::builder().is_test(true).try_init(); + let path_to_config = "../test/yaml/config-a.yaml"; + let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); + let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let mut list_and_watch_message_receivers = Vec::new(); + let discovery_handler_map: RegisteredDiscoveryHandlerMap = + Arc::new(std::sync::Mutex::new(HashMap::new())); + let mut visible_discovery_results = Vec::new(); + + // Assert no action (to delete instances by mock kube interface) is taken for all online instances + let instance_map: InstanceMap = build_instance_map( + &config, + &mut visible_discovery_results, + &mut list_and_watch_message_receivers, + InstanceConnectivityStatus::Online, + ) + .await; + let mock = MockKubeInterface::new(); + let discovery_operator = Arc::new(DiscoveryOperator::new( + discovery_handler_map.clone(), + config.clone(), + instance_map, + )); + discovery_operator + .delete_offline_instances(Arc::new(Box::new(mock))) + .await + .unwrap(); + + // Assert no action (to delete instances by mock kube interface) is taken for instances offline for less than grace period + let mock_now = Instant::now(); + MockClock::advance(Duration::from_secs(30)); + let instance_map: InstanceMap = build_instance_map( + &config, + &mut 
visible_discovery_results, + &mut list_and_watch_message_receivers, + InstanceConnectivityStatus::Offline(mock_now), + ) + .await; + let mock = MockKubeInterface::new(); + let discovery_operator = Arc::new(DiscoveryOperator::new( + discovery_handler_map.clone(), + config.clone(), + instance_map, + )); + discovery_operator + .delete_offline_instances(Arc::new(Box::new(mock))) + .await + .unwrap(); + + // Assert that all instances that have been offline for more than 5 minutes are deleted + let mock_now = Instant::now(); + MockClock::advance(Duration::from_secs(301)); + let instance_map: InstanceMap = build_instance_map( + &config, + &mut visible_discovery_results, + &mut list_and_watch_message_receivers, + InstanceConnectivityStatus::Offline(mock_now), + ) + .await; + let mut mock = MockKubeInterface::new(); + mock.expect_delete_instance() + .times(2) + .returning(move |_, _| Ok(())); + let discovery_operator = Arc::new(DiscoveryOperator::new( + discovery_handler_map.clone(), + config.clone(), + instance_map.clone(), + )); + discovery_operator + .delete_offline_instances(Arc::new(Box::new(mock))) + .await + .unwrap(); + // Make sure all instances are deleted from map. Note, first 3 arguments are ignored. + check_status_or_empty_loop(InstanceConnectivityStatus::Online, true, instance_map, true) + .await; + } + + // 1: InstanceConnectivityStatus of all instances that go offline is changed from Online to Offline + // 2: InstanceConnectivityStatus of shared instances that come back online in under 5 minutes is changed from Offline to Online + // 3: InstanceConnectivityStatus of unshared instances that come back online before next periodic discovery is changed from Offline to Online + #[tokio::test(core_threads = 2)] + async fn test_update_instance_connectivity_status_factory() { + let _ = env_logger::builder().is_test(true).try_init(); + let path_to_config = "../test/yaml/config-a.yaml"; + let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); + let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let config_name = config.metadata.name.clone(); + let mut list_and_watch_message_receivers = Vec::new(); + let mut visible_discovery_results = Vec::new(); + let discovery_handler_map: RegisteredDiscoveryHandlerMap = + Arc::new(std::sync::Mutex::new(HashMap::new())); + let discovery_handler_map_clone = discovery_handler_map.clone(); + // set environment variable to set whether debug echo instances are shared + let mut mock_env_var_shared = MockEnvVarQuery::new(); + mock_env_var_shared + .expect_get_env_var() + .returning(|_| Ok("false".to_string())); + inner_register_embedded_discovery_handlers( + discovery_handler_map_clone, + &mock_env_var_shared, + ) + .unwrap(); + + // + // 1: Assert that InstanceConnectivityStatus of non local instances that are no longer visible is changed to Offline + // + let instance_map: InstanceMap = build_instance_map( + &config, + &mut visible_discovery_results, + &mut list_and_watch_message_receivers, + InstanceConnectivityStatus::Online, + ) + .await; + let shared = true; + run_update_instance_connectivity_status( + config.clone(), + HashMap::new(), + shared, + instance_map.clone(), + discovery_handler_map.clone(), + MockKubeInterface::new(), + ) + .await; + + // Check that no instances are still online + check_status_or_empty_loop( + InstanceConnectivityStatus::Online, + false, + instance_map, + false, + ) + .await; + + // + // 2: Assert that InstanceConnectivityStatus of shared instances that come back online in 
<5 mins is changed to Online + // + let mock_now = Instant::now(); + MockClock::advance(Duration::from_secs(30)); + let instance_map: InstanceMap = build_instance_map( + &config, + &mut visible_discovery_results, + &mut list_and_watch_message_receivers, + InstanceConnectivityStatus::Offline(mock_now), + ) + .await; + let currently_visible_instances: HashMap = visible_discovery_results + .iter() + .map(|device| { + let instance_name = get_device_instance_name(&device.id, &config_name); + (instance_name, device.clone()) + }) + .collect(); + let shared = true; + run_update_instance_connectivity_status( + config.clone(), + currently_visible_instances.clone(), + shared, + instance_map.clone(), + discovery_handler_map.clone(), + MockKubeInterface::new(), + ) + .await; + + // Check that all instances marked online + check_status_or_empty_loop( + InstanceConnectivityStatus::Online, + true, + instance_map, + false, + ) + .await; + + // + // 3: Assert that shared instances that are offline for more than 5 minutes are removed from the instance map + // + let mock_now = Instant::now(); + MockClock::advance(Duration::from_secs(301)); + let instance_map: InstanceMap = build_instance_map( + &config, + &mut visible_discovery_results, + &mut list_and_watch_message_receivers, + InstanceConnectivityStatus::Offline(mock_now), + ) + .await; + let mut mock = MockKubeInterface::new(); + mock.expect_delete_instance() + .times(2) + .returning(move |_, _| Ok(())); + let shared = true; + run_update_instance_connectivity_status( + config.clone(), + HashMap::new(), + shared, + instance_map.clone(), + discovery_handler_map.clone(), + mock, + ) + .await; + // Make sure all instances are deleted from map. Note, first 3 arguments are ignored. + check_status_or_empty_loop(InstanceConnectivityStatus::Online, true, instance_map, true) + .await; + + // + // 4: Assert that local devices that go offline are removed from the instance map + // + let mut mock = MockKubeInterface::new(); + mock.expect_delete_instance() + .times(2) + .returning(move |_, _| Ok(())); + + let instance_map: InstanceMap = build_instance_map( + &config, + &mut visible_discovery_results, + &mut list_and_watch_message_receivers, + InstanceConnectivityStatus::Online, + ) + .await; + let shared = false; + run_update_instance_connectivity_status( + config, + HashMap::new(), + shared, + instance_map.clone(), + discovery_handler_map.clone(), + mock, + ) + .await; + // Make sure all instances are deleted from map. Note, first 3 arguments are ignored. 
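One note on clock handling before the final check below runs: these tests advance time with the `mock_instant` crate instead of sleeping, so a 5-minute grace period can elapse instantly. A minimal standalone sketch of that mechanism (assumes the same `mock_instant` dev-dependency this file already imports):

```rust
// `mock_instant::Instant` is a drop-in for `std::time::Instant` whose clock
// only moves when MockClock::advance is called.
use mock_instant::{Instant, MockClock};
use std::time::Duration;

fn main() {
    let went_offline = Instant::now();
    // Simulate the 5-minute (300 s) grace period elapsing without real waiting.
    MockClock::advance(Duration::from_secs(301));
    assert!(went_offline.elapsed().as_secs() >= 300);
}
```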
+ check_status_or_empty_loop(InstanceConnectivityStatus::Online, true, instance_map, true) + .await; + } + + async fn run_update_instance_connectivity_status( + config: KubeAkriConfig, + currently_visible_instances: HashMap, + shared: bool, + instance_map: InstanceMap, + discovery_handler_map: RegisteredDiscoveryHandlerMap, + mock: MockKubeInterface, + ) { + let discovery_operator = Arc::new(DiscoveryOperator::new( + discovery_handler_map, + config, + instance_map.clone(), + )); + discovery_operator + .update_instance_connectivity_status( + Arc::new(Box::new(mock)), + currently_visible_instances, + shared, + ) + .await + .unwrap(); + } + + fn setup_non_mocked_dh( + dh_name: &str, + endpoint: &DiscoveryHandlerEndpoint, + ) -> DiscoveryOperator { + let path_to_config = "../test/yaml/config-a.yaml"; + let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); + let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let discovery_handler_map = Arc::new(std::sync::Mutex::new(HashMap::new())); + add_discovery_handler_to_map(dh_name, endpoint, false, discovery_handler_map.clone()); + DiscoveryOperator::new( + discovery_handler_map, + config, + Arc::new(tokio::sync::Mutex::new(HashMap::new())), + ) + } + + #[tokio::test] + async fn test_set_discovery_handler_connectivity_status() { + let _ = env_logger::builder().is_test(true).try_init(); + let discovery_handler_name = "debugEcho"; + let endpoint = DiscoveryHandlerEndpoint::Uds("socket.sock".to_string()); + let discovery_operator = setup_non_mocked_dh(discovery_handler_name, &endpoint); + // Test that an online discovery handler is marked Active + discovery_operator + .set_discovery_handler_connectivity_status(&endpoint, DiscoveryHandlerStatus::Active); + assert_eq!( + discovery_operator + .discovery_handler_map + .lock() + .unwrap() + .get_mut(discovery_handler_name) + .unwrap() + .clone() + .get(&endpoint) + .unwrap() + .clone() + .connectivity_status, + DiscoveryHandlerStatus::Active + ); + } + + #[tokio::test] + async fn test_mark_offline_or_deregister_discovery_handler() { + let _ = env_logger::builder().is_test(true).try_init(); + let discovery_handler_name = "debugEcho"; + let endpoint = DiscoveryHandlerEndpoint::Uds("socket.sock".to_string()); + let discovery_operator = setup_non_mocked_dh(discovery_handler_name, &endpoint); + // Test that an online discovery handler is marked offline + assert_eq!( + discovery_operator + .mark_offline_or_deregister_discovery_handler(&endpoint) + .await + .unwrap(), + false + ); + if let DiscoveryHandlerStatus::Offline(_) = discovery_operator + .discovery_handler_map + .lock() + .unwrap() + .get_mut(discovery_handler_name) + .unwrap() + .clone() + .get(&endpoint) + .unwrap() + .clone() + .connectivity_status + { + // expected + } else { + panic!("DiscoveryHandlerStatus should be changed to offline"); + } + // Test that an offline discovery handler IS NOT deregistered if the time has not passed + assert_eq!( + discovery_operator + .mark_offline_or_deregister_discovery_handler(&endpoint) + .await + .unwrap(), + false + ); + + // Test that an offline discovery handler IS deregistered if the time has passed + let mock_now = Instant::now(); + MockClock::advance(Duration::from_secs(301)); + discovery_operator + .discovery_handler_map + .lock() + .unwrap() + .get_mut(discovery_handler_name) + .unwrap() + .get_mut(&endpoint) + .unwrap() + .connectivity_status = DiscoveryHandlerStatus::Offline(mock_now); + assert_eq!( + discovery_operator + 
.mark_offline_or_deregister_discovery_handler(&endpoint) + .await + .unwrap(), + true + ); + } + + #[tokio::test] + async fn test_get_stream_embedded() { + let _ = env_logger::builder().is_test(true).try_init(); + std::env::set_var(super::super::constants::ENABLE_DEBUG_ECHO_LABEL, "yes"); + let path_to_config = "../test/yaml/config-a.yaml"; + let config_yaml = std::fs::read_to_string(path_to_config).expect("Unable to read file"); + let config: KubeAkriConfig = serde_yaml::from_str(&config_yaml).unwrap(); + let discovery_handler_map = Arc::new(std::sync::Mutex::new(HashMap::new())); + let endpoint = DiscoveryHandlerEndpoint::Embedded; + let dh_name = akri_debug_echo::DISCOVERY_HANDLER_NAME.to_string(); + add_discovery_handler_to_map(&dh_name, &endpoint, false, discovery_handler_map.clone()); + let discovery_operator = DiscoveryOperator::new( + discovery_handler_map, + config, + Arc::new(tokio::sync::Mutex::new(HashMap::new())), + ); + // test embedded debugEcho socket + if let Some(StreamType::Embedded(_)) = discovery_operator + .get_stream(&DiscoveryHandlerEndpoint::Embedded) + .await + { + // expected + } else { + panic!("expected internal stream"); + } + } + + async fn setup_and_run_mock_discovery_handler( + endpoint: &str, + endpoint_dir: &str, + dh_endpoint: &DiscoveryHandlerEndpoint, + return_error: bool, + ) -> DiscoveryOperator { + let discovery_operator = setup_non_mocked_dh("mockName", dh_endpoint); + // Start mock DH, specifying that it should successfully run + let _dh_server_thread_handle = mock_discovery_handler::run_mock_discovery_handler( + endpoint_dir, + endpoint, + return_error, + Vec::new(), + ) + .await; + // Make sure registration server has started + akri_shared::uds::unix_stream::try_connect(&endpoint) + .await + .unwrap(); + discovery_operator + } + + #[tokio::test] + async fn test_get_stream_no_dh() { + let (_, endpoint) = + mock_discovery_handler::get_mock_discovery_handler_dir_and_endpoint("mock.sock"); + let dh_endpoint = DiscoveryHandlerEndpoint::Uds(endpoint.to_string()); + let discovery_operator = setup_non_mocked_dh("mock", &dh_endpoint); + // Should not be able to get stream if DH is not running + assert!(discovery_operator.get_stream(&dh_endpoint).await.is_none()); + } + + #[tokio::test] + async fn test_get_stream_error() { + // Start mock DH, specifying that it should return an error + let return_error = true; + let (endpoint_dir, endpoint) = + mock_discovery_handler::get_mock_discovery_handler_dir_and_endpoint("mock.sock"); + let dh_endpoint = DiscoveryHandlerEndpoint::Uds(endpoint.to_string()); + let discovery_operator = setup_and_run_mock_discovery_handler( + &endpoint, + &endpoint_dir, + &dh_endpoint, + return_error, + ) + .await; + // Assert that get_stream returns none if the DH returns error + assert!(discovery_operator.get_stream(&dh_endpoint).await.is_none()); + } + + #[tokio::test] + async fn test_get_stream_external_success() { + // Start mock DH, specifying that it should NOT return an error + let return_error = false; + let (endpoint_dir, endpoint) = + mock_discovery_handler::get_mock_discovery_handler_dir_and_endpoint("mock.sock"); + let dh_endpoint = DiscoveryHandlerEndpoint::Uds(endpoint.to_string()); + let discovery_operator = setup_and_run_mock_discovery_handler( + &endpoint, + &endpoint_dir, + &dh_endpoint, + return_error, + ) + .await; + if let Some(StreamType::External(mut receiver)) = + discovery_operator.get_stream(&dh_endpoint).await + { + // MockDiscoveryHandler returns an empty array of devices + assert_eq!( + 
receiver.get_message().await.unwrap().unwrap().devices.len(), + 0 + ); + } else { + panic!("expected external stream"); + } + } +} diff --git a/agent/src/util/embedded_discovery_handlers.rs b/agent/src/util/embedded_discovery_handlers.rs new file mode 100644 index 000000000..2e0427fd5 --- /dev/null +++ b/agent/src/util/embedded_discovery_handlers.rs @@ -0,0 +1,138 @@ +use akri_debug_echo::discovery_handler::DebugEchoDiscoveryDetails; +use akri_discovery_utils::discovery::{ + v0::discovery_handler_server::DiscoveryHandler, DiscoverStream, +}; +#[cfg(any(test, feature = "onvif-feat"))] +use akri_onvif::discovery_handler::OnvifDiscoveryDetails; +#[cfg(any(test, feature = "opcua-feat"))] +use akri_opcua::discovery_handler::OpcuaDiscoveryDetails; +use akri_shared::{ + akri::configuration::DiscoveryHandlerInfo, + os::env_var::{ActualEnvVarQuery, EnvVarQuery}, +}; +#[cfg(any(test, feature = "udev-feat"))] +use akri_udev::discovery_handler::UdevDiscoveryDetails; +use anyhow::Error; +use log::trace; + +/// Returns the appropriate embedded DiscoveryHandler as determined by the deserialized discovery_details string. +pub fn get_discovery_handler( + discovery_handler_info: &DiscoveryHandlerInfo, +) -> Result>, Error> { + let query_var_set = ActualEnvVarQuery {}; + inner_get_discovery_handler(discovery_handler_info, &query_var_set) +} + +fn inner_get_discovery_handler( + discovery_handler_info: &DiscoveryHandlerInfo, + query: &impl EnvVarQuery, +) -> Result>, Error> { + trace!( + "inner_get_discovery_handler - for DiscoveryHandlerInfo {:?}", + discovery_handler_info + ); + // Determine whether it is an embedded discovery handler + match discovery_handler_info.name.as_str() { + #[cfg(any(test, feature = "onvif-feat"))] + akri_onvif::DISCOVERY_HANDLER_NAME => { + trace!("here in onvif"); + let _discovery_handler_config: OnvifDiscoveryDetails = serde_yaml::from_str(&discovery_handler_info.discovery_details).map_err(|e| anyhow::format_err!("ONVIF Configuration discovery details improperly configured with error {:?}", e))?; + Ok(Box::new( + akri_onvif::discovery_handler::DiscoveryHandlerImpl::new(None), + )) + } + #[cfg(any(test, feature = "udev-feat"))] + akri_udev::DISCOVERY_HANDLER_NAME => { + let _discovery_handler_config: UdevDiscoveryDetails = serde_yaml::from_str(&discovery_handler_info.discovery_details).map_err(|e| anyhow::format_err!("udev Configuration discovery details improperly configured with error {:?}", e))?; + Ok(Box::new( + akri_udev::discovery_handler::DiscoveryHandlerImpl::new(None), + )) + } + #[cfg(any(test, feature = "opcua-feat"))] + akri_opcua::DISCOVERY_HANDLER_NAME => { + let _discovery_handler_config: OpcuaDiscoveryDetails = serde_yaml::from_str(&discovery_handler_info.discovery_details).map_err(|e| anyhow::format_err!("OPC UA Configuration discovery details improperly configured with error {:?}", e))?; + Ok(Box::new( + akri_opcua::discovery_handler::DiscoveryHandlerImpl::new(None), + )) + } + akri_debug_echo::DISCOVERY_HANDLER_NAME => { + match query.get_env_var(super::constants::ENABLE_DEBUG_ECHO_LABEL) { + Ok(_) => { + let _discovery_handler_config: DebugEchoDiscoveryDetails = serde_yaml::from_str(&discovery_handler_info.discovery_details).map_err(|e| anyhow::format_err!("debug echo Configuration discovery details improperly configured with error {:?}", e))?; + Ok(Box::new( + akri_debug_echo::discovery_handler::DiscoveryHandlerImpl::new(None))) + }, + _ => Err(anyhow::format_err!("Debug echo discovery handler not configured")), + } + } + _ => Err(anyhow::format_err!( + "No 
embedded discovery handler found for configuration with discovery handler info {:?}", + discovery_handler_info + )), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use akri_shared::{akri::configuration::DiscoveryHandlerInfo, os::env_var::MockEnvVarQuery}; + use std::env::VarError; + + #[test] + fn test_inner_get_discovery_handler() { + let _ = env_logger::builder().is_test(true).try_init(); + let mock_query = MockEnvVarQuery::new(); + let deserialized = serde_json::from_str::( + r#"{"name":"onvif", "discoveryDetails":"{}"}"#, + ) + .unwrap(); + assert!(inner_get_discovery_handler(&deserialized, &mock_query).is_ok()); + + let udev_yaml = r#" + name: udev + discoveryDetails: |+ + udevRules: [] + "#; + let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(&udev_yaml).unwrap(); + assert!(inner_get_discovery_handler(&deserialized, &mock_query).is_ok()); + + let yaml = r#" + name: opcua + discoveryDetails: |+ + opcuaDiscoveryMethod: + standard: {} + "#; + let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(&yaml).unwrap(); + assert!(inner_get_discovery_handler(&deserialized, &mock_query).is_ok()); + + let deserialized = serde_json::from_str::( + r#"{"name":"random", "discoveryDetails":"some details"}"#, + ) + .unwrap(); + assert!(inner_get_discovery_handler(&deserialized, &mock_query).is_err()); + } + + #[tokio::test] + async fn test_factory_for_debug_echo() { + let debug_echo_yaml = r#" + discoveryHandler: + name: debugEcho + discoveryDetails: |+ + descriptions: + - "foo1" + "#; + let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(&debug_echo_yaml).unwrap(); + // Test that errors without environment var set + let mut mock_query_without_var_set = MockEnvVarQuery::new(); + mock_query_without_var_set + .expect_get_env_var() + .returning(|_| Err(VarError::NotPresent)); + assert!(inner_get_discovery_handler(&deserialized, &mock_query_without_var_set,).is_err()); + // Test that succeeds when env var set + let mut mock_query_with_var_set = MockEnvVarQuery::new(); + mock_query_with_var_set + .expect_get_env_var() + .returning(|_| Ok("1".to_string())); + assert!(inner_get_discovery_handler(&deserialized, &mock_query_with_var_set).is_ok()); + } +} diff --git a/agent/src/util/mod.rs b/agent/src/util/mod.rs index cdc5f45ef..44b2bed80 100644 --- a/agent/src/util/mod.rs +++ b/agent/src/util/mod.rs @@ -1,6 +1,11 @@ pub mod config_action; pub mod constants; pub mod crictl_containers; +mod device_plugin_builder; mod device_plugin_service; +pub mod discovery_operator; +pub mod embedded_discovery_handlers; +pub mod registration; pub mod slot_reconciliation; +pub mod streaming_extension; mod v1beta1; diff --git a/agent/src/util/registration.rs b/agent/src/util/registration.rs new file mode 100644 index 000000000..a70cdd109 --- /dev/null +++ b/agent/src/util/registration.rs @@ -0,0 +1,468 @@ +use super::constants::{ + CLOSE_DISCOVERY_HANDLER_CONNECTION_CHANNEL_CAPACITY, ENABLE_DEBUG_ECHO_LABEL, +}; +use akri_discovery_utils::discovery::v0::{ + register_discovery_handler_request::EndpointType, + registration_server::{Registration, RegistrationServer}, + Empty, RegisterDiscoveryHandlerRequest, +}; +use akri_shared::{ + os::env_var::{ActualEnvVarQuery, EnvVarQuery}, + uds::unix_stream, +}; +use futures::TryStreamExt; +#[cfg(test)] +use mock_instant::Instant; +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; +#[cfg(not(test))] +use std::time::Instant; +use tokio::sync::broadcast; +use tonic::{transport::Server, Request, Response, Status}; + +/// Map of `DiscoveryHandlers` 
of the same type (registered with the same name) where key is the endpoint of the +/// Discovery Handler and value is `DiscoveryDetails`. +pub type DiscoveryHandlerDetailsMap = HashMap; + +/// Map of all registered `DiscoveryHandlers` where key is `DiscoveryHandler` name and value is a map of all +/// `DiscoveryHandlers` with that name. +pub type RegisteredDiscoveryHandlerMap = + Arc>>; + +/// Alias illustrating that `AgentRegistration.new_discovery_handler_sender`, sends the Discovery Handler name of the +/// newly registered Discovery Handler. +pub type DiscoveryHandlerName = String; + +/// A Discovery Handler's endpoint, distinguished by URI type +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum DiscoveryHandlerEndpoint { + /// Embedded means the Discovery Handler is running inside the Agent + Embedded, + /// Uds means the Discovery Handler is running on a specified unix domain socket + Uds(String), + /// Network means the Discovery Handler is running at an specified URL + Network(String), +} + +/// Describes the connectivity status of a Discovery Handler. +#[derive(PartialEq, Debug, Clone)] +pub enum DiscoveryHandlerStatus { + /// This discovery handler is currently doing discovery on behalf of the Agent + Active, + /// This discovery handler is available and waiting for a discover call from the Agent + Waiting, + /// Not returning discovery results + Offline(Instant), +} + +/// Details about a `DiscoveryHandler` and a sender for terminating its clients when needed. +#[derive(Debug, Clone)] +pub struct DiscoveryDetails { + /// Name of the `DiscoveryHandler` + pub name: String, + /// Endpoint of the `DiscoveryHandler` + pub endpoint: DiscoveryHandlerEndpoint, + /// Whether instances discovered by the `DiscoveryHandler` can be shared/seen by multiple nodes. + pub shared: bool, + /// Channel over which the Registration service tells a DiscoveryOperator client to close a connection with a + /// `DiscoveryHandler`, if any. A broadcast channel is used so both the sending and receiving ends can be cloned. + pub close_discovery_handler_connection: broadcast::Sender<()>, + /// Connection state of the `DiscoveryHandler`. + pub connectivity_status: DiscoveryHandlerStatus, +} + +/// This maps the endpoint string and endpoint type of a `RegisterDiscoveryHandlerRequest` into a +/// `DiscoveryHandlerEndpoint` so as to support embedded `DiscoveryHandlers`. +pub fn create_discovery_handler_endpoint( + endpoint: &str, + endpoint_type: EndpointType, +) -> DiscoveryHandlerEndpoint { + match endpoint_type { + EndpointType::Network => DiscoveryHandlerEndpoint::Network(endpoint.to_string()), + EndpointType::Uds => DiscoveryHandlerEndpoint::Uds(endpoint.to_string()), + } +} + +/// Hosts a register service that external Discovery Handlers can call in order to be added to the +/// RegisteredDiscoveryHandlerMap that is shared with DiscoveryOperators. When a new Discovery Handler is registered, a +/// message is broadcast to inform any running DiscoveryOperators in case they should use the new Discovery Handler. 
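A stripped-down sketch of that broadcast fan-out (invented names; in the real code the registration service sends over `new_discovery_handler_sender` and each operator compares against its Configuration's handler name):

```rust
use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    // Registration fans out "a new discovery handler named X exists"
    // to every running discovery operator.
    let (new_dh_tx, _) = broadcast::channel::<String>(4);

    // A discovery operator for a Configuration that wants the "onvif" handler:
    let mut rx = new_dh_tx.subscribe();
    let operator = tokio::spawn(async move {
        while let Ok(name) = rx.recv().await {
            if name == "onvif" {
                println!("matching handler registered; kicking off discovery");
                break; // the real code spawns do_discover instead
            }
        }
    });

    // The registration service, on successful register calls:
    new_dh_tx.send("udev".to_string()).unwrap(); // ignored by this operator
    new_dh_tx.send("onvif".to_string()).unwrap(); // triggers discovery
    operator.await.unwrap();
}
```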
+pub struct AgentRegistration { + new_discovery_handler_sender: broadcast::Sender, + registered_discovery_handlers: RegisteredDiscoveryHandlerMap, +} + +impl AgentRegistration { + pub fn new( + new_discovery_handler_sender: broadcast::Sender, + registered_discovery_handlers: RegisteredDiscoveryHandlerMap, + ) -> Self { + AgentRegistration { + new_discovery_handler_sender, + registered_discovery_handlers, + } + } +} + +#[tonic::async_trait] +impl Registration for AgentRegistration { + /// Adds new `DiscoveryHandler`s to the RegisteredDiscoveryHandlerMap and broadcasts a message to any running + /// DiscoveryOperators that a new `DiscoveryHandler` exists. If the discovery handler is already registered at an + /// endpoint and the register request has changed, the previously registered DH is told to stop discovery and is + /// removed from the map. Then, the updated DH is registered. + async fn register_discovery_handler( + &self, + request: Request, + ) -> Result, Status> { + let req = request.into_inner(); + let dh_name = req.name.clone(); + let endpoint = req.endpoint.clone(); + let dh_endpoint = create_discovery_handler_endpoint( + &endpoint, + EndpointType::from_i32(req.endpoint_type).unwrap(), + ); + info!( + "register_discovery_handler - called with register request {:?}", + req + ); + let (close_discovery_handler_connection, _) = + broadcast::channel(CLOSE_DISCOVERY_HANDLER_CONNECTION_CHANNEL_CAPACITY); + let discovery_handler_details = DiscoveryDetails { + name: dh_name.clone(), + endpoint: dh_endpoint.clone(), + shared: req.shared, + close_discovery_handler_connection, + connectivity_status: DiscoveryHandlerStatus::Waiting, + }; + let mut registered_discovery_handlers = self.registered_discovery_handlers.lock().unwrap(); + // Check if any DiscoveryHandlers have been registered under this name + if let Some(register_request_map) = registered_discovery_handlers.get_mut(&dh_name) { + if let Some(dh_details) = register_request_map.get(&dh_endpoint) { + // Check if DH at that endpoint is already registered but changed request + if dh_details.shared != req.shared || dh_details.endpoint != dh_endpoint { + // Stop current discovery with this DH if any. A receiver may not exist if + // 1) no configuration has been applied that uses this DH or + // 2) a connection cannot be made with the DH's endpoint + dh_details + .close_discovery_handler_connection + .send(()) + .unwrap_or_default(); + } else { + // Already registered. Return early. + return Ok(Response::new(Empty {})); + } + } + // New or updated Discovery Handler + register_request_map.insert(dh_endpoint, discovery_handler_details); + } else { + // First Discovery Handler registered under this name + let mut register_request_map = HashMap::new(); + register_request_map.insert(dh_endpoint, discovery_handler_details); + registered_discovery_handlers.insert(dh_name.clone(), register_request_map); + } + // Notify of new Discovery Handler + if self + .new_discovery_handler_sender + .send(dh_name.clone()) + .is_err() + { + // If no configurations have been applied, no receivers can nor need to be updated about the new discovery + // handler + trace!("register_discovery_handler - new {} discovery handler registered but no active discovery operators to receive the message", dh_name); + } + Ok(Response::new(Empty {})) + } +} + +/// Serves the Agent registration service over UDS. 
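For the client side of this socket (used by external Discovery Handlers and by the tests at the bottom of this file), the connection pairs a placeholder URI with a custom connector that actually dials the unix socket. Exact types and required features vary across tonic versions, so treat this as an illustrative sketch:

```rust
use std::convert::TryFrom;
use tokio::net::UnixStream;
use tonic::transport::{Channel, Endpoint, Uri};

// tonic endpoints are URI-based, so the URI here is a dummy; the connector
// closure does the real work (pattern mirrored from the tests below).
async fn connect_uds(socket_path: String) -> Result<Channel, tonic::transport::Error> {
    Endpoint::try_from("dummy://[::]:50051")?
        .connect_with_connector(tower::service_fn(move |_: Uri| {
            UnixStream::connect(socket_path.clone())
        }))
        .await
}
```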
+pub async fn run_registration_server( + discovery_handler_map: RegisteredDiscoveryHandlerMap, + new_discovery_handler_sender: broadcast::Sender, +) -> Result<(), Box> { + internal_run_registration_server( + discovery_handler_map, + new_discovery_handler_sender, + &akri_discovery_utils::get_registration_socket(), + ) + .await +} + +pub async fn internal_run_registration_server( + discovery_handler_map: RegisteredDiscoveryHandlerMap, + new_discovery_handler_sender: broadcast::Sender, + socket_path: &str, +) -> Result<(), Box> { + info!("internal_run_registration_server - entered"); + let registration = AgentRegistration::new(new_discovery_handler_sender, discovery_handler_map); + trace!( + "internal_run_registration_server - registration server listening on socket {}", + socket_path + ); + // Delete socket in case previously created/used + std::fs::remove_file(&socket_path).unwrap_or(()); + let mut uds = + tokio::net::UnixListener::bind(socket_path).expect("Failed to bind to socket path"); + Server::builder() + .add_service(RegistrationServer::new(registration)) + .serve_with_incoming(uds.incoming().map_ok(unix_stream::UnixStream)) + .await?; + trace!( + "internal_run_registration_server - gracefully shutdown ... deleting socket {}", + socket_path + ); + std::fs::remove_file(socket_path).unwrap_or(()); + Ok(()) +} + +#[cfg(any(test, feature = "agent-full"))] +pub fn register_embedded_discovery_handlers( + discovery_handler_map: RegisteredDiscoveryHandlerMap, +) -> Result<(), Box> { + info!("register_embedded_discovery_handlers - entered"); + let env_var_query = ActualEnvVarQuery {}; + inner_register_embedded_discovery_handlers(discovery_handler_map, &env_var_query)?; + Ok(()) +} + +/// Adds all embedded Discovery Handlers to the RegisteredDiscoveryHandlerMap, specifying an endpoint of +/// Endpoint::Embedded to signal that it is an embedded Discovery Handler. +#[cfg(any(test, feature = "agent-full"))] +pub fn inner_register_embedded_discovery_handlers( + discovery_handler_map: RegisteredDiscoveryHandlerMap, + query: &impl EnvVarQuery, +) -> Result<(), Box> { + type Details = (String, bool); + let mut embedded_discovery_handlers: Vec
= Vec::new(); + if query.get_env_var(ENABLE_DEBUG_ECHO_LABEL).is_ok() { + let shared: bool = query + .get_env_var(akri_debug_echo::DEBUG_ECHO_INSTANCES_SHARED_LABEL) + .unwrap() + .parse() + .unwrap(); + embedded_discovery_handlers + .push((akri_debug_echo::DISCOVERY_HANDLER_NAME.to_string(), shared)); + } + #[cfg(feature = "onvif-feat")] + embedded_discovery_handlers.push(( + akri_onvif::DISCOVERY_HANDLER_NAME.to_string(), + akri_onvif::SHARED, + )); + #[cfg(feature = "udev-feat")] + embedded_discovery_handlers.push(( + akri_udev::DISCOVERY_HANDLER_NAME.to_string(), + akri_udev::SHARED, + )); + #[cfg(feature = "opcua-feat")] + embedded_discovery_handlers.push(( + akri_opcua::DISCOVERY_HANDLER_NAME.to_string(), + akri_opcua::SHARED, + )); + + embedded_discovery_handlers.into_iter().for_each(|dh| { + let (name, shared) = dh; + let (close_discovery_handler_connection, _) = + broadcast::channel(CLOSE_DISCOVERY_HANDLER_CONNECTION_CHANNEL_CAPACITY); + let discovery_handler_details = DiscoveryDetails { + name: name.clone(), + endpoint: DiscoveryHandlerEndpoint::Embedded, + shared, + close_discovery_handler_connection, + connectivity_status: DiscoveryHandlerStatus::Waiting, + }; + let mut register_request_map = HashMap::new(); + register_request_map.insert( + DiscoveryHandlerEndpoint::Embedded, + discovery_handler_details, + ); + discovery_handler_map + .lock() + .unwrap() + .insert(name, register_request_map); + }); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use akri_discovery_utils::discovery::v0::registration_client::RegistrationClient; + use akri_shared::os::env_var::MockEnvVarQuery; + use std::convert::TryFrom; + use tempfile::Builder; + use tokio::net::UnixStream; + use tonic::transport::{Endpoint, Uri}; + + #[test] + fn test_register_embedded_discovery_handlers() { + let mut seq = mockall::Sequence::new(); + // Enable debug echo and set environment variable to set whether debug echo instances are shared + let mut mock_env_var = MockEnvVarQuery::new(); + mock_env_var + .expect_get_env_var() + .times(1) + .withf(|label: &str| label == ENABLE_DEBUG_ECHO_LABEL) + .in_sequence(&mut seq) + .returning(|_| Ok("1".to_string())); + mock_env_var + .expect_get_env_var() + .times(1) + .withf(|label: &str| label == akri_debug_echo::DEBUG_ECHO_INSTANCES_SHARED_LABEL) + .in_sequence(&mut seq) + .returning(|_| Ok("false".to_string())); + let discovery_handler_map = Arc::new(Mutex::new(HashMap::new())); + inner_register_embedded_discovery_handlers(discovery_handler_map.clone(), &mock_env_var) + .unwrap(); + assert!(discovery_handler_map + .lock() + .unwrap() + .get("debugEcho") + .is_some()); + #[cfg(feature = "onvif-feat")] + assert!(discovery_handler_map.lock().unwrap().get("onvif").is_some()); + #[cfg(feature = "opcua-feat")] + assert!(discovery_handler_map.lock().unwrap().get("opcua").is_some()); + #[cfg(feature = "udev-feat")] + assert!(discovery_handler_map.lock().unwrap().get("udev").is_some()); + } + + #[test] + fn test_register_embedded_discovery_handlers_no_debug_echo() { + let mut mock_env_var = MockEnvVarQuery::new(); + mock_env_var + .expect_get_env_var() + .times(1) + .withf(|label: &str| label == ENABLE_DEBUG_ECHO_LABEL) + .returning(|_| Err(std::env::VarError::NotPresent)); + let discovery_handler_map = Arc::new(Mutex::new(HashMap::new())); + inner_register_embedded_discovery_handlers(discovery_handler_map.clone(), &mock_env_var) + .unwrap(); + assert!(discovery_handler_map + .lock() + .unwrap() + .get("debugEcho") + .is_none()); + } + + #[tokio::test] + async fn 
test_run_registration_server_reregister_discovery_handler() { + let registration_socket_dir = Builder::new().tempdir().unwrap(); + let registration_socket_path = registration_socket_dir + .path() + .join("agent-registration.sock"); + let registration_socket_path_string_thread = + registration_socket_path.to_str().unwrap().to_string(); + let registration_socket_path_string = + registration_socket_path.to_str().unwrap().to_string(); + let (new_discovery_handler_sender, mut new_discovery_handler_receiver) = + broadcast::channel(4); + let discovery_handler_map = Arc::new(Mutex::new(HashMap::new())); + let thread_discovery_handler_map = discovery_handler_map.clone(); + + // Run registration service + tokio::spawn(async move { + internal_run_registration_server( + thread_discovery_handler_map, + new_discovery_handler_sender, + &registration_socket_path_string_thread, + ) + .await + .unwrap(); + }); + + // Make sure registration service is running + assert!(unix_stream::try_connect(&registration_socket_path_string) + .await + .is_ok()); + // Connect to registration service + let channel = Endpoint::try_from("dummy://[::]:50051") + .unwrap() + .connect_with_connector(tower::service_fn(move |_: Uri| { + UnixStream::connect(registration_socket_path_string.clone()) + })) + .await + .unwrap(); + // Create registration client + let mut registration_client = RegistrationClient::new(channel); + + // Test registering a discovery handler with UDS endpoint + let endpoint_string = "/path/to/socket/name.sock".to_string(); + let discovery_handler_endpoint = DiscoveryHandlerEndpoint::Uds(endpoint_string.clone()); + let request = RegisterDiscoveryHandlerRequest { + name: "name".to_string(), + endpoint: endpoint_string.clone(), + endpoint_type: EndpointType::Uds as i32, + shared: true, + }; + assert!(registration_client + .register_discovery_handler(request.clone()) + .await + .is_ok()); + assert_eq!(new_discovery_handler_receiver.recv().await.unwrap(), "name"); + let discovery_handler_details = discovery_handler_map + .lock() + .unwrap() + .get("name") + .unwrap() + .get(&discovery_handler_endpoint) + .unwrap() + .clone(); + assert_eq!( + discovery_handler_details.endpoint, + DiscoveryHandlerEndpoint::Uds(request.endpoint.clone()) + ); + assert_eq!(discovery_handler_details.shared, request.shared); + + // When a discovery handler is re-registered with the same register request, no message should be sent to + // terminate any existing discovery clients. + let mut stop_discovery_receiver = discovery_handler_details + .close_discovery_handler_connection + .subscribe(); + assert!(registration_client + .register_discovery_handler(request) + .await + .is_ok()); + assert!(stop_discovery_receiver.try_recv().is_err()); + + // When a discovery handler at a specified endpoint re-registers at the same endpoint but with a different + // locality, current discovery handler clients should be notified to terminate and the entry in the + // RegisteredDiscoveryHandlerMap should be replaced.
+ let local_request = RegisterDiscoveryHandlerRequest { + name: "name".to_string(), + endpoint: endpoint_string, + endpoint_type: EndpointType::Uds as i32, + shared: false, + }; + assert!(registration_client + .register_discovery_handler(local_request.clone()) + .await + .is_ok()); + assert!(stop_discovery_receiver.try_recv().is_ok()); + let discovery_handler_details = discovery_handler_map + .lock() + .unwrap() + .get("name") + .unwrap() + .get(&discovery_handler_endpoint) + .unwrap() + .clone(); + assert_eq!( + discovery_handler_details.endpoint, + DiscoveryHandlerEndpoint::Uds(local_request.endpoint) + ); + assert_eq!(discovery_handler_details.shared, local_request.shared); + } + + #[test] + fn test_create_discovery_handler_endpoint() { + // Assert the endpoint with EndpointType::Uds is converted to DiscoveryHandlerEndpoint::Uds(endpoint) + assert_eq!( + create_discovery_handler_endpoint("/path/to/socket.sock", EndpointType::Uds), + DiscoveryHandlerEndpoint::Uds("/path/to/socket.sock".to_string()) + ); + + // Assert the endpoint with EndpointType::Network is converted to DiscoveryHandlerEndpoint::Network(endpoint) + assert_eq!( + create_discovery_handler_endpoint("http://10.1.2.3:1000", EndpointType::Network), + DiscoveryHandlerEndpoint::Network("http://10.1.2.3:1000".to_string()) + ); + } +}
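For orientation, here is a minimal sketch of the client side of the registration protocol exercised by the test above: an external Discovery Handler dialing the Agent's registration socket over UDS and announcing itself. The socket paths and handler name are illustrative, and the `v0` import paths are assumed from `akri-discovery-utils`; this is a sketch, not code from the change itself.

```rust
// Sketch only: register an external Discovery Handler with the Agent.
// Paths and the handler name are illustrative; imports assume the same
// akri-discovery-utils v0 module used by the test above.
use akri_discovery_utils::discovery::v0::{
    registration_client::RegistrationClient, EndpointType, RegisterDiscoveryHandlerRequest,
};
use std::convert::TryFrom;
use tokio::net::UnixStream;
use tonic::transport::{Endpoint, Uri};

async fn register_with_agent() -> Result<(), Box<dyn std::error::Error>> {
    // The Agent's registration socket (path illustrative).
    let agent_socket = "/var/lib/akri/agent-registration.sock".to_string();
    // tonic requires a URI, but the connector below ignores it and dials UDS instead.
    let channel = Endpoint::try_from("dummy://[::]:50051")?
        .connect_with_connector(tower::service_fn(move |_: Uri| {
            UnixStream::connect(agent_socket.clone())
        }))
        .await?;
    let mut client = RegistrationClient::new(channel);
    // Tell the Agent where to reach this Discovery Handler and whether the
    // devices it discovers are visible to (shared by) multiple nodes.
    client
        .register_discovery_handler(RegisterDiscoveryHandlerRequest {
            name: "http".to_string(),
            endpoint: "/var/lib/akri/http-discovery.sock".to_string(),
            endpoint_type: EndpointType::Uds as i32,
            shared: true,
        })
        .await?;
    Ok(())
}
```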
diff --git a/agent/src/util/slot_reconciliation.rs b/agent/src/util/slot_reconciliation.rs index 9a5f76f2b..c59600a16 100644 --- a/agent/src/util/slot_reconciliation.rs +++ b/agent/src/util/slot_reconciliation.rs @@ -2,7 +2,8 @@ use super::{constants::SLOT_RECONCILIATION_CHECK_DELAY_SECS, crictl_containers}; use akri_shared::{akri::instance::Instance, k8s::KubeInterface}; use async_trait::async_trait; use k8s_openapi::api::core::v1::PodStatus; -use mockall::predicate::*; +#[cfg(test)] +use mockall::{automock, predicate::*}; use std::{ collections::{HashMap, HashSet}, sync::{Arc, Mutex}, @@ -12,15 +13,7 @@ use tokio::process::Command; type SlotQueryResult = Result<HashSet<String>, Box<dyn std::error::Error + Send + Sync + 'static>>; -// -// mockall and async_trait do not work effortlessly together ... to enable both, -// follow the example here: -// https://github.com/mibes/mockall-async/blob/53aec15219a720ef5ac483959ff8821cb7d656ae/src/main.rs -// -// When async traits are supported by Rust without the async_trait crate, we should -// add: -// #[automock] -// +#[cfg_attr(test, automock)] #[async_trait] pub trait SlotQuery { async fn get_node_slots(&self) -> SlotQueryResult; @@ -258,8 +251,7 @@ impl DevicePluginSlotReconciler { .collect::<HashMap<String, String>>(); let modified_instance = Instance { configuration_name: instance.spec.configuration_name.clone(), - metadata: instance.spec.metadata.clone(), - rbac: instance.spec.rbac.clone(), + broker_properties: instance.spec.broker_properties.clone(), shared: instance.spec.shared, device_usage: modified_device_usage, nodes: instance.spec.nodes.clone(), @@ -350,49 +342,14 @@ pub async fn periodic_slot_reconciliation( } } -pub mod test_crictl { - use super::{SlotQuery, SlotQueryResult}; - use async_trait::async_trait; - use mockall::predicate::*; - use mockall::*; - - // - // mockall and async_trait do not work effortlessly together ... to enable both, - // follow the example here: - // https://github.com/mibes/mockall-async/blob/53aec15219a720ef5ac483959ff8821cb7d656ae/src/main.rs - // - // We can probably eliminate this when async traits are supported by Rust without - // the async_trait crate. - // - mock! { - pub SlotQueryImpl { - fn get_node_slots(&self) -> SlotQueryResult; - } - } - - #[async_trait] - impl SlotQuery for MockSlotQueryImpl { - async fn get_node_slots(&self) -> SlotQueryResult { - self.get_node_slots() - } - } -} - #[cfg(test)] mod reconcile_tests { - use super::test_crictl::MockSlotQueryImpl; use super::*; - use akri_shared::{ - akri::instance::KubeAkriInstanceList, k8s::test_kube::MockKubeImpl, os::file, - }; + use akri_shared::{akri::instance::KubeAkriInstanceList, k8s::MockKubeInterface, os::file}; use k8s_openapi::api::core::v1::{PodSpec, PodStatus}; use kube::api::{Object, ObjectList}; - fn configure_get_node_slots( - mock: &mut MockSlotQueryImpl, - result: HashSet<String>, - error: bool, - ) { + fn configure_get_node_slots(mock: &mut MockSlotQuery, result: HashSet<String>, error: bool) { mock.expect_get_node_slots().times(1).returning(move || { if !error { Ok(result.clone()) @@ -402,7 +359,7 @@ mod reconcile_tests { }); } - fn configure_get_instances(mock: &mut MockKubeImpl, result_file: &'static str) { + fn configure_get_instances(mock: &mut MockKubeInterface, result_file: &'static str) { mock.expect_get_instances().times(1).returning(move || { let instance_list_json = file::read_file_to_string(result_file); let instance_list: KubeAkriInstanceList = @@ -412,7 +369,7 @@ mod reconcile_tests { } fn configure_find_pods_with_field( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, selector: &'static str, result_file: &'static str, ) { @@ -444,7 +401,7 @@ mod reconcile_tests { grace_period: Duration, reconciler: &DevicePluginSlotReconciler, ) { - let mut slot_query = MockSlotQueryImpl::new(); + let mut slot_query = MockSlotQuery::new(); // slot_query to identify one slot used by this node configure_get_node_slots( &mut slot_query, @@ -452,7 +409,7 @@ mod reconcile_tests { node_slots.node_slots_error, ); - let mut kube_interface = MockKubeImpl::new(); + let mut kube_interface = MockKubeInterface::new(); if !node_slots.node_slots_error { // kube_interface to find Instance with node-a using slots: // config-a-359973-1 & config-a-359973-3 diff --git a/agent/src/util/streaming_extension.rs b/agent/src/util/streaming_extension.rs new file mode 100644 index 000000000..8bc15fce5 --- /dev/null +++ b/agent/src/util/streaming_extension.rs @@ -0,0 +1,31 @@ +use akri_discovery_utils::discovery::v0::DiscoverResponse; +use async_trait::async_trait; +use tokio::sync::mpsc; +use tonic::{Code, Status}; + +/// An extension trait that is used to get the latest message from both embedded and +/// external Discovery Handlers' streams. +#[async_trait] +pub trait StreamingExt: Send { + async fn get_message(&mut self) -> Result<Option<DiscoverResponse>, Status>; +} + +#[async_trait] +impl StreamingExt for mpsc::Receiver<Result<DiscoverResponse, Status>> { + async fn get_message(&mut self) -> Result<Option<DiscoverResponse>, Status> { + match self.recv().await { + Some(result) => match result { + Ok(res) => Ok(Some(res)), + Err(e) => Err(e), + }, + None => Err(Status::new(Code::Unavailable, "broken pipe")), + } + } +} + +#[async_trait] +impl StreamingExt for tonic::codec::Streaming<DiscoverResponse> { + async fn get_message(&mut self) -> Result<Option<DiscoverResponse>, Status> { + self.message().await + } +}
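To see why this trait earns its keep, here is a hedged sketch of a caller that drains discovery results through `StreamingExt` without caring whether the underlying stream is an embedded `mpsc` channel or an external `tonic` stream. Because `async_trait` boxes the returned future, the trait is object safe and a trait object suffices; the function is illustrative, not Agent code, and assumes only the trait and types defined above.

```rust
// Sketch only: one polling loop for both embedded and external streams,
// assuming just the trait and types defined above.
use akri_discovery_utils::discovery::v0::DiscoverResponse;

async fn drain_discovery_results(
    stream: &mut dyn StreamingExt,
) -> Result<Vec<DiscoverResponse>, tonic::Status> {
    let mut responses = Vec::new();
    // Ok(None) signals a cleanly closed tonic stream; a dropped embedded
    // channel instead surfaces as Err(Status::new(Code::Unavailable, "broken pipe")).
    while let Some(response) = stream.get_message().await? {
        responses.push(response);
    }
    Ok(responses)
}
```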
diff --git a/art/README.md b/art/README.md new file mode 100644 index 000000000..73cf10f1f --- /dev/null +++ b/art/README.md @@ -0,0 +1,63 @@ +# Akri Logo & Assets + + +## Logo + +![Screen Shot 2020-11-24 at 11 10 44 AM](https://user-images.githubusercontent.com/686194/104776503-0c842b80-572f-11eb-8c69-a74df6d3d3f3.png) + + +--- + +This directory contains the Akri icon and logo in different settings. Choose the format that works best for the scenario in which the logo is being used: + +* Is the background light or dark? +* Is there more vertical or horizontal space to work with? +* Will the logo appear alongside the project name (like on a social media profile avatar)? If so, omit the wordmark and just use the icon. Avatars should retain white space around the branding to ensure it is not cropped or squished when viewed in context. + +## Typography + +Akri's wordmark uses a monospaced typeface (Inconsolata) to lend a technical, developer-centric style. Ideally, complementary text should follow suit - I recommend a common, device-friendly default [monospaced typeface](https://en.wikipedia.org/wiki/List_of_monospaced_typefaces) for styling headlines to pair well with Inconsolata. + +Standard page copy *should not* be written in a monospaced font in this way - this typography style is decorative and should be used sparingly, or legibility can suffer. Standard page copy should use [standard sans-serif typefaces](https://fontsarena.com/blog/operating-systems-default-sans-serif-fonts/) such as Helvetica, Segoe, and Arial. + +* [Download Inconsolata](https://fonts.google.com/specimen/Inconsolata) _Open Font License_ +* [Standard Monospaced Typefaces](https://www.cssfontstack.com/) +* [Open Font License](https://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&id=OFL) + +--- + +## Akri Color Palette + +akri-colors + +--- + +Brand only (not for text): + +``` +$purplebrand: #3E446F; +$greenbrand: #78FFC9; +$greenshade: #8CCFCE; +$purpleshade: #524886; +``` + +Accent colors: + +``` +$accentgreen: #01A7C2; +$accentblue: #8AC4FF; +$accentorange: #DC9E82; +$accentgold: #D4B483; +$accentbrown: #92817A; +$accentgrey: #AFAAB9; +``` + +Content colors: + +``` +$dpurple: #343867; // ideal for text +$dgreen: #0AABA9; +$dgray: #41455D; +$lgray: #EBEDF2; +$lwhite: #F5F9FD; // ideal for bg +``` diff --git a/art/icon/akri-icon-dark.png b/art/icon/akri-icon-dark.png new file mode 100644 index 000000000..f92bcd8cd Binary files /dev/null and b/art/icon/akri-icon-dark.png differ diff --git a/art/icon/akri-icon-dark.svg b/art/icon/akri-icon-dark.svg new file mode 100644 index 000000000..600ba35fa --- /dev/null +++ b/art/icon/akri-icon-dark.svg @@ -0,0 +1,33 @@ + + + + icon dark + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/art/icon/akri-icon-light.png b/art/icon/akri-icon-light.png new file mode 100644 index 000000000..1d9611f0e Binary files /dev/null and b/art/icon/akri-icon-light.png differ diff --git a/art/icon/akri-icon-light.svg b/art/icon/akri-icon-light.svg new file mode 100644 index 000000000..0245fc8ab --- /dev/null +++ b/art/icon/akri-icon-light.svg @@ -0,0 +1,33 @@ + + + + icon + Created with Sketch.
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/art/icon/favicons/android-chrome-192x192.png b/art/icon/favicons/android-chrome-192x192.png new file mode 100644 index 000000000..1ce27e72b Binary files /dev/null and b/art/icon/favicons/android-chrome-192x192.png differ diff --git a/art/icon/favicons/android-chrome-512x512.png b/art/icon/favicons/android-chrome-512x512.png new file mode 100644 index 000000000..5a076f3d2 Binary files /dev/null and b/art/icon/favicons/android-chrome-512x512.png differ diff --git a/art/icon/favicons/apple-touch-icon.png b/art/icon/favicons/apple-touch-icon.png new file mode 100644 index 000000000..d6ba9de0b Binary files /dev/null and b/art/icon/favicons/apple-touch-icon.png differ diff --git a/art/icon/favicons/favicon-16x16.png b/art/icon/favicons/favicon-16x16.png new file mode 100644 index 000000000..5e8f0d60f Binary files /dev/null and b/art/icon/favicons/favicon-16x16.png differ diff --git a/art/icon/favicons/favicon-32x32.png b/art/icon/favicons/favicon-32x32.png new file mode 100644 index 000000000..5ac107677 Binary files /dev/null and b/art/icon/favicons/favicon-32x32.png differ diff --git a/art/icon/favicons/favicon.ico b/art/icon/favicons/favicon.ico new file mode 100644 index 000000000..88d6c1650 Binary files /dev/null and b/art/icon/favicons/favicon.ico differ diff --git a/art/icon/favicons/mstile-150x150.png b/art/icon/favicons/mstile-150x150.png new file mode 100644 index 000000000..d0caa81d3 Binary files /dev/null and b/art/icon/favicons/mstile-150x150.png differ diff --git a/art/logo-horizontal/akri-logo-horizontal-dark.png b/art/logo-horizontal/akri-logo-horizontal-dark.png new file mode 100644 index 000000000..e82eb541e Binary files /dev/null and b/art/logo-horizontal/akri-logo-horizontal-dark.png differ diff --git a/art/logo-horizontal/akri-logo-horizontal-dark.svg b/art/logo-horizontal/akri-logo-horizontal-dark.svg new file mode 100644 index 000000000..52dde6a97 --- /dev/null +++ b/art/logo-horizontal/akri-logo-horizontal-dark.svg @@ -0,0 +1,34 @@ + + + + logo strip + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/art/logo-horizontal/akri-logo-horizontal-light.png b/art/logo-horizontal/akri-logo-horizontal-light.png new file mode 100644 index 000000000..66ad2f385 Binary files /dev/null and b/art/logo-horizontal/akri-logo-horizontal-light.png differ diff --git a/art/logo-horizontal/akri-logo-horizontal-light.svg b/art/logo-horizontal/akri-logo-horizontal-light.svg new file mode 100644 index 000000000..c2bcb0500 --- /dev/null +++ b/art/logo-horizontal/akri-logo-horizontal-light.svg @@ -0,0 +1,34 @@ + + + + logo strip + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/art/logo-stacked/akri-logo-stack-dark.png b/art/logo-stacked/akri-logo-stack-dark.png new file mode 100644 index 000000000..6e71d9930 Binary files /dev/null and b/art/logo-stacked/akri-logo-stack-dark.png differ diff --git a/art/logo-stacked/akri-logo-stack-dark.svg b/art/logo-stacked/akri-logo-stack-dark.svg new file mode 100644 index 000000000..10ef2624d --- /dev/null +++ b/art/logo-stacked/akri-logo-stack-dark.svg @@ -0,0 +1,34 @@ + + + + logo stack dark + Created with Sketch. 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/art/logo-stacked/akri-logo-stack-light.png b/art/logo-stacked/akri-logo-stack-light.png new file mode 100644 index 000000000..b82bed3cd Binary files /dev/null and b/art/logo-stacked/akri-logo-stack-light.png differ diff --git a/art/logo-stacked/akri-logo-stack-light.svg b/art/logo-stacked/akri-logo-stack-light.svg new file mode 100644 index 000000000..e8bdb5d26 --- /dev/null +++ b/art/logo-stacked/akri-logo-stack-light.svg @@ -0,0 +1,34 @@ + + + + logo stack + Created with Sketch. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/art/logo-theme-004.sketch b/art/logo-theme-004.sketch new file mode 100644 index 000000000..2a0716ac6 Binary files /dev/null and b/art/logo-theme-004.sketch differ diff --git a/art/styleguide/akri-colors.png b/art/styleguide/akri-colors.png new file mode 100644 index 000000000..cf786d4ad Binary files /dev/null and b/art/styleguide/akri-colors.png differ diff --git a/art/styleguide/akri-logos.png b/art/styleguide/akri-logos.png new file mode 100644 index 000000000..d469db76a Binary files /dev/null and b/art/styleguide/akri-logos.png differ diff --git a/art/wordmark/akri-wordmark-dark.png b/art/wordmark/akri-wordmark-dark.png new file mode 100644 index 000000000..6936f50bd Binary files /dev/null and b/art/wordmark/akri-wordmark-dark.png differ diff --git a/art/wordmark/akri-wordmark-dark.svg b/art/wordmark/akri-wordmark-dark.svg new file mode 100644 index 000000000..d4371b5fe --- /dev/null +++ b/art/wordmark/akri-wordmark-dark.svg @@ -0,0 +1,10 @@ + + + + wordmark + Created with Sketch. + + + + + \ No newline at end of file diff --git a/art/wordmark/akri-wordmark-white.png b/art/wordmark/akri-wordmark-white.png new file mode 100644 index 000000000..a305b75b0 Binary files /dev/null and b/art/wordmark/akri-wordmark-white.png differ diff --git a/art/wordmark/akri-wordmark-white.svg b/art/wordmark/akri-wordmark-white.svg new file mode 100644 index 000000000..3eca508df --- /dev/null +++ b/art/wordmark/akri-wordmark-white.svg @@ -0,0 +1,10 @@ + + + + wordmark + Created with Sketch. 
+ + + + + \ No newline at end of file diff --git a/build/akri-containers.mk b/build/akri-containers.mk index 25fb36369..85fe9402f 100644 --- a/build/akri-containers.mk +++ b/build/akri-containers.mk @@ -1,5 +1,26 @@ USE_OPENCV_BASE_VERSION = 0.0.7 +# Akri container defines +include build/akri-rust-containers.mk +include build/akri-dotnet-containers.mk +include build/akri-python-containers.mk + +# +# Functions for building Agent with or without Discovery Handlers +# +# Build the Agent without any Discovery Handlers embedded +define agent_build_slim + CARGO_INCREMENTAL=$(CARGO_INCREMENTAL) PKG_CONFIG_ALLOW_CROSS=1 cross build $(if $(BUILD_RELEASE_FLAG), --release) --target=$(1) --manifest-path agent/Cargo.toml +endef + +# Build the Agent with features that embed Discovery Handlers and rename the executable in case subsequently +# building a slim Agent +define agent_build_with_features + CARGO_INCREMENTAL=$(CARGO_INCREMENTAL) PKG_CONFIG_ALLOW_CROSS=1 cross build $(if $(BUILD_RELEASE_FLAG), --release) --target=$(1) --manifest-path agent/Cargo.toml \ + --features "${AGENT_FEATURES}" + mv target/$(1)/$(if $(BUILD_RELEASE_FLAG),release,debug)/agent target/$(1)/$(if $(BUILD_RELEASE_FLAG),release,debug)/${FULL_AGENT_EXECUTABLE_NAME} +endef + # # # INSTALL-CROSS: install cargo cross building tool: @@ -18,336 +39,66 @@ install-cross: # # To make all platforms: `make akri` # To make specific platforms: `BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=1 make akri` -# To make single component: `make akri-[controller|agent|udev|onvif|streaming|opcua-monitoring|anomaly-detection]` -# To make specific platforms: `BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=1 make akri-[controller|agent|udev|onvif|streaming|opcua-monitoring|anomaly-detection]` -# +# To make single component: `make akri-[controller|agent|udev|onvif|streaming|opcua-monitoring|anomaly-detection|webhook-configuration|debug-echo-discovery|http-discovery|udev-discovery|onvif-discovery|opcua-discovery]` +# To make specific platforms: `BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=1 make akri-[controller|agent|udev|onvif|streaming|opcua-monitoring|anomaly-detection|webhook-configuration|debug-echo-discovery|http-discovery|udev-discovery|onvif-discovery|opcua-discovery]` +# To make an agent with embedded discovery handlers (on all platforms): `FULL_AGENT_EXECUTABLE_NAME=agent AGENT_FEATURES="agent-full onvif-feat opcua-feat udev-feat" make akri-agent` +# To make a slim agent without any embedded discovery handlers: `BUILD_SLIM_AGENT=1 make akri-agent` +# To make a slim and full Agent, with full agent executable renamed agent-full: `AGENT_FEATURES="agent-full onvif-feat opcua-feat udev-feat" BUILD_SLIM_AGENT=1 make akri-agent` # .PHONY: akri -akri: akri-build akri-docker -akri-controller: akri-build akri-docker-controller -akri-agent: akri-build akri-docker-agent -akri-udev: akri-build akri-docker-udev -akri-onvif: akri-build akri-docker-onvif -akri-streaming: akri-build akri-docker-streaming -akri-opcua-monitoring: akri-docker-opcua-monitoring -akri-anomaly-detection: akri-docker-anomaly-detection - +akri: akri-build akri-docker-all akri-build: install-cross akri-cross-build -akri-docker: akri-docker-build akri-docker-push-per-arch akri-docker-push-multi-arch-create akri-docker-push-multi-arch-push -akri-docker-controller: controller-build controller-docker-per-arch controller-docker-multi-arch-create controller-docker-multi-arch-push -akri-docker-agent: agent-build agent-docker-per-arch agent-docker-multi-arch-create agent-docker-multi-arch-push -akri-docker-udev: 
udev-build udev-docker-per-arch udev-docker-multi-arch-create udev-docker-multi-arch-push -akri-docker-onvif: onvif-build onvif-docker-per-arch onvif-docker-multi-arch-create onvif-docker-multi-arch-push -akri-docker-streaming: streaming-build streaming-docker-per-arch streaming-docker-multi-arch-create streaming-docker-multi-arch-push -akri-docker-opcua-monitoring: opcua-monitoring-build opcua-monitoring-docker-per-arch opcua-monitoring-docker-multi-arch-create opcua-monitoring-docker-multi-arch-push -akri-docker-anomaly-detection: anomaly-detection-build anomaly-detection-docker-per-arch anomaly-detection-docker-multi-arch-create anomaly-detection-docker-multi-arch-push +akri-docker-all: akri-docker-controller akri-docker-agent akri-docker-udev akri-docker-onvif akri-docker-streaming akri-docker-opcua-monitoring akri-docker-anomaly-detection akri-docker-webhook-configuration akri-docker-debug-echo-discovery akri-docker-http-discovery akri-docker-onvif-discovery akri-docker-opcua-discovery akri-docker-udev-discovery akri-cross-build: akri-cross-build-amd64 akri-cross-build-arm32 akri-cross-build-arm64 akri-cross-build-amd64: ifeq (1, $(BUILD_AMD64)) - PKG_CONFIG_ALLOW_CROSS=1 cross build --release --target=$(AMD64_TARGET) -endif -akri-cross-build-arm32: -ifeq (1, ${BUILD_ARM32}) - PKG_CONFIG_ALLOW_CROSS=1 cross build --release --target=$(ARM32V7_TARGET) -endif -akri-cross-build-arm64: -ifeq (1, ${BUILD_ARM64}) - PKG_CONFIG_ALLOW_CROSS=1 cross build --release --target=$(ARM64V8_TARGET) -endif - -akri-docker-build: controller-build agent-build udev-build onvif-build streaming-build opcua-monitoring-build anomaly-detection-build -controller-build: controller-build-amd64 controller-build-arm32 controller-build-arm64 -controller-build-amd64: -ifeq (1, ${BUILD_AMD64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.controller . -t $(PREFIX)/controller:$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg PLATFORM=$(AMD64_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(AMD64_TARGET) -endif -controller-build-arm32: -ifeq (1, ${BUILD_ARM32}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.controller . -t $(PREFIX)/controller:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg PLATFORM=$(ARM32V7_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(ARM32V7_TARGET) -endif -controller-build-arm64: -ifeq (1, ${BUILD_ARM64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.controller . -t $(PREFIX)/controller:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg PLATFORM=$(ARM64V8_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(ARM64V8_TARGET) -endif - -agent-build: agent-build-amd64 agent-build-arm32 agent-build-arm64 -agent-build-amd64: -ifeq (1, ${BUILD_AMD64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.agent . -t $(PREFIX)/agent:$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg PLATFORM=$(AMD64_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(AMD64_TARGET) -endif -agent-build-arm32: -ifeq (1, ${BUILD_ARM32}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.agent . -t $(PREFIX)/agent:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg PLATFORM=$(ARM32V7_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(ARM32V7_TARGET) -endif -agent-build-arm64: -ifeq (1, ${BUILD_ARM64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.agent . 
-t $(PREFIX)/agent:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg PLATFORM=$(ARM64V8_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(ARM64V8_TARGET) -endif - -udev-build: udev-build-amd64 udev-build-arm32 udev-build-arm64 -udev-build-amd64: -ifeq (1, ${BUILD_AMD64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.udev-video-broker . -t $(PREFIX)/udev-video-broker:$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg PLATFORM=$(AMD64_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(AMD64_TARGET) -endif -udev-build-arm32: -ifeq (1, ${BUILD_ARM32}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.udev-video-broker . -t $(PREFIX)/udev-video-broker:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg PLATFORM=$(ARM32V7_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(ARM32V7_TARGET) -endif -udev-build-arm64: -ifeq (1, ${BUILD_ARM64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.udev-video-broker . -t $(PREFIX)/udev-video-broker:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg PLATFORM=$(ARM64V8_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(ARM64V8_TARGET) -endif - -onvif-build: onvif-build-amd64 onvif-build-arm32 onvif-build-arm64 -onvif-build-amd64: -ifeq (1, ${BUILD_AMD64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.onvif-video-broker . -t $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=$(USE_OPENCV_BASE_VERSION)-$(AMD64_SUFFIX) --build-arg DOTNET_PUBLISH_RUNTIME=linux-x64 -endif -onvif-build-arm32: -ifeq (1, ${BUILD_ARM32}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.onvif-video-broker . -t $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=$(USE_OPENCV_BASE_VERSION)-$(ARM32V7_SUFFIX) --build-arg DOTNET_PUBLISH_RUNTIME=linux-arm -endif -onvif-build-arm64: -ifeq (1, ${BUILD_ARM64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.onvif-video-broker . -t $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=$(USE_OPENCV_BASE_VERSION)-$(ARM64V8_SUFFIX) --build-arg DOTNET_PUBLISH_RUNTIME=linux-arm64 -endif - -opcua-monitoring-build: opcua-monitoring-build-amd64 opcua-monitoring-build-arm32 opcua-monitoring-build-arm64 -opcua-monitoring-build-amd64: -ifeq (1, ${BUILD_AMD64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.opcua-monitoring-broker . -t $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=3.1-buster-slim --build-arg DOTNET_PUBLISH_RUNTIME=linux-x64 -endif -opcua-monitoring-build-arm32: -ifeq (1, ${BUILD_ARM32}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.opcua-monitoring-broker . -t $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=3.1-buster-slim-$(ARM32V7_SUFFIX) --build-arg DOTNET_PUBLISH_RUNTIME=linux-arm -endif -opcua-monitoring-build-arm64: -ifeq (1, ${BUILD_ARM64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.opcua-monitoring-broker . -t $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=3.1-buster-slim-$(ARM64V8_SUFFIX) --build-arg DOTNET_PUBLISH_RUNTIME=linux-arm64 -endif - -anomaly-detection-build: anomaly-detection-build-amd64 anomaly-detection-build-arm32 anomaly-detection-build-arm64 -anomaly-detection-build-amd64: -ifeq (1, ${BUILD_AMD64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.anomaly-detection-app . 
-t $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg PLATFORM=$(AMD64_SUFFIX) -endif -anomaly-detection-build-arm32: -ifeq (1, ${BUILD_ARM32}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.anomaly-detection-app . -t $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg PLATFORM=$(ARM32V7_SUFFIX) -endif -anomaly-detection-build-arm64: -ifeq (1, ${BUILD_ARM64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.anomaly-detection-app . -t $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg PLATFORM=$(ARM64V8_SUFFIX) -endif - -streaming-build: streaming-build-amd64 streaming-build-arm32 streaming-build-arm64 -streaming-build-amd64: -ifeq (1, ${BUILD_AMD64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.video-streaming-app . -t $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg PLATFORM=$(AMD64_SUFFIX) -endif -streaming-build-arm32: -ifeq (1, ${BUILD_ARM32}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.video-streaming-app . -t $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg PLATFORM=$(ARM32V7_SUFFIX) -endif -streaming-build-arm64: -ifeq (1, ${BUILD_ARM64}) - docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.video-streaming-app . -t $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg PLATFORM=$(ARM64V8_SUFFIX) -endif - -akri-docker-push-per-arch: controller-docker-per-arch agent-docker-per-arch udev-docker-per-arch onvif-docker-per-arch streaming-docker-per-arch opcua-monitoring-docker-per-arch anomaly-detection-docker-per-arch - -controller-docker-per-arch: controller-docker-per-arch-amd64 controller-docker-per-arch-arm32 controller-docker-per-arch-arm64 -controller-docker-per-arch-amd64: -ifeq (1, ${BUILD_AMD64}) - docker push $(PREFIX)/controller:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -controller-docker-per-arch-arm32: -ifeq (1, ${BUILD_ARM32}) - docker push $(PREFIX)/controller:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) -endif -controller-docker-per-arch-arm64: -ifeq (1, ${BUILD_ARM64}) - docker push $(PREFIX)/controller:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -agent-docker-per-arch: agent-docker-per-arch-amd64 agent-docker-per-arch-arm32 agent-docker-per-arch-arm64 -agent-docker-per-arch-amd64: -ifeq (1, ${BUILD_AMD64}) - docker push $(PREFIX)/agent:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -agent-docker-per-arch-arm32: -ifeq (1, ${BUILD_ARM32}) - docker push $(PREFIX)/agent:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) + CARGO_INCREMENTAL=$(CARGO_INCREMENTAL) PKG_CONFIG_ALLOW_CROSS=1 cross build $(if $(BUILD_RELEASE_FLAG), --release) --target=$(AMD64_TARGET) --workspace --exclude agent $(foreach package,$(wordlist 1, 100, $(PACKAGES_TO_EXCLUDE)),--exclude $(package)) +ifneq ($(AGENT_FEATURES),) + $(call agent_build_with_features,$(AMD64_TARGET)) endif -agent-docker-per-arch-arm64: -ifeq (1, ${BUILD_ARM64}) - docker push $(PREFIX)/agent:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -onvif-docker-per-arch: onvif-docker-per-arch-amd64 onvif-docker-per-arch-arm32 onvif-docker-per-arch-arm64 -onvif-docker-per-arch-amd64: -ifeq (1, ${BUILD_AMD64}) - docker push $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -onvif-docker-per-arch-arm32: -ifeq (1, ${BUILD_ARM32}) - docker push $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) -endif -onvif-docker-per-arch-arm64: -ifeq (1, ${BUILD_ARM64}) - docker push $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) 
-endif - -opcua-monitoring-docker-per-arch: opcua-monitoring-docker-per-arch-amd64 opcua-monitoring-docker-per-arch-arm32 opcua-monitoring-docker-per-arch-arm64 -opcua-monitoring-docker-per-arch-amd64: -ifeq (1, ${BUILD_AMD64}) - docker push $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -opcua-monitoring-docker-per-arch-arm32: -ifeq (1, ${BUILD_ARM32}) - docker push $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) -endif -opcua-monitoring-docker-per-arch-arm64: -ifeq (1, ${BUILD_ARM64}) - docker push $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -udev-docker-per-arch: udev-docker-per-arch-amd64 udev-docker-per-arch-arm32 udev-docker-per-arch-arm64 -udev-docker-per-arch-amd64: -ifeq (1, ${BUILD_AMD64}) - docker push $(PREFIX)/udev-video-broker:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -udev-docker-per-arch-arm32: -ifeq (1, ${BUILD_ARM32}) - docker push $(PREFIX)/udev-video-broker:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) -endif -udev-docker-per-arch-arm64: -ifeq (1, ${BUILD_ARM64}) - docker push $(PREFIX)/udev-video-broker:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -anomaly-detection-docker-per-arch: anomaly-detection-docker-per-arch-amd64 anomaly-detection-docker-per-arch-arm32 anomaly-detection-docker-per-arch-arm64 -anomaly-detection-docker-per-arch-amd64: -ifeq (1, ${BUILD_AMD64}) - docker push $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -anomaly-detection-docker-per-arch-arm32: -ifeq (1, ${BUILD_ARM32}) - docker push $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) -endif -anomaly-detection-docker-per-arch-arm64: -ifeq (1, ${BUILD_ARM64}) - docker push $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -streaming-docker-per-arch: streaming-docker-per-arch-amd64 streaming-docker-per-arch-arm32 streaming-docker-per-arch-arm64 -streaming-docker-per-arch-amd64: -ifeq (1, ${BUILD_AMD64}) - docker push $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -streaming-docker-per-arch-arm32: -ifeq (1, ${BUILD_ARM32}) - docker push $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) -endif -streaming-docker-per-arch-arm64: -ifeq (1, ${BUILD_ARM64}) - docker push $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -akri-docker-push-multi-arch-create: controller-docker-multi-arch-create agent-docker-multi-arch-create udev-docker-multi-arch-create onvif-docker-multi-arch-create streaming-docker-multi-arch-create opcua-monitoring-docker-multi-arch-create anomaly-detection-docker-multi-arch-create - -controller-docker-multi-arch-create: -ifeq (1, ${BUILD_AMD64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/controller:$(LABEL_PREFIX) $(PREFIX)/controller:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -ifeq (1, ${BUILD_ARM32}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/controller:$(LABEL_PREFIX) $(PREFIX)/controller:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) -endif -ifeq (1, ${BUILD_ARM64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/controller:$(LABEL_PREFIX) $(PREFIX)/controller:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -agent-docker-multi-arch-create: -ifeq (1, ${BUILD_AMD64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/agent:$(LABEL_PREFIX) $(PREFIX)/agent:$(LABEL_PREFIX)-$(AMD64_SUFFIX) +ifeq (1, $(BUILD_SLIM_AGENT)) + $(call agent_build_slim,$(AMD64_TARGET)) endif -ifeq (1, ${BUILD_ARM32}) - $(ENABLE_DOCKER_MANIFEST) docker manifest 
create --amend $(PREFIX)/agent:$(LABEL_PREFIX) $(PREFIX)/agent:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) endif -ifeq (1, ${BUILD_ARM64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/agent:$(LABEL_PREFIX) $(PREFIX)/agent:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -udev-docker-multi-arch-create: -ifeq (1, ${BUILD_AMD64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/udev-video-broker:$(LABEL_PREFIX) $(PREFIX)/udev-video-broker:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -ifeq (1, ${BUILD_ARM32}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/udev-video-broker:$(LABEL_PREFIX) $(PREFIX)/udev-video-broker:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) +akri-cross-build-arm32: +ifeq (1, $(BUILD_ARM32)) + CARGO_INCREMENTAL=$(CARGO_INCREMENTAL) PKG_CONFIG_ALLOW_CROSS=1 cross build $(if $(BUILD_RELEASE_FLAG), --release) --target=$(ARM32V7_TARGET) --workspace --exclude agent $(foreach package,$(wordlist 1, 100, $(PACKAGES_TO_EXCLUDE)),--exclude $(package)) +ifneq ($(AGENT_FEATURES),) + $(call agent_build_with_features,$(ARM32V7_TARGET)) endif -ifeq (1, ${BUILD_ARM64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/udev-video-broker:$(LABEL_PREFIX) $(PREFIX)/udev-video-broker:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -onvif-docker-multi-arch-create: -ifeq (1, ${BUILD_AMD64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX) $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX)-$(AMD64_SUFFIX) +ifeq (1, $(BUILD_SLIM_AGENT)) + $(call agent_build_slim,$(ARM32V7_TARGET)) endif -ifeq (1, ${BUILD_ARM32}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX) $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) endif +akri-cross-build-arm64: ifeq (1, ${BUILD_ARM64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX) $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -opcua-monitoring-docker-multi-arch-create: -ifeq (1, ${BUILD_AMD64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX) $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX)-$(AMD64_SUFFIX) + CARGO_INCREMENTAL=$(CARGO_INCREMENTAL) PKG_CONFIG_ALLOW_CROSS=1 cross build $(if $(BUILD_RELEASE_FLAG), --release) --target=$(ARM64V8_TARGET) --workspace --exclude agent $(foreach package,$(wordlist 1, 100, $(PACKAGES_TO_EXCLUDE)),--exclude $(package)) +ifneq ($(AGENT_FEATURES),) + $(call agent_build_with_features,$(ARM64V8_TARGET)) endif -ifeq (1, ${BUILD_ARM32}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX) $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) +ifeq (1, $(BUILD_SLIM_AGENT)) + $(call agent_build_slim,$(ARM64V8_TARGET)) endif -ifeq (1, ${BUILD_ARM64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX) $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) endif -anomaly-detection-docker-multi-arch-create: -ifeq (1, ${BUILD_AMD64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX) $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -ifeq (1, ${BUILD_ARM32}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX) $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) 
-endif -ifeq (1, ${BUILD_ARM64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX) $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif - -streaming-docker-multi-arch-create: -ifeq (1, ${BUILD_AMD64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/video-streaming-app:$(LABEL_PREFIX) $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(AMD64_SUFFIX) -endif -ifeq (1, ${BUILD_ARM32}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/video-streaming-app:$(LABEL_PREFIX) $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) -endif -ifeq (1, ${BUILD_ARM64}) - $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/video-streaming-app:$(LABEL_PREFIX) $(PREFIX)/video-streaming-app:$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) -endif +# Rust targets +$(eval $(call add_rust_targets,controller,controller)) +$(eval $(call add_rust_targets,agent,agent)) +$(eval $(call add_rust_targets,agent-full,agent-full)) +$(eval $(call add_rust_targets,udev,udev-video-broker)) +$(eval $(call add_rust_targets,webhook-configuration,webhook-configuration)) +$(eval $(call add_rust_targets,debug-echo-discovery,debug-echo-discovery)) +$(eval $(call add_rust_targets,http-discovery,http-discovery)) +$(eval $(call add_rust_targets,onvif-discovery,onvif-discovery)) +$(eval $(call add_rust_targets,opcua-discovery,opcua-discovery)) +$(eval $(call add_rust_targets,udev-discovery,udev-discovery)) -akri-docker-push-multi-arch-push: controller-docker-multi-arch-push agent-docker-multi-arch-push udev-docker-multi-arch-push onvif-docker-multi-arch-push streaming-docker-multi-arch-push opcua-monitoring-docker-multi-arch-push anomaly-detection-docker-multi-arch-push +# .NET targets +$(eval $(call add_onvif_target,onvif,onvif-video-broker)) +$(eval $(call add_opcua_target,opcua-monitoring,opcua-monitoring-broker)) -controller-docker-multi-arch-push: - $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/controller:$(LABEL_PREFIX) -agent-docker-multi-arch-push: - $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/agent:$(LABEL_PREFIX) -udev-docker-multi-arch-push: - $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/udev-video-broker:$(LABEL_PREFIX) -onvif-docker-multi-arch-push: - $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/onvif-video-broker:$(LABEL_PREFIX) -opcua-monitoring-docker-multi-arch-push: - $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/opcua-monitoring-broker:$(LABEL_PREFIX) -anomaly-detection-docker-multi-arch-push: - $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/anomaly-detection-app:$(LABEL_PREFIX) -streaming-docker-multi-arch-push: - $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/video-streaming-app:$(LABEL_PREFIX) +# Python targets +$(eval $(call add_python_target,anomaly-detection,anomaly-detection-app)) +$(eval $(call add_python_target,streaming,video-streaming-app)) diff --git a/build/akri-dotnet-containers.mk b/build/akri-dotnet-containers.mk new file mode 100644 index 000000000..fe5d3cd02 --- /dev/null +++ b/build/akri-dotnet-containers.mk @@ -0,0 +1,96 @@ +define add_onvif_target + akri-$(1): akri-build akri-docker-$(1) + akri-docker-$(1): $(1)-build $(1)-docker-per-arch $(1)-docker-multi-arch-create $(1)-docker-multi-arch-push + $(1)-build: $(1)-build-amd64 $(1)-build-arm32 $(1)-build-arm64 + $(1)-docker-per-arch: $(1)-docker-per-arch-amd64 $(1)-docker-per-arch-arm32 $(1)-docker-per-arch-arm64 + + $(1)-build-amd64: + ifeq (1, 
${BUILD_AMD64}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . -t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=$(USE_OPENCV_BASE_VERSION)-$(AMD64_SUFFIX) --build-arg DOTNET_PUBLISH_RUNTIME=linux-x64 + endif + $(1)-build-arm32: + ifeq (1, ${BUILD_ARM32}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . -t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=$(USE_OPENCV_BASE_VERSION)-$(ARM32V7_SUFFIX) --build-arg DOTNET_PUBLISH_RUNTIME=linux-arm + endif + $(1)-build-arm64: + ifeq (1, ${BUILD_ARM64}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . -t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=$(USE_OPENCV_BASE_VERSION)-$(ARM64V8_SUFFIX) --build-arg DOTNET_PUBLISH_RUNTIME=linux-arm64 + endif + + $(1)-docker-per-arch-amd64: + ifeq (1, ${BUILD_AMD64}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) + endif + $(1)-docker-per-arch-arm32: + ifeq (1, ${BUILD_ARM32}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) + endif + $(1)-docker-per-arch-arm64: + ifeq (1, ${BUILD_ARM64}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) + endif + + $(1)-docker-multi-arch-create: + ifeq (1, ${BUILD_AMD64}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) + endif + ifeq (1, ${BUILD_ARM32}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) + endif + ifeq (1, ${BUILD_ARM64}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) + endif + + $(1)-docker-multi-arch-push: + $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/$(2):$(LABEL_PREFIX) + +endef + +define add_opcua_target + akri-$(1): akri-build akri-docker-$(1) + akri-docker-$(1): $(1)-build $(1)-docker-per-arch $(1)-docker-multi-arch-create $(1)-docker-multi-arch-push + $(1)-build: $(1)-build-amd64 $(1)-build-arm32 $(1)-build-arm64 + $(1)-docker-per-arch: $(1)-docker-per-arch-amd64 $(1)-docker-per-arch-arm32 $(1)-docker-per-arch-arm64 + + $(1)-build-amd64: + ifeq (1, ${BUILD_AMD64}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . -t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=3.1-buster-slim --build-arg DOTNET_PUBLISH_RUNTIME=linux-x64 + endif + $(1)-build-arm32: + ifeq (1, ${BUILD_ARM32}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . -t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=3.1-buster-slim-$(ARM32V7_SUFFIX) --build-arg DOTNET_PUBLISH_RUNTIME=linux-arm + endif + $(1)-build-arm64: + ifeq (1, ${BUILD_ARM64}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . 
-t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg OUTPUT_PLATFORM_TAG=3.1-buster-slim-$(ARM64V8_SUFFIX) --build-arg DOTNET_PUBLISH_RUNTIME=linux-arm64 + endif + + $(1)-docker-per-arch-amd64: + ifeq (1, ${BUILD_AMD64}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) + endif + $(1)-docker-per-arch-arm32: + ifeq (1, ${BUILD_ARM32}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) + endif + $(1)-docker-per-arch-arm64: + ifeq (1, ${BUILD_ARM64}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) + endif + + $(1)-docker-multi-arch-create: + ifeq (1, ${BUILD_AMD64}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) + endif + ifeq (1, ${BUILD_ARM32}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) + endif + ifeq (1, ${BUILD_ARM64}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) + endif + + $(1)-docker-multi-arch-push: + $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/$(2):$(LABEL_PREFIX) + +endef + diff --git a/build/akri-python-containers.mk b/build/akri-python-containers.mk new file mode 100644 index 000000000..17d75d1e9 --- /dev/null +++ b/build/akri-python-containers.mk @@ -0,0 +1,48 @@ + +define add_python_target + akri-$(1): akri-build akri-docker-$(1) + akri-docker-$(1): $(1)-build $(1)-docker-per-arch $(1)-docker-multi-arch-create $(1)-docker-multi-arch-push + $(1)-build: $(1)-build-amd64 $(1)-build-arm32 $(1)-build-arm64 + $(1)-docker-per-arch: $(1)-docker-per-arch-amd64 $(1)-docker-per-arch-arm32 $(1)-docker-per-arch-arm64 + + $(1)-build-amd64: + ifeq (1, ${BUILD_AMD64}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . -t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg PLATFORM=$(AMD64_SUFFIX) + endif + $(1)-build-arm32: + ifeq (1, ${BUILD_ARM32}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . -t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg PLATFORM=$(ARM32V7_SUFFIX) + endif + $(1)-build-arm64: + ifeq (1, ${BUILD_ARM64}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . 
-t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg PLATFORM=$(ARM64V8_SUFFIX) + endif + + $(1)-docker-per-arch-amd64: + ifeq (1, ${BUILD_AMD64}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) + endif + $(1)-docker-per-arch-arm32: + ifeq (1, ${BUILD_ARM32}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) + endif + $(1)-docker-per-arch-arm64: + ifeq (1, ${BUILD_ARM64}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) + endif + + $(1)-docker-multi-arch-create: + ifeq (1, ${BUILD_AMD64}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) + endif + ifeq (1, ${BUILD_ARM32}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) + endif + ifeq (1, ${BUILD_ARM64}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) + endif + + $(1)-docker-multi-arch-push: + $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/$(2):$(LABEL_PREFIX) + +endef diff --git a/build/akri-rust-containers.mk b/build/akri-rust-containers.mk new file mode 100644 index 000000000..ef6b8bc37 --- /dev/null +++ b/build/akri-rust-containers.mk @@ -0,0 +1,49 @@ + +# Create set of targets for rust builds +define add_rust_targets + akri-$(1): akri-build akri-docker-$(1) + akri-docker-$(1): $(1)-build $(1)-docker-per-arch $(1)-docker-multi-arch-create $(1)-docker-multi-arch-push + $(1)-build: $(1)-build-amd64 $(1)-build-arm32 $(1)-build-arm64 + $(1)-docker-per-arch: $(1)-docker-per-arch-amd64 $(1)-docker-per-arch-arm32 $(1)-docker-per-arch-arm64 + + $(1)-build-amd64: + ifeq (1, ${BUILD_AMD64}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . -t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) --build-arg PLATFORM=$(AMD64_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(AMD64_TARGET) --build-arg BUILD_TYPE=$(if $(BUILD_RELEASE_FLAG),release,debug) + endif + $(1)-build-arm32: + ifeq (1, ${BUILD_ARM32}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . -t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) --build-arg PLATFORM=$(ARM32V7_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(ARM32V7_TARGET) --build-arg BUILD_TYPE=$(if $(BUILD_RELEASE_FLAG),release,debug) + endif + $(1)-build-arm64: + ifeq (1, ${BUILD_ARM64}) + docker build $(CACHE_OPTION) -f $(DOCKERFILE_DIR)/Dockerfile.$(2) . 
-t $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) --build-arg PLATFORM=$(ARM64V8_SUFFIX) --build-arg CROSS_BUILD_TARGET=$(ARM64V8_TARGET) --build-arg BUILD_TYPE=$(if $(BUILD_RELEASE_FLAG),release,debug) + endif + + $(1)-docker-per-arch-amd64: + ifeq (1, ${BUILD_AMD64}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) + endif + $(1)-docker-per-arch-arm32: + ifeq (1, ${BUILD_ARM32}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) + endif + $(1)-docker-per-arch-arm64: + ifeq (1, ${BUILD_ARM64}) + docker push $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) + endif + + $(1)-docker-multi-arch-create: + ifeq (1, ${BUILD_AMD64}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(AMD64_SUFFIX) + endif + ifeq (1, ${BUILD_ARM32}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM32V7_SUFFIX) + endif + ifeq (1, ${BUILD_ARM64}) + $(ENABLE_DOCKER_MANIFEST) docker manifest create --amend $(PREFIX)/$(2):$(LABEL_PREFIX) $(PREFIX)/$(2):$(LABEL_PREFIX)-$(ARM64V8_SUFFIX) + endif + + $(1)-docker-multi-arch-push: + $(ENABLE_DOCKER_MANIFEST) docker manifest push $(PREFIX)/$(2):$(LABEL_PREFIX) + +endef diff --git a/build/containers/Dockerfile.agent b/build/containers/Dockerfile.agent index 54fdccaff..25bc9f6a0 100644 --- a/build/containers/Dockerfile.agent +++ b/build/containers/Dockerfile.agent @@ -2,8 +2,9 @@ ARG PLATFORM=amd64 ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu FROM ${PLATFORM}/debian:buster-slim ARG CROSS_BUILD_TARGET +ARG BUILD_TYPE=release RUN echo "Creating container based on ${PLATFORM}/debian:buster-slim" -RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}" +RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}/${BUILD_TYPE}" # Link the container to the Akri repository LABEL org.opencontainers.image.source https://github.com/deislabs/akri @@ -12,6 +13,6 @@ LABEL org.opencontainers.image.source https://github.com/deislabs/akri COPY ./build/container-images-legal-notice.md . RUN apt-get update && apt-get install -y --no-install-recommends libssl-dev openssl && apt-get clean -COPY ./target/${CROSS_BUILD_TARGET}/release/agent /agent +COPY ./target/${CROSS_BUILD_TARGET}/${BUILD_TYPE}/agent /agent ENV RUST_LOG agent,akri_shared CMD ["./agent"] diff --git a/build/containers/Dockerfile.agent-full b/build/containers/Dockerfile.agent-full new file mode 100644 index 000000000..9cd3c7da7 --- /dev/null +++ b/build/containers/Dockerfile.agent-full @@ -0,0 +1,18 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu +FROM ${PLATFORM}/debian:buster-slim +ARG CROSS_BUILD_TARGET +ARG BUILD_TYPE=release +RUN echo "Creating container based on ${PLATFORM}/debian:buster-slim" +RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}/${BUILD_TYPE}" + +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + +# Copy over container legal notice +COPY ./build/container-images-legal-notice.md . 
+ +RUN apt-get update && apt-get install -y --no-install-recommends libssl-dev openssl && apt-get clean +COPY ./target/${CROSS_BUILD_TARGET}/${BUILD_TYPE}/agent-full /agent +ENV RUST_LOG agent,akri_shared,akri_debug_echo +CMD ["./agent"] diff --git a/build/containers/Dockerfile.controller b/build/containers/Dockerfile.controller index fd0ea05f0..c13d2bbc4 100644 --- a/build/containers/Dockerfile.controller +++ b/build/containers/Dockerfile.controller @@ -2,8 +2,9 @@ ARG PLATFORM=amd64 ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu FROM ${PLATFORM}/debian:buster-slim ARG CROSS_BUILD_TARGET +ARG BUILD_TYPE=release RUN echo "Creating container based on ${PLATFORM}/debian:buster-slim" -RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}" +RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}/${BUILD_TYPE}" # Link the container to the Akri repository LABEL org.opencontainers.image.source https://github.com/deislabs/akri @@ -12,6 +13,6 @@ LABEL org.opencontainers.image.source https://github.com/deislabs/akri COPY ./build/container-images-legal-notice.md . RUN apt-get update && apt-get install -y --no-install-recommends libssl-dev openssl && apt-get clean -COPY ./target/${CROSS_BUILD_TARGET}/release/controller /controller +COPY ./target/${CROSS_BUILD_TARGET}/${BUILD_TYPE}/controller /controller ENV RUST_LOG controller,akri_shared CMD ["./controller"] diff --git a/build/containers/Dockerfile.debug-echo-discovery b/build/containers/Dockerfile.debug-echo-discovery new file mode 100644 index 000000000..5bd5fab3f --- /dev/null +++ b/build/containers/Dockerfile.debug-echo-discovery @@ -0,0 +1,18 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu +FROM ${PLATFORM}/debian:buster-slim +ARG CROSS_BUILD_TARGET +ARG BUILD_TYPE=release +RUN echo "Creating container based on ${PLATFORM}/debian:buster-slim" +RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}/${BUILD_TYPE}" + +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + +# Copy over container legal notice +COPY ./build/container-images-legal-notice.md . + +RUN apt-get update && apt-get install -y --no-install-recommends libssl-dev openssl && apt-get clean +COPY ./target/${CROSS_BUILD_TARGET}/${BUILD_TYPE}/debug-echo-discovery-handler /debug-echo-discovery-handler +ENV RUST_LOG debug_echo_discovery_handler,akri_shared,akri_discovery_utils,akri_debug_echo +CMD ["./debug-echo-discovery-handler"] diff --git a/build/containers/Dockerfile.http-discovery b/build/containers/Dockerfile.http-discovery new file mode 100644 index 000000000..912adb3e7 --- /dev/null +++ b/build/containers/Dockerfile.http-discovery @@ -0,0 +1,18 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu +FROM ${PLATFORM}/debian:buster-slim +ARG CROSS_BUILD_TARGET +ARG BUILD_TYPE=release +RUN echo "Creating container based on ${PLATFORM}/debian:buster-slim" +RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}/${BUILD_TYPE}" + +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + +# Copy over container legal notice +COPY ./build/container-images-legal-notice.md . 
+ +RUN apt-get update && apt-get install -y --no-install-recommends libssl-dev openssl && apt-get clean +COPY ./target/${CROSS_BUILD_TARGET}/${BUILD_TYPE}/http-discovery-handler /http-discovery-handler +ENV RUST_LOG http_discovery_handler,akri_shared,akri_discovery_utils +CMD ["./http-discovery-handler"] diff --git a/build/containers/Dockerfile.onvif-discovery b/build/containers/Dockerfile.onvif-discovery new file mode 100644 index 000000000..bd8791bfa --- /dev/null +++ b/build/containers/Dockerfile.onvif-discovery @@ -0,0 +1,18 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu +FROM ${PLATFORM}/debian:buster-slim +ARG CROSS_BUILD_TARGET +ARG BUILD_TYPE=release +RUN echo "Creating container based on ${PLATFORM}/debian:buster-slim" +RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}/${BUILD_TYPE}" + +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + +# Copy over container legal notice +COPY ./build/container-images-legal-notice.md . + +RUN apt-get update && apt-get install -y --no-install-recommends libssl-dev openssl && apt-get clean +COPY ./target/${CROSS_BUILD_TARGET}/${BUILD_TYPE}/onvif-discovery-handler /onvif-discovery-handler +ENV RUST_LOG onvif_discovery_handler,akri_shared,akri_discovery_utils,akri_onvif +CMD ["./onvif-discovery-handler"] diff --git a/build/containers/Dockerfile.opcua-discovery b/build/containers/Dockerfile.opcua-discovery new file mode 100644 index 000000000..11880afee --- /dev/null +++ b/build/containers/Dockerfile.opcua-discovery @@ -0,0 +1,18 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu +FROM ${PLATFORM}/debian:buster-slim +ARG CROSS_BUILD_TARGET +ARG BUILD_TYPE=release +RUN echo "Creating container based on ${PLATFORM}/debian:buster-slim" +RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}/${BUILD_TYPE}" + +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + +# Copy over container legal notice +COPY ./build/container-images-legal-notice.md . + +RUN apt-get update && apt-get install -y --no-install-recommends libssl-dev openssl && apt-get clean +COPY ./target/${CROSS_BUILD_TARGET}/${BUILD_TYPE}/opcua-discovery-handler /opcua-discovery-handler +ENV RUST_LOG opcua_discovery_handler,akri_shared,akri_discovery_utils,akri_opcua +CMD ["./opcua-discovery-handler"] diff --git a/build/containers/Dockerfile.udev-discovery b/build/containers/Dockerfile.udev-discovery new file mode 100644 index 000000000..08c7fa599 --- /dev/null +++ b/build/containers/Dockerfile.udev-discovery @@ -0,0 +1,18 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu +FROM ${PLATFORM}/debian:buster-slim +ARG CROSS_BUILD_TARGET +ARG BUILD_TYPE=release +RUN echo "Creating container based on ${PLATFORM}/debian:buster-slim" +RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}/${BUILD_TYPE}" + +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + +# Copy over container legal notice +COPY ./build/container-images-legal-notice.md . 
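The `ENV RUST_LOG ...` lines in these images are env_logger filter lists. Filters match Rust module paths, and Cargo normalizes a hyphenated package name like `udev-discovery-handler` to the target `udev_discovery_handler`, so filter entries must use underscores, as in the udev image below. A small generic sketch of the behavior:

```rust
// Generic sketch, not repository code: env_logger matches RUST_LOG filter
// entries against module paths, in which hyphenated crate names appear
// with underscores.
fn main() {
    std::env::set_var(
        "RUST_LOG",
        "udev_discovery_handler,akri_shared,akri_discovery_utils,akri_udev",
    );
    env_logger::init(); // reads RUST_LOG once, at initialization
    log::info!("emitted: this target is listed (with underscores) in RUST_LOG");
}
```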
+ +RUN apt-get update && apt-get install -y --no-install-recommends libssl-dev openssl && apt-get clean +COPY ./target/${CROSS_BUILD_TARGET}/${BUILD_TYPE}/udev-discovery-handler /udev-discovery-handler +ENV RUST_LOG udev_discovery_handler,akri_shared,akri_discovery_utils,akri_udev +CMD ["./udev-discovery-handler"] diff --git a/build/containers/Dockerfile.webhook-configuration b/build/containers/Dockerfile.webhook-configuration new file mode 100644 index 000000000..3d4751fa0 --- /dev/null +++ b/build/containers/Dockerfile.webhook-configuration @@ -0,0 +1,24 @@ +ARG PLATFORM=amd64 +ARG CROSS_BUILD_TARGET=x86_64-unknown-linux-gnu + +FROM ${PLATFORM}/debian:buster-slim + +ARG CROSS_BUILD_TARGET +ARG BUILD_TYPE=release +RUN echo "Creating container based on ${PLATFORM}/debian:buster-slim" +RUN echo "Using Rust binaries from ${CROSS_BUILD_TARGET}/${BUILD_TYPE}" + +# Link the container to the Akri repository +LABEL org.opencontainers.image.source https://github.com/deislabs/akri + +# Copy over container legal notice +COPY ./build/container-images-legal-notice.md . + +RUN apt-get update && apt-get install -y --no-install-recommends libssl-dev openssl && apt-get clean + +COPY ./target/${CROSS_BUILD_TARGET}/${BUILD_TYPE}/webhook-configuration /server + +ENV RUST_LOG agent,akri_shared + +CMD ["/server"] +# CMD ["/server", "--tls-crt-file=/path/to/crt", "--tls-key-file=/path/to/key", "--port=8443"] diff --git a/build/setup.sh b/build/setup.sh index 4fa712d95..662633deb 100755 --- a/build/setup.sh +++ b/build/setup.sh @@ -23,10 +23,10 @@ then if [ -x "$(command -v sudo)" ]; then echo "Install rustup" - sudo curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=1.41.0 + sudo curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=1.51.0 else echo "Install rustup" - curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=1.41.0 + curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=1.51.0 fi else echo "Found rustup" diff --git a/controller/Cargo.toml b/controller/Cargo.toml index abfab148f..4ba75a8c6 100644 --- a/controller/Cargo.toml +++ b/controller/Cargo.toml @@ -1,25 +1,29 @@ [package] name = "controller" -version = "0.1.5" +version = "0.6.5" authors = [""] edition = "2018" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +akri-shared = { path = "../shared" } async-std = "1.5.0" async-trait = "0.1.0" chrono = "0.4.10" env_logger = "0.6.1" futures = "0.3.1" -failure = "0.1.5" +anyhow = "1.0.38" kube = { version = "0.23.0", features = ["openapi"] } k8s-openapi = { version = "0.6.0", features = ["v1_16"] } +lazy_static = "1.4" log = "0.4" -mockall = "0.6.0" +prometheus = { version = "0.12.0", features = ["process"] } serde = "1.0.104" serde_derive = "1.0.104" serde_json = "1.0.45" serde_yaml = "0.8.11" -akri-shared = { path = "../shared" } tokio = { version = "0.2", features = ["full"] } + +[dev-dependencies] +mockall = "0.9.0" \ No newline at end of file diff --git a/controller/src/main.rs b/controller/src/main.rs index 60eec0633..82d6d0c35 100644 --- a/controller/src/main.rs +++ b/controller/src/main.rs @@ -1,14 +1,21 @@ +#[macro_use] +extern crate lazy_static; mod util; -use akri_shared::akri::API_NAMESPACE; +use akri_shared::akri::{metrics::run_metrics_server, API_NAMESPACE}; use async_std::sync::Mutex; -use env_logger; +use prometheus::IntGaugeVec; use std::sync::Arc; use util::{instance_action, node_watcher, pod_watcher};
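The controller changes that follow register a process-global Prometheus gauge vector via lazy_static and spawn `run_metrics_server` (from akri-shared, not shown in this diff) alongside the watcher tasks. A self-contained sketch of the same register/inc/get pattern, using the prometheus and lazy_static versions pinned in the Cargo.toml above:

```rust
#[macro_use]
extern crate lazy_static;
use prometheus::IntGaugeVec;

lazy_static! {
    // Same shape as BROKER_POD_COUNT_METRIC below: a gauge vector keyed by
    // (configuration, node) labels, registered once in the default registry.
    static ref EXAMPLE_POD_COUNT: IntGaugeVec = prometheus::register_int_gauge_vec!(
        "example_pod_count",
        "Example pod count grouped by configuration and node",
        &["configuration", "node"]
    )
    .unwrap();
}

fn main() {
    EXAMPLE_POD_COUNT.with_label_values(&["config-a", "node-a"]).inc();
    assert_eq!(
        EXAMPLE_POD_COUNT.with_label_values(&["config-a", "node-a"]).get(),
        1
    );
}
```

Because registration targets a process-global registry, tests that mutate such a metric cannot run in parallel, which is why the new test_broker_pod_count_metric below is marked #[ignore] and run on demand.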
/// Length of time to sleep between controller system validation checks pub const SYSTEM_CHECK_DELAY_SECS: u64 = 30; +lazy_static! { + // Reports the number of Broker pods running, grouped by Configuration and Node + pub static ref BROKER_POD_COUNT_METRIC: IntGaugeVec = prometheus::register_int_gauge_vec!("akri_broker_pod_count", "Akri Broker Pod Count", &["configuration", "node"]).unwrap(); +} + /// This is the entry point for the controller. #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> { @@ -30,6 +37,11 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> let instance_watch_synchronization = synchronization.clone(); let mut tasks = Vec::new(); + // Start server for prometheus metrics + tasks.push(tokio::spawn(async move { + run_metrics_server().await.unwrap(); + })); + // Handle existing instances tasks.push(tokio::spawn({ async move { diff --git a/controller/src/util/instance_action.rs b/controller/src/util/instance_action.rs index 1c0604533..4f46b276b 100644 --- a/controller/src/util/instance_action.rs +++ b/controller/src/util/instance_action.rs @@ -1,3 +1,4 @@ +use super::super::BROKER_POD_COUNT_METRIC; use super::{pod_action::PodAction, pod_action::PodActionInfo}; use akri_shared::{ akri::{ @@ -253,6 +254,7 @@ fn determine_action_for_pod( /// and the capability Service (if there are no remaining capability Pods). async fn handle_deletion_work( instance_name: &str, + configuration_name: &str, instance_shared: bool, node_to_delete_pod: &str, context: &PodContext, @@ -289,13 +291,16 @@ async fn handle_deletion_work( .remove_pod(&pod_app_name, &context_namespace) .await?; trace!("handle_deletion_work - pod::remove_pod succeeded",); + BROKER_POD_COUNT_METRIC + .with_label_values(&[configuration_name, context_node_name]) + .dec(); Ok(()) } #[cfg(test)] mod handle_deletion_work_tests { use super::*; - use akri_shared::k8s::test_kube::MockKubeImpl; + use akri_shared::k8s::MockKubeInterface; #[tokio::test] async fn test_handle_deletion_work_with_no_node_name() { @@ -309,10 +314,11 @@ mod handle_deletion_work_tests { assert!(handle_deletion_work( "instance_name", + "configuration_name", true, "node_to_delete_pod", &context, - &MockKubeImpl::new(), + &MockKubeInterface::new(), ) .await .is_err()); @@ -330,10 +336,11 @@ mod handle_deletion_work_tests { assert!(handle_deletion_work( "instance_name", + "configuration_name", true, "node_to_delete_pod", &context, - &MockKubeImpl::new(), + &MockKubeInterface::new(), ) .await .is_err()); @@ -380,6 +387,9 @@ async fn handle_addition_work( .create_pod(&new_pod, &instance_namespace) .await?; trace!("handle_addition_work - pod::create_pod succeeded",); + BROKER_POD_COUNT_METRIC + .with_label_values(&[instance_class_name, new_node]) + .inc(); } trace!("handle_addition_work - POST nodeInfo.SetNode \n"); Ok(()) @@ -464,6 +474,7 @@ pub async fn handle_instance_change( }) { handle_deletion_work( &instance_name, + &instance.spec.configuration_name, instance.spec.shared, node_to_delete_pod, context, @@ -542,7 +553,7 @@ mod handle_instance_tests { use super::*; use akri_shared::{ akri::instance::KubeAkriInstance, - k8s::{pod::AKRI_INSTANCE_LABEL_NAME, test_kube::MockKubeImpl}, + k8s::{pod::AKRI_INSTANCE_LABEL_NAME, MockKubeInterface}, os::file, }; use chrono::prelude::*; @@ -550,7 +561,7 @@ mod handle_instance_tests { use mockall::predicate::*; fn configure_find_pods_with_phase( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, pod_selector: &'static str, result_file: &'static str, specified_phase: &'static str, @@ -574,7 +585,7 @@ mod handle_instance_tests { }
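Worth noting what the MockKubeImpl to MockKubeInterface rename throughout these tests reflects: with mockall 0.9 (now a dev-dependency above), annotating a trait with #[automock] generates a mock type named Mock plus the trait name, so a KubeInterface trait yields MockKubeInterface with the expect_*/times/returning API these tests use. A self-contained sketch with an illustrative trait and method, not Akri's real KubeInterface:

```rust
use mockall::automock;

// Illustrative trait only: #[automock] generates MockKubeInterface with an
// expect_* method per trait method, configured via times/returning.
#[automock]
trait KubeInterface {
    fn get_pod_count(&self, selector: &str) -> usize;
}

fn main() {
    let mut mock = MockKubeInterface::new();
    mock.expect_get_pod_count()
        .times(1)
        .returning(|_| 2);
    assert_eq!(mock.get_pod_count("akri.sh/instance=config-a-b494b6"), 2);
}
```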
fn configure_find_pods_with_phase_and_start_time( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, pod_selector: &'static str, result_file: &'static str, specified_phase: &'static str, @@ -606,7 +617,7 @@ } fn configure_find_pods_with_phase_and_no_start_time( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, pod_selector: &'static str, result_file: &'static str, specified_phase: &'static str, @@ -642,7 +653,10 @@ addition_work: Option<HandleAdditionWork>, } - fn configure_for_handle_instance_change(mock: &mut MockKubeImpl, work: &HandleInstanceWork) { + fn configure_for_handle_instance_change( + mock: &mut MockKubeInterface, + work: &HandleInstanceWork, + ) { if let Some(phase) = work.find_pods_phase { if let Some(start_time) = work.find_pods_start_time { configure_find_pods_with_phase_and_start_time( @@ -715,7 +729,7 @@ } } - fn configure_for_handle_deletion_work(mock: &mut MockKubeImpl, work: &HandleDeletionWork) { + fn configure_for_handle_deletion_work(mock: &mut MockKubeInterface, work: &HandleDeletionWork) { for i in 0..work.broker_pod_names.len() { let broker_pod_name = work.broker_pod_names[i]; let cleanup_namespace = work.cleanup_namespaces[i]; @@ -756,7 +770,7 @@ } } - fn configure_for_handle_addition_work(mock: &mut MockKubeImpl, work: &HandleAdditionWork) { + fn configure_for_handle_addition_work(mock: &mut MockKubeInterface, work: &HandleAdditionWork) { for i in 0..work.new_pod_names.len() { config_for_tests::configure_add_pod( mock, @@ -769,7 +783,7 @@ } async fn run_handle_instance_change_test( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, instance_file: &'static str, action: &'static InstanceAction, ) { @@ -793,7 +807,7 @@ async fn test_internal_handle_existing_instances_no_instances() { let _ = env_logger::builder().is_test(true).try_init(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); config_for_tests::configure_get_instances(&mut mock, "../test/json/empty-list.json", false); internal_handle_existing_instances(&mock).await.unwrap(); } @@ -802,7 +816,7 @@ async fn test_handle_instance_change_for_add_new_local_instance() { let _ = env_logger::builder().is_test(true).try_init(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_instance_change( &mut mock, &HandleInstanceWork { @@ -827,7 +841,7 @@ async fn test_handle_instance_change_for_remove_running_local_instance() { let _ = env_logger::builder().is_test(true).try_init(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_instance_change( &mut mock, &HandleInstanceWork { @@ -852,7 +866,7 @@ async fn test_handle_instance_change_for_add_new_shared_instance() { let _ = env_logger::builder().is_test(true).try_init(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_instance_change( &mut mock, &HandleInstanceWork { @@ -879,7 +893,7 @@ async fn test_handle_instance_change_for_remove_running_shared_instance() { let _ = env_logger::builder().is_test(true).try_init(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_instance_change( &mut mock, &HandleInstanceWork { @@ -904,7 +918,7 @@ async fn
test_handle_instance_change_for_update_active_shared_instance() { let _ = env_logger::builder().is_test(true).try_init(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_instance_change( &mut mock, &HandleInstanceWork { @@ -959,7 +973,7 @@ }) .collect::<Vec<_>>(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_instance_change( &mut mock, &HandleInstanceWork { @@ -976,4 +990,74 @@ ); run_handle_instance_change_test(&mut mock, &instance_file, &InstanceAction::Update).await; } + + /// Checks that the BROKER_POD_COUNT_METRIC is appropriately incremented + /// and decremented when an instance is added and deleted (and pods are + /// created and deleted). Cannot be run in parallel with other tests + /// due to the metric being a global variable and modified unpredictably by + /// other tests. + /// Run with: cargo test -- test_broker_pod_count_metric --ignored + #[tokio::test] + #[ignore] + async fn test_broker_pod_count_metric() { + let _ = env_logger::builder().is_test(true).try_init(); + BROKER_POD_COUNT_METRIC + .with_label_values(&["config-a", "node-a"]) + .set(0); + + let mut mock = MockKubeInterface::new(); + configure_for_handle_instance_change( + &mut mock, + &HandleInstanceWork { + find_pods_selector: "akri.sh/instance=config-a-b494b6", + find_pods_result: "../test/json/empty-list.json", + find_pods_phase: None, + find_pods_start_time: None, + find_pods_delete_start_time: false, + deletion_work: None, + addition_work: Some(configure_add_local_config_a_b494b6()), + }, + ); + run_handle_instance_change_test( + &mut mock, + "../test/json/local-instance.json", + &InstanceAction::Add, + ) + .await; + + // Check that broker pod count metric has been incremented to include new pod for this instance + assert_eq!( + BROKER_POD_COUNT_METRIC + .with_label_values(&["config-a", "node-a"]) + .get(), + 1 + ); + + configure_for_handle_instance_change( + &mut mock, + &HandleInstanceWork { + find_pods_selector: "akri.sh/instance=config-a-b494b6", + find_pods_result: "../test/json/running-pod-list-for-config-a-local.json", + find_pods_phase: None, + find_pods_start_time: None, + find_pods_delete_start_time: false, + deletion_work: Some(configure_deletion_work_for_config_a_b494b6()), + addition_work: None, + }, + ); + run_handle_instance_change_test( + &mut mock, + "../test/json/local-instance.json", + &InstanceAction::Remove, + ) + .await; + + // Check that broker pod count metric has been decremented to reflect deleted instance and pod + assert_eq!( + BROKER_POD_COUNT_METRIC + .with_label_values(&["config-a", "node-a"]) + .get(), + 0 + ); + } } diff --git a/controller/src/util/node_watcher.rs b/controller/src/util/node_watcher.rs index c3e975273..889afd99e 100644 --- a/controller/src/util/node_watcher.rs +++ b/controller/src/util/node_watcher.rs @@ -106,6 +106,11 @@ impl NodeWatcher { } WatchEvent::Modified(node) => { trace!("handle_node - Modified: {:?}", &node.metadata.name); + trace!( + "handle_node - Modified with Node Status {:?} and NodeSpec: {:?}", + &node.status, + &node.spec + ); if self.is_node_ready(&node) { self.known_nodes .insert(node.metadata.name.clone(), NodeState::Running); @@ -166,6 +171,7 @@ impl NodeWatcher { /// This determines if a node is in the Ready state.
fn is_node_ready(&self, k8s_node: &NodeObject) -> bool { + trace!("is_node_ready - for node {:?}", k8s_node.metadata.name); k8s_node .status .as_ref() @@ -298,8 +304,7 @@ impl NodeWatcher { // Save the instance let modified_instance = Instance { configuration_name: instance.spec.configuration_name.clone(), - metadata: instance.spec.metadata.clone(), - rbac: instance.spec.rbac.clone(), + broker_properties: instance.spec.broker_properties.clone(), shared: instance.spec.shared, device_usage: modified_device_usage, nodes: modified_nodes, @@ -322,9 +327,7 @@ impl NodeWatcher { mod tests { use super::super::shared_test_utils::config_for_tests; use super::*; - use akri_shared::{ - akri::instance::KubeAkriInstanceList, k8s::test_kube::MockKubeImpl, os::file, - }; + use akri_shared::{akri::instance::KubeAkriInstanceList, k8s::MockKubeInterface, os::file}; #[derive(Clone)] struct UpdateInstance { @@ -341,7 +344,7 @@ mod tests { } fn configure_for_handle_node_disappearance( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, work: &HandleNodeDisappearance, ) { config_for_tests::configure_get_instances( @@ -368,7 +371,7 @@ mod tests { let node: NodeObject = serde_json::from_str(&node_json).unwrap(); let mut node_watcher = NodeWatcher::new(); node_watcher - .handle_node(WatchEvent::Added(node), &MockKubeImpl::new()) + .handle_node(WatchEvent::Added(node), &MockKubeInterface::new()) .await .unwrap(); @@ -388,7 +391,7 @@ mod tests { let node: NodeObject = serde_json::from_str(&node_json).unwrap(); let mut node_watcher = NodeWatcher::new(); node_watcher - .handle_node(WatchEvent::Added(node), &MockKubeImpl::new()) + .handle_node(WatchEvent::Added(node), &MockKubeInterface::new()) .await .unwrap(); @@ -417,7 +420,7 @@ mod tests { .device_usage .insert("config-a-359973-2".to_string(), "".to_string()); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_node_disappearance( &mut mock, &HandleNodeDisappearance { @@ -452,7 +455,7 @@ mod tests { let node: NodeObject = serde_json::from_str(&node_json).unwrap(); let mut node_watcher = NodeWatcher::new(); - let mock = MockKubeImpl::new(); + let mock = MockKubeInterface::new(); node_watcher .handle_node(WatchEvent::Modified(node), &mock) .await @@ -483,7 +486,7 @@ mod tests { .device_usage .insert("config-a-359973-2".to_string(), "".to_string()); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_node_disappearance( &mut mock, &HandleNodeDisappearance { @@ -530,7 +533,7 @@ mod tests { async fn test_handle_node_disappearance_update_failure_retries() { let _ = env_logger::builder().is_test(true).try_init(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); mock.expect_get_instances().times(1).returning(move || { let instance_file = "../test/json/shared-instance-update.json"; let instance_json = file::read_file_to_string(instance_file); @@ -567,7 +570,7 @@ mod tests { let instance_json = file::read_file_to_string(instance_file); let kube_object_instance: KubeAkriInstance = serde_json::from_str(&instance_json).unwrap(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); mock.expect_update_instance() .times(1) .withf(move |ins, n, ns| { diff --git a/controller/src/util/pod_watcher.rs b/controller/src/util/pod_watcher.rs index 065b3e38e..630fc3ef0 100644 --- a/controller/src/util/pod_watcher.rs +++ b/controller/src/util/pod_watcher.rs @@ -601,7 +601,7 @@ mod tests { use 
super::super::shared_test_utils::config_for_tests; use super::super::shared_test_utils::config_for_tests::PodList; use super::*; - use akri_shared::{k8s::test_kube::MockKubeImpl, os::file}; + use akri_shared::{k8s::MockKubeInterface, os::file}; use kube::ErrorResponse; fn create_pods_with_phase(result_file: &'static str, specified_phase: &'static str) -> PodList { @@ -628,7 +628,7 @@ mod tests { reason: "reason".to_string(), code: 0, }), - &MockKubeImpl::new(), + &MockKubeInterface::new(), ) .await .unwrap(); @@ -652,7 +652,7 @@ mod tests { &phase ); pod_watcher - .handle_pod(WatchEvent::Added(pod), &MockKubeImpl::new()) + .handle_pod(WatchEvent::Added(pod), &MockKubeInterface::new()) .await .unwrap(); trace!( @@ -686,7 +686,7 @@ mod tests { &phase ); pod_watcher - .handle_pod(WatchEvent::Modified(pod), &MockKubeImpl::new()) + .handle_pod(WatchEvent::Modified(pod), &MockKubeInterface::new()) .await .unwrap(); trace!( @@ -713,7 +713,7 @@ mod tests { let pod_list: PodList = serde_json::from_str(&pods_json).unwrap(); let pod = pod_list.items.first().unwrap().clone(); let mut pod_watcher = BrokerPodWatcher::new(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_pod( &mut mock, &HandlePod { @@ -771,7 +771,7 @@ mod tests { let pod_list: PodList = serde_json::from_str(&pods_json).unwrap(); let pod = pod_list.items.first().unwrap().clone(); let mut pod_watcher = BrokerPodWatcher::new(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_pod( &mut mock, &HandlePod { @@ -830,7 +830,7 @@ mod tests { ); let pod = pod_list.items.first().unwrap().clone(); let mut pod_watcher = BrokerPodWatcher::new(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_pod( &mut mock, &HandlePod { @@ -892,7 +892,7 @@ mod tests { ); let pod = pod_list.items.first().unwrap().clone(); let mut pod_watcher = BrokerPodWatcher::new(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); configure_for_handle_pod( &mut mock, &HandlePod { @@ -961,7 +961,7 @@ mod tests { &phase ); pod_watcher - .handle_pod(WatchEvent::Added(pod), &MockKubeImpl::new()) + .handle_pod(WatchEvent::Added(pod), &MockKubeInterface::new()) .await .unwrap(); trace!( @@ -982,7 +982,7 @@ mod tests { &phase ); pod_watcher - .handle_pod(WatchEvent::Modified(pod), &MockKubeImpl::new()) + .handle_pod(WatchEvent::Modified(pod), &MockKubeInterface::new()) .await .unwrap(); trace!( @@ -1007,7 +1007,7 @@ mod tests { .known_pods .insert("config-a-b494b6-pod".to_string(), PodState::Running); pod_watcher - .handle_running_pod_if_needed(pod, &MockKubeImpl::new()) + .handle_running_pod_if_needed(pod, &MockKubeInterface::new()) .await .unwrap(); assert_eq!(1, pod_watcher.known_pods.len()); @@ -1034,7 +1034,7 @@ mod tests { .known_pods .insert("config-a-b494b6-pod".to_string(), PodState::Ended); pod_watcher - .handle_ended_pod_if_needed(pod, &MockKubeImpl::new()) + .handle_ended_pod_if_needed(pod, &MockKubeInterface::new()) .await .unwrap(); assert_eq!(1, pod_watcher.known_pods.len()); @@ -1061,7 +1061,7 @@ mod tests { .known_pods .insert("config-a-b494b6-pod".to_string(), PodState::Deleted); pod_watcher - .handle_deleted_pod_if_needed(pod, &MockKubeImpl::new()) + .handle_deleted_pod_if_needed(pod, &MockKubeInterface::new()) .await .unwrap(); assert_eq!(1, pod_watcher.known_pods.len()); @@ -1154,7 +1154,7 @@ mod tests { let dcc: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); let 
pod_watcher = BrokerPodWatcher::new(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); config_for_tests::configure_find_services( &mut mock, "akri.sh/instance=config-a-b494b6", @@ -1196,7 +1196,7 @@ let dcc: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); let pod_watcher = BrokerPodWatcher::new(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); config_for_tests::configure_find_services( &mut mock, "akri.sh/instance=config-a-b494b6", @@ -1239,7 +1239,7 @@ let dcc: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); let pod_watcher = BrokerPodWatcher::new(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); config_for_tests::configure_find_services( &mut mock, "akri.sh/instance=config-a-b494b6", @@ -1283,7 +1283,7 @@ let dcc: KubeAkriConfig = serde_json::from_str(&dcc_json).unwrap(); let pod_watcher = BrokerPodWatcher::new(); - let mut mock = MockKubeImpl::new(); + let mut mock = MockKubeInterface::new(); config_for_tests::configure_find_services( &mut mock, "akri.sh/instance=config-a-b494b6", @@ -1340,7 +1340,7 @@ } fn configure_for_cleanup_broker_and_configuration_svcs( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, work: &CleanupServices, ) { for i in 0..work.cleanup_services.len() { @@ -1393,7 +1393,7 @@ new_configuration_svc_name: &'static str, } - fn configure_for_running_pod_work(mock: &mut MockKubeImpl, work: &HandlePodRunning) { + fn configure_for_running_pod_work(mock: &mut MockKubeInterface, work: &HandlePodRunning) { config_for_tests::configure_find_config( mock, work.find_config_name, @@ -1464,7 +1464,7 @@ ended: Option<CleanupServices>, } - fn configure_for_handle_pod(mock: &mut MockKubeImpl, handle_pod: &HandlePod) { + fn configure_for_handle_pod(mock: &mut MockKubeInterface, handle_pod: &HandlePod) { if let Some(running) = &handle_pod.running { configure_for_running_pod_work(mock, &running); } diff --git a/controller/src/util/shared_test_utils.rs b/controller/src/util/shared_test_utils.rs index 031e651cc..44654c780 100644 --- a/controller/src/util/shared_test_utils.rs +++ b/controller/src/util/shared_test_utils.rs @@ -5,7 +5,7 @@ pub mod config_for_tests { configuration::KubeAkriConfig, instance::{Instance, KubeAkriInstance, KubeAkriInstanceList}, }, - k8s::test_kube::MockKubeImpl, + k8s::MockKubeInterface, os::file, }; use k8s_openapi::api::core::v1::{PodSpec, PodStatus, ServiceSpec, ServiceStatus}; @@ -18,7 +18,7 @@ pub type ServiceList = ObjectList<Service>; pub fn configure_find_instance( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, instance_name: &'static str, instance_namespace: &'static str, result_file: &'static str, @@ -30,7 +30,13 @@ .withf(move |name, namespace| name == instance_name && namespace == instance_namespace) .returning(move |_, _| { if result_error { - Err(None.ok_or("failure")?)
+ // Return error that instance could not be found + Err(kube::Error::Api(kube::ErrorResponse { + status: "Failure".to_string(), + message: "instances.akri.sh \"akri-blah-901a7b\" not found".to_string(), + reason: "NotFound".to_string(), + code: akri_shared::k8s::ERROR_NOT_FOUND, + })) } else { let dci_json = file::read_file_to_string(result_file); let dci: KubeAkriInstance = serde_json::from_str(&dci_json).unwrap(); @@ -56,7 +62,7 @@ pub mod config_for_tests { } pub fn configure_get_instances( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, result_file: &'static str, listify_result: bool, ) { @@ -74,7 +80,7 @@ pub mod config_for_tests { } pub fn configure_update_instance( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, instance_to_update: Instance, instance_name: &'static str, instance_namespace: &'static str, @@ -104,7 +110,7 @@ pub mod config_for_tests { } pub fn configure_find_config( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, config_name: &'static str, config_namespace: &'static str, result_file: &'static str, @@ -126,7 +132,7 @@ pub mod config_for_tests { } pub fn configure_find_services( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, svc_selector: &'static str, result_file: &'static str, result_error: bool, @@ -146,7 +152,7 @@ pub mod config_for_tests { }); } pub fn configure_add_service( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, svc_name: &'static str, namespace: &'static str, label_id: &'static str, @@ -185,7 +191,7 @@ pub mod config_for_tests { } pub fn configure_remove_service( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, svc_name: &'static str, svc_namespace: &'static str, ) { @@ -203,7 +209,7 @@ pub mod config_for_tests { } pub fn configure_update_service( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, svc_name: &'static str, svc_namespace: &'static str, result_error: bool, @@ -227,7 +233,7 @@ pub mod config_for_tests { } pub fn configure_find_pods( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, pod_selector: &'static str, result_file: &'static str, result_error: bool, @@ -251,7 +257,7 @@ pub mod config_for_tests { } pub fn configure_add_pod( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, pod_name: &'static str, pod_namespace: &'static str, label_id: &'static str, @@ -285,7 +291,7 @@ pub mod config_for_tests { } pub fn configure_remove_pod( - mock: &mut MockKubeImpl, + mock: &mut MockKubeInterface, pod_name: &'static str, pod_namespace: &'static str, ) { diff --git a/deployment/helm/Chart.yaml b/deployment/helm/Chart.yaml index 3e76ffc31..b28753d64 100644 --- a/deployment/helm/Chart.yaml +++ b/deployment/helm/Chart.yaml @@ -15,9 +15,9 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. # Versions are expected to follow Semantic Versioning (https://semver.org/) -version: 0.1.5 +version: 0.6.5 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using. 
-appVersion: 0.1.5 +appVersion: 0.6.5 diff --git a/deployment/helm/crds/akri-configuration-crd.yaml b/deployment/helm/crds/akri-configuration-crd.yaml index 39d72e159..53788a6ef 100644 --- a/deployment/helm/crds/akri-configuration-crd.yaml +++ b/deployment/helm/crds/akri-configuration-crd.yaml @@ -15,100 +15,15 @@ spec: spec: type: object properties: - protocol: # {{ProtocolHandler}} + discoveryHandler: # {{DiscoveryHandlerInfo}} type: object properties: - debugEcho: # {{DebugEchoDiscoveryHandler}} - type: object - properties: - shared: - type: boolean - descriptions: - type: array - items: - type: string - onvif: # {{OnvifDiscoveryHandler}} - type: object - properties: - ipAddresses: # {{FilterList}} - type: object - properties: - action: - type: string - enum: - - Include - - Exclude - items: - type: array - items: - type: string - macAddresses: # {{FilterList}} - type: object - properties: - action: - type: string - enum: - - Include - - Exclude - items: - type: array - items: - type: string - scopes: # {{FilterList}} - type: object - properties: - action: - type: string - enum: - - Include - - Exclude - items: - type: array - items: - type: string - discoveryTimeoutSeconds: - type: integer - udev: - type: object - properties: - udevRules: - type: array - items: - type: string - opcua: - type: object - properties: - opcuaDiscoveryMethod: - type: object - properties: - standard: # {{StandardOpcuaDiscovery}} - type: object - properties: - discoveryUrls: - type: array - items: - type: string - applicationNames: - type: object - properties: - action: - type: string - enum: - - Include - - Exclude - items: - type: array - items: - type: string - oneOf: - - required: ["debugEcho"] - - required: ["onvif"] - - required: ["udev"] - - required: ["opcua"] + name: + type: string + discoveryDetails: + type: string capacity: type: integer - units: - type: string brokerPodSpec: # {{PodSpec}} x-kubernetes-preserve-unknown-fields: true type: object @@ -121,7 +36,7 @@ spec: x-kubernetes-preserve-unknown-fields: true type: object nullable: true - properties: # map + brokerProperties: # map additionalProperties: type: string type: object diff --git a/deployment/helm/crds/akri-instance-crd.yaml b/deployment/helm/crds/akri-instance-crd.yaml index fad553920..61873e7d8 100644 --- a/deployment/helm/crds/akri-instance-crd.yaml +++ b/deployment/helm/crds/akri-instance-crd.yaml @@ -17,7 +17,7 @@ spec: properties: configurationName: type: string - metadata: + brokerProperties: additionalProperties: type: string type: object @@ -31,8 +31,6 @@ spec: additionalProperties: type: string type: object - rbac: - type: string additionalPrinterColumns: - name: Config type: string diff --git a/deployment/helm/templates/agent.yaml b/deployment/helm/templates/agent.yaml index 16c4fba45..478857530 100644 --- a/deployment/helm/templates/agent.yaml +++ b/deployment/helm/templates/agent.yaml @@ -12,8 +12,11 @@ spec: labels: name: akri-agent spec: + {{- /* hostNetwork is only needed for embedded ONVIF discovery */}} + {{- if .Values.agent.full }} hostNetwork: true dnsPolicy: ClusterFirstWithHostNet + {{- end }} {{- if .Values.agent.linuxOnly }} nodeSelector: "kubernetes.io/os": linux @@ -23,20 +26,30 @@ spec: {{- end }} containers: - name: akri-agent + {{- $repository := ternary .Values.agent.image.fullRepository .Values.agent.image.repository .Values.agent.full -}} {{- if .Values.useDevelopmentContainers }} {{- if .Values.useLatestContainers }} - image: {{ printf "%s:latest-dev" .Values.agent.image.repository | quote }} + image: {{ 
printf "%s:%s" $repository (default "latest-dev" .Values.agent.image.tag) | quote }} {{- else }} - image: {{ printf "%s:%s" .Values.agent.image.repository (default (printf "v%s-dev" .Chart.AppVersion) .Values.agent.image.tag) | quote }} + image: {{ printf "%s:%s" $repository (default (printf "v%s-dev" .Chart.AppVersion) .Values.agent.image.tag) | quote }} {{- end }} {{- else }} {{- if .Values.useLatestContainers }} - image: {{ printf "%s:latest" .Values.agent.image.repository | quote }} + image: {{ printf "%s:%s" $repository (default "latest" .Values.agent.image.tag) | quote }} {{- else }} - image: {{ printf "%s:%s" .Values.agent.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.agent.image.tag) | quote }} + image: {{ printf "%s:%s" $repository (default (printf "v%s" .Chart.AppVersion) .Values.agent.image.tag) | quote }} + {{- end }} {{- end }} + {{- with .Values.agent.image.pullPolicy }} + imagePullPolicy: {{ . }} {{- end }} - imagePullPolicy: {{ .Values.agent.image.pullPolicy }} + resources: + requests: + memory: {{ .Values.agent.resources.memoryRequest }} + cpu: {{ .Values.agent.resources.cpuRequest }} + limits: + memory: {{ .Values.agent.resources.memoryLimit }} + cpu: {{ .Values.agent.resources.cpuLimit }} {{- if .Values.agent.securityContext }} securityContext: {{- toYaml .Values.agent.securityContext | nindent 10 }} @@ -45,6 +58,9 @@ spec: {{- if .Values.agent.allowDebugEcho }} - name: ENABLE_DEBUG_ECHO value: "1" + {{- /* environment variable to tell the Agent's embedded debug echo Discovery Handler whether its instances are shared */}} + - name: DEBUG_ECHO_INSTANCES_SHARED + value: {{ .Values.debugEcho.configuration.shared | quote }} {{- end }} - name: HOST_CRICTL_PATH value: /host/usr/bin/crictl @@ -56,20 +72,34 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName + - name: DISCOVERY_HANDLERS_DIRECTORY + value: /var/lib/akri volumeMounts: + - name: discovery-handlers + mountPath: /var/lib/akri - name: device-plugin mountPath: /var/lib/kubelet/device-plugins - name: usr-bin-crictl mountPath: /host/usr/bin/crictl - name: var-run-dockershim mountPath: /host/var/run/dockershim.sock + {{- if .Values.agent.host.udev }} - name: devices mountPath: /run/udev + {{- end }} + {{- if .Values.prometheus.enabled }} + ports: + - name: {{ .Values.prometheus.portName | quote }} + containerPort: {{ .Values.prometheus.port }} + {{- end }} {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . 
| nindent 8 }} {{- end }} volumes: + - name: discovery-handlers + hostPath: + path: {{ .Values.agent.host.discoveryHandlers }} - name: device-plugin hostPath: path: "{{ .Values.agent.host.kubeletDevicePlugins }}" @@ -79,7 +109,9 @@ spec: - name: var-run-dockershim hostPath: path: "{{ .Values.agent.host.dockerShimSock }}" + {{- if .Values.agent.host.udev }} - name: devices hostPath: path: "{{ .Values.agent.host.udev }}" + {{- end }} {{- end }} \ No newline at end of file diff --git a/deployment/helm/templates/controller.yaml b/deployment/helm/templates/controller.yaml index 4cc2d7dbc..81599acb6 100644 --- a/deployment/helm/templates/controller.yaml +++ b/deployment/helm/templates/controller.yaml @@ -20,18 +20,32 @@ spec: - name: akri-controller {{- if .Values.useDevelopmentContainers }} {{- if .Values.useLatestContainers }} - image: {{ printf "%s:latest-dev" .Values.controller.image.repository | quote }} + image: {{ printf "%s:%s" .Values.controller.image.repository (default "latest-dev" .Values.controller.image.tag) | quote }} {{- else }} image: {{ printf "%s:%s" .Values.controller.image.repository (default (printf "v%s-dev" .Chart.AppVersion) .Values.controller.image.tag) | quote }} {{- end }} {{- else }} {{- if .Values.useLatestContainers }} - image: {{ printf "%s:latest" .Values.controller.image.repository | quote }} + image: {{ printf "%s:%s" .Values.controller.image.repository (default "latest" .Values.controller.image.tag) | quote }} {{- else }} image: {{ printf "%s:%s" .Values.controller.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.controller.image.tag) | quote }} {{- end }} {{- end }} - imagePullPolicy: {{ .Values.controller.image.pullPolicy }} + {{- with .Values.controller.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + resources: + requests: + memory: {{ .Values.controller.resources.memoryRequest }} + cpu: {{ .Values.controller.resources.cpuRequest }} + limits: + memory: {{ .Values.controller.resources.memoryLimit }} + cpu: {{ .Values.controller.resources.cpuLimit }} + {{- if .Values.prometheus.enabled }} + ports: + - name: {{ .Values.prometheus.portName | quote }} + containerPort: {{ .Values.prometheus.port }} + {{- end }} {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . 
| nindent 8 }} @@ -44,14 +58,14 @@ spec: {{- end }} {{- if or .Values.controller.linuxOnly .Values.controller.onlyOnControlPlane .Values.controller.nodeSelectors }} nodeSelector: - {{- if .Values.controller.nodeSelectors }} - {{- toYaml .Values.controller.nodeSelectors | nindent 8 }} + {{- if .Values.controller.onlyOnControlPlane }} + node-role.kubernetes.io/master: "" {{- end }} {{- if .Values.controller.linuxOnly }} "kubernetes.io/os": linux {{- end }} - {{- if .Values.controller.onlyOnControlPlane }} - node-role.kubernetes.io/master: "" + {{- if .Values.controller.nodeSelectors }} + {{- toYaml .Values.controller.nodeSelectors | nindent 8 }} {{- end }} {{- end }} -{{- end }} \ No newline at end of file +{{- end }} diff --git a/deployment/helm/templates/custom-configuration.yaml b/deployment/helm/templates/custom-configuration.yaml new file mode 100644 index 000000000..9f38a927b --- /dev/null +++ b/deployment/helm/templates/custom-configuration.yaml @@ -0,0 +1,61 @@ +{{- if .Values.custom.configuration.enabled }} +apiVersion: {{ printf "%s/%s" .Values.crds.group .Values.crds.version }} +kind: Configuration +metadata: + name: {{ .Values.custom.configuration.name }} +spec: + discoveryHandler: + name: {{ required "A custom.configuration.discoveryHandlerName is required." .Values.custom.configuration.discoveryHandlerName }} + discoveryDetails: {{ .Values.custom.configuration.discoveryDetails }} + {{- if .Values.custom.configuration.brokerPod.image.repository }} + brokerPodSpec: + containers: + - name: {{ .Values.custom.configuration.name }}-broker + image: {{ printf "%s:%s" .Values.custom.configuration.brokerPod.image.repository .Values.custom.configuration.brokerPod.image.tag | quote }} + {{- with .Values.custom.configuration.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + resources: + requests: + {{`"{{PLACEHOLDER}}"`}} : "1" + memory: {{ .Values.custom.configuration.brokerPod.resources.memoryRequest }} + cpu: {{ .Values.custom.configuration.brokerPod.resources.cpuRequest }} + limits: + {{`"{{PLACEHOLDER}}"`}} : "1" + memory: {{ .Values.custom.configuration.brokerPod.resources.memoryLimit }} + cpu: {{ .Values.custom.configuration.brokerPod.resources.cpuLimit }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- end }} + {{- if .Values.custom.configuration.brokerPod.image.repository }} + {{- if .Values.custom.configuration.createInstanceServices }} + instanceServiceSpec: + type: {{ .Values.custom.configuration.instanceService.type }} + ports: + - name: {{ .Values.custom.configuration.instanceService.name }} + port: {{ .Values.custom.configuration.instanceService.port }} + protocol: {{ .Values.custom.configuration.instanceService.protocol }} + targetPort: {{ .Values.custom.configuration.instanceService.targetPort }} + {{- end }} + {{- if .Values.custom.configuration.createConfigurationService }} + configurationServiceSpec: + type: {{ .Values.custom.configuration.configurationService.type }} + ports: + - name: {{ .Values.custom.configuration.configurationService.name }} + port: {{ .Values.custom.configuration.configurationService.port }} + protocol: {{ .Values.custom.configuration.configurationService.protocol }} + targetPort: {{ .Values.custom.configuration.configurationService.port }} + {{- end }} + {{- end }} + {{- if .Values.custom.configuration.brokerProperties }} + brokerProperties: + {{- range $key, $val := .Values.custom.configuration.brokerProperties }} + {{- $key | nindent 4 }}: {{ $val | quote }} + {{- end }} + {{- else }} + brokerProperties: {} + {{- end }} + capacity: {{ .Values.custom.configuration.capacity }} +{{- end }} \ No newline at end of file diff --git a/deployment/helm/templates/custom-discovery-handler.yaml b/deployment/helm/templates/custom-discovery-handler.yaml new file mode 100644 index 000000000..f879c1b67 --- /dev/null +++ b/deployment/helm/templates/custom-discovery-handler.yaml @@ -0,0 +1,46 @@ +{{- if .Values.custom.discovery.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ printf "%s-daemonset" .Values.custom.discovery.name }} +spec: + selector: + matchLabels: + name: {{ .Values.custom.discovery.name }} + template: + metadata: + labels: + name: {{ .Values.custom.discovery.name }} + spec: + containers: + - name: {{ .Values.custom.discovery.name }} + image: {{ printf "%s:%s" (required "A custom.discovery.image.repository is required." .Values.custom.discovery.image.repository) .Values.custom.discovery.image.tag | quote }} + {{- with .Values.custom.discovery.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end}} + resources: + requests: + memory: {{ .Values.custom.discovery.resources.memoryRequest }} + cpu: {{ .Values.custom.discovery.resources.cpuRequest }} + limits: + memory: {{ .Values.custom.discovery.resources.memoryLimit }} + cpu: {{ .Values.custom.discovery.resources.cpuLimit }} + env: + - name: DISCOVERY_HANDLERS_DIRECTORY + value: /var/lib/akri + volumeMounts: + - name: discovery-handlers + mountPath: /var/lib/akri + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.custom.discovery.nodeSelectors }} + nodeSelector: + {{- toYaml .Values.custom.discovery.nodeSelectors | nindent 8 }} + {{- end }} + volumes: + - name: discovery-handlers + hostPath: + path: {{ .Values.agent.host.discoveryHandlers }} +{{- end }} \ No newline at end of file diff --git a/deployment/helm/templates/debug-echo-configuration.yaml b/deployment/helm/templates/debug-echo-configuration.yaml new file mode 100644 index 000000000..a85d59afb --- /dev/null +++ b/deployment/helm/templates/debug-echo-configuration.yaml @@ -0,0 +1,67 @@ +{{- if .Values.debugEcho.configuration.enabled }} +apiVersion: {{ printf "%s/%s" .Values.crds.group .Values.crds.version }} +kind: Configuration +metadata: + name: {{ .Values.debugEcho.configuration.name }} +spec: + discoveryHandler: + name: debugEcho + discoveryDetails: |+ + {{- if .Values.debugEcho.configuration.discoveryDetails.descriptions}} + descriptions: + {{- toYaml .Values.debugEcho.configuration.discoveryDetails.descriptions | nindent 6 }} + {{- else }} + descriptions: [] + {{- end }} + {{- if .Values.debugEcho.configuration.brokerPod.image.repository }} + brokerPodSpec: + containers: + - name: {{ .Values.debugEcho.configuration.name }}-broker + image: {{ printf "%s:%s" .Values.debugEcho.configuration.brokerPod.image.repository .Values.debugEcho.configuration.brokerPod.image.tag | quote }} + {{- with .Values.debugEcho.configuration.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + resources: + requests: + {{`"{{PLACEHOLDER}}"`}} : "1" + memory: {{ .Values.debugEcho.configuration.brokerPod.resources.memoryRequest }} + cpu: {{ .Values.debugEcho.configuration.brokerPod.resources.cpuRequest }} + limits: + {{`"{{PLACEHOLDER}}"`}} : "1" + memory: {{ .Values.debugEcho.configuration.brokerPod.resources.memoryLimit }} + cpu: {{ .Values.debugEcho.configuration.brokerPod.resources.cpuLimit }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- end }} + {{- if .Values.debugEcho.configuration.brokerPod.image.repository }} + {{- if .Values.debugEcho.configuration.createInstanceServices }} + instanceServiceSpec: + type: {{ .Values.debugEcho.configuration.instanceService.type }} + ports: + - name: {{ .Values.debugEcho.configuration.instanceService.name }} + port: {{ .Values.debugEcho.configuration.instanceService.port }} + protocol: {{ .Values.debugEcho.configuration.instanceService.protocol }} + targetPort: {{ .Values.debugEcho.configuration.instanceService.targetPort }} + {{- end }} + {{- if .Values.debugEcho.configuration.createConfigurationService }} + configurationServiceSpec: + type: {{ .Values.debugEcho.configuration.configurationService.type }} + ports: + - name: {{ .Values.debugEcho.configuration.configurationService.name }} + port: {{ .Values.debugEcho.configuration.configurationService.port }} + protocol: {{ .Values.debugEcho.configuration.configurationService.protocol }} + targetPort: {{ .Values.debugEcho.configuration.configurationService.port }} + {{- end }} + {{- end }} + {{- if .Values.debugEcho.configuration.brokerProperties }} + brokerProperties: + {{- range $key, $val := .Values.debugEcho.configuration.brokerProperties }} + {{- $key | nindent 4 }}: {{ $val | quote }} + {{- end }} + {{- else }} + brokerProperties: {} + {{- end }} + capacity: 2 +{{- end }} \ No newline at end of file diff --git a/deployment/helm/templates/debug-echo-discovery-handler.yaml b/deployment/helm/templates/debug-echo-discovery-handler.yaml new file mode 100644 index 000000000..c6c3ecea3 --- /dev/null +++ b/deployment/helm/templates/debug-echo-discovery-handler.yaml @@ -0,0 +1,71 @@ +{{- if .Values.debugEcho.discovery.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: akri-debug-echo-discovery-daemonset +spec: + selector: + matchLabels: + name: akri-debug-echo-discovery + template: + metadata: + labels: + name: akri-debug-echo-discovery + spec: + containers: + - name: akri-debug-echo-discovery + {{- if .Values.useDevelopmentContainers }} + {{- if .Values.useLatestContainers }} + image: {{ printf "%s:%s" .Values.debugEcho.discovery.image.repository (default "latest-dev" .Values.debugEcho.discovery.image.tag) | quote }} + {{- else }} + image: {{ printf "%s:%s" .Values.debugEcho.discovery.image.repository (default (printf "v%s-dev" .Chart.AppVersion) .Values.debugEcho.discovery.image.tag) | quote }} + {{- end }} + {{- else }} + {{- if .Values.useLatestContainers }} + image: {{ printf "%s:%s" .Values.debugEcho.discovery.image.repository (default "latest" .Values.debugEcho.discovery.image.tag) | quote }} + {{- else }} + image: {{ printf "%s:%s" .Values.debugEcho.discovery.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.debugEcho.discovery.image.tag) | quote }} + {{- end }} + {{- end }} + {{- with .Values.debugEcho.discovery.image.pullPolicy }} + imagePullPolicy: {{ . 
}} + {{- end}} + resources: + requests: + memory: {{ .Values.debugEcho.discovery.resources.memoryRequest }} + cpu: {{ .Values.debugEcho.discovery.resources.cpuRequest }} + limits: + memory: {{ .Values.debugEcho.discovery.resources.memoryLimit }} + cpu: {{ .Values.debugEcho.discovery.resources.cpuLimit }} + {{- if .Values.debugEcho.discovery.useNetworkConnection }} + ports: + - name: discovery + containerPort: {{ .Values.debugEcho.discovery.port }} + {{- end }} + env: + {{- if .Values.debugEcho.discovery.useNetworkConnection }} + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- end }} + - name: DISCOVERY_HANDLERS_DIRECTORY + value: /var/lib/akri + - name: DEBUG_ECHO_INSTANCES_SHARED + value: {{ .Values.debugEcho.configuration.shared | quote }} + volumeMounts: + - name: discovery-handlers + mountPath: /var/lib/akri + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.debugEcho.discovery.nodeSelectors }} + nodeSelector: + {{- toYaml .Values.debugEcho.discovery.nodeSelectors | nindent 8 }} + {{- end }} + volumes: + - name: discovery-handlers + hostPath: + path: {{ .Values.agent.host.discoveryHandlers }} +{{- end }} \ No newline at end of file diff --git a/deployment/helm/templates/debug-echo.yaml b/deployment/helm/templates/debug-echo.yaml deleted file mode 100644 index 115f2bad9..000000000 --- a/deployment/helm/templates/debug-echo.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- if .Values.debugEcho.enabled }} -apiVersion: akri.sh/v0 -kind: Configuration -metadata: - name: {{ .Values.debugEcho.name }} -spec: - protocol: - debugEcho: - {{- if .Values.debugEcho.descriptions}} - descriptions: - {{- toYaml .Values.debugEcho.descriptions | nindent 6 }} - {{- else }} - descriptions: [] - {{- end }} - shared: {{ .Values.debugEcho.shared }} - brokerPodSpec: - containers: - - name: {{ .Values.debugEcho.name }}-broker - image: {{ .Values.debugEcho.brokerPod.image | quote }} - imagePullPolicy: {{ .Values.debugEcho.pullPolicy }} - resources: - limits: - {{`"{{PLACEHOLDER}}"`}} : "1" - {{- with .Values.imagePullSecrets }} - imagePullSecrets: - {{- toYaml . | nindent 6 }} - {{- end }} - instanceServiceSpec: - type: {{ .Values.debugEcho.instanceService.type }} - ports: - - name: {{ .Values.debugEcho.instanceService.name }} - port: {{ .Values.debugEcho.instanceService.port }} - protocol: {{ .Values.debugEcho.instanceService.protocol }} - targetPort: {{ .Values.debugEcho.instanceService.targetPort }} - configurationServiceSpec: - type: {{ .Values.debugEcho.configurationService.type }} - ports: - - name: {{ .Values.debugEcho.configurationService.name }} - port: {{ .Values.debugEcho.configurationService.port }} - protocol: {{ .Values.debugEcho.configurationService.protocol }} - targetPort: {{ .Values.debugEcho.configurationService.port }} - {{- if .Values.debugEcho.properties }} - properties: - {{- toYaml . 
| nindent 4 }} - {{- else }} - properties: {} - {{- end }} - capacity: {{ .Values.debugEcho.capacity }} -{{- end }} \ No newline at end of file diff --git a/deployment/helm/templates/onvif-configuration.yaml b/deployment/helm/templates/onvif-configuration.yaml new file mode 100644 index 000000000..c8118800f --- /dev/null +++ b/deployment/helm/templates/onvif-configuration.yaml @@ -0,0 +1,95 @@ +{{- if .Values.onvif.configuration.enabled }} +apiVersion: {{ printf "%s/%s" .Values.crds.group .Values.crds.version }} +kind: Configuration +metadata: + name: {{ .Values.onvif.configuration.name }} +spec: + discoveryHandler: + name: onvif + discoveryDetails: |+ + ipAddresses: + action: {{ .Values.onvif.configuration.discoveryDetails.ipAddresses.action }} + {{- if .Values.onvif.configuration.discoveryDetails.ipAddresses.items}} + items: + {{- toYaml .Values.onvif.configuration.discoveryDetails.ipAddresses.items | nindent 8 }} + {{- else }} + items: [] + {{- end }} + macAddresses: + action: {{ .Values.onvif.configuration.discoveryDetails.macAddresses.action }} + {{- if .Values.onvif.configuration.discoveryDetails.macAddresses.items}} + items: + {{- toYaml .Values.onvif.configuration.discoveryDetails.macAddresses.items | nindent 8 }} + {{- else }} + items: [] + {{- end }} + scopes: + action: {{ .Values.onvif.configuration.discoveryDetails.scopes.action }} + {{- if .Values.onvif.configuration.discoveryDetails.scopes.items}} + items: + {{- toYaml .Values.onvif.configuration.discoveryDetails.scopes.items | nindent 8 }} + {{- else }} + items: [] + {{- end }} + discoveryTimeoutSeconds: {{ .Values.onvif.configuration.discoveryDetails.discoveryTimeoutSeconds }} + {{- if .Values.onvif.configuration.brokerPod.image.repository }} + {{- /* Only add broker pod spec if a broker image is provided */}} + brokerPodSpec: + containers: + - name: {{ .Values.onvif.configuration.name }}-broker + image: {{ printf "%s:%s" .Values.onvif.configuration.brokerPod.image.repository .Values.onvif.configuration.brokerPod.image.tag | quote }} + {{- with .Values.onvif.configuration.brokerPod.image.pullPolicy }} + imagePullPolicy: {{ . }} + {{- end }} + {{- if .Values.onvif.configuration.brokerPod.env }} + env: + {{- range $key, $val := .Values.onvif.configuration.brokerPod.env }} + - name: {{ $key }} + value: {{ $val | quote }} + {{- end }} + {{- end }} + resources: + requests: + {{`"{{PLACEHOLDER}}"`}} : "1" + memory: {{ .Values.onvif.configuration.brokerPod.resources.memoryRequest }} + cpu: {{ .Values.onvif.configuration.brokerPod.resources.cpuRequest }} + limits: + {{`"{{PLACEHOLDER}}"`}} : "1" + memory: {{ .Values.onvif.configuration.brokerPod.resources.memoryLimit }} + cpu: {{ .Values.onvif.configuration.brokerPod.resources.cpuLimit }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- end }} + {{- /* Only add service specs if a broker image was specified and service creation was not disabled */}} + {{- if .Values.onvif.configuration.brokerPod.image.repository }} + {{- if .Values.onvif.configuration.createInstanceServices }} + instanceServiceSpec: + type: {{ .Values.onvif.configuration.instanceService.type }} + ports: + - name: {{ .Values.onvif.configuration.instanceService.portName }} + port: {{ .Values.onvif.configuration.instanceService.port }} + protocol: {{ .Values.onvif.configuration.instanceService.protocol }} + targetPort: {{ .Values.onvif.configuration.instanceService.targetPort }} + {{- end }} + {{- if .Values.onvif.configuration.createConfigurationService }} + configurationServiceSpec: + type: {{ .Values.onvif.configuration.configurationService.type }} + ports: + - name: {{ .Values.onvif.configuration.configurationService.portName }} + port: {{ .Values.onvif.configuration.configurationService.port }} + protocol: {{ .Values.onvif.configuration.configurationService.protocol }} + targetPort: {{ .Values.onvif.configuration.configurationService.targetPort }} + {{- end }} + {{- end }} + {{- if .Values.onvif.configuration.brokerProperties }} + brokerProperties: + {{- range $key, $val := .Values.onvif.configuration.brokerProperties }} + {{- $key | nindent 4 }}: {{ $val | quote }} + {{- end }} + {{- else }} + brokerProperties: {} + {{- end }} + capacity: {{ .Values.onvif.configuration.capacity }} +{{- end }} \ No newline at end of file diff --git a/deployment/helm/templates/onvif-discovery-handler.yaml b/deployment/helm/templates/onvif-discovery-handler.yaml new file mode 100644 index 000000000..bf04069c5 --- /dev/null +++ b/deployment/helm/templates/onvif-discovery-handler.yaml @@ -0,0 +1,71 @@ +{{- if .Values.onvif.discovery.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: akri-onvif-discovery-daemonset +spec: + selector: + matchLabels: + name: akri-onvif-discovery + template: + metadata: + labels: + name: akri-onvif-discovery + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: akri-onvif-discovery + {{- if .Values.useDevelopmentContainers }} + {{- if .Values.useLatestContainers }} + image: {{ printf "%s:%s" .Values.onvif.discovery.image.repository (default "latest-dev" .Values.onvif.discovery.image.tag) | quote }} + {{- else }} + image: {{ printf "%s:%s" .Values.onvif.discovery.image.repository (default (printf "v%s-dev" .Chart.AppVersion) .Values.onvif.discovery.image.tag) | quote }} + {{- end }} + {{- else }} + {{- if .Values.useLatestContainers }} + image: {{ printf "%s:%s" .Values.onvif.discovery.image.repository (default "latest" .Values.onvif.discovery.image.tag) | quote }} + {{- else }} + image: {{ printf "%s:%s" .Values.onvif.discovery.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.onvif.discovery.image.tag) | quote }} + {{- end }} + {{- end }} + {{- with .Values.onvif.discovery.image.pullPolicy }} + imagePullPolicy: {{ . 
diff --git a/deployment/helm/templates/onvif.yaml b/deployment/helm/templates/onvif.yaml
deleted file mode 100644
index 36d4d7552..000000000
--- a/deployment/helm/templates/onvif.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
-{{- if .Values.onvif.enabled }}
-apiVersion: akri.sh/v0
-kind: Configuration
-metadata:
-  name: {{ .Values.onvif.name }}
-spec:
-  protocol:
-    onvif:
-      ipAddresses:
-        action: {{ .Values.onvif.ipAddresses.action }}
-        {{- if .Values.onvif.ipAddresses.items}}
-        items:
-        {{- toYaml .Values.onvif.ipAddresses.items | nindent 8 }}
-        {{- else }}
-        items: []
-        {{- end }}
-      macAddresses:
-        action: {{ .Values.onvif.macAddresses.action }}
-        {{- if .Values.onvif.macAddresses.items}}
-        items:
-        {{- toYaml .Values.onvif.macAddresses.items | nindent 8 }}
-        {{- else }}
-        items: []
-        {{- end }}
-      scopes:
-        action: {{ .Values.onvif.scopes.action }}
-        {{- if .Values.onvif.scopes.items}}
-        items:
-        {{- toYaml .Values.onvif.scopes.items | nindent 8 }}
-        {{- else }}
-        items: []
-        {{- end }}
-      discoveryTimeoutSeconds: {{ .Values.onvif.discoveryTimeoutSeconds }}
-  {{- if .Values.onvif.brokerPod.image.repository }}
-  {{- /* Only add broker pod spec if a broker image is provided */}}
-  brokerPodSpec:
-    containers:
-    - name: {{ .Values.onvif.name }}-broker
-      image: {{ .Values.onvif.brokerPod.image.repository | quote }}
-      imagePullPolicy: {{ .Values.onvif.brokerPod.image.pullPolicy }}
-      {{- if .Values.onvif.brokerPod.env }}
-      env:
-        {{- range $key, $val := .Values.onvif.brokerPod.env }}
-        - name: {{ $key }}
-          value: {{ $val | quote }}
-        {{- end }}
-      {{- end }}
-      resources:
-        limits:
-          {{`"{{PLACEHOLDER}}"`}} : "1"
-    {{- with .Values.imagePullSecrets }}
-    imagePullSecrets:
-    {{- toYaml . | nindent 6 }}
-    {{- end }}
-  {{- end }}
-  {{- /* Only add service specs if a broker image was specified and service creation was not disabled */}}
-  {{- if .Values.onvif.brokerPod.image.repository }}
-  {{- if .Values.onvif.createInstanceServices }}
-  instanceServiceSpec:
-    type: {{ .Values.onvif.instanceService.type }}
-    ports:
-    - name: {{ .Values.onvif.instanceService.portName }}
-      port: {{ .Values.onvif.instanceService.port }}
-      protocol: {{ .Values.onvif.instanceService.protocol }}
-      targetPort: {{ .Values.onvif.instanceService.targetPort }}
-  {{- end }}
-  {{- if .Values.onvif.createConfigurationService }}
-  configurationServiceSpec:
-    type: {{ .Values.onvif.configurationService.type }}
-    ports:
-    - name: {{ .Values.onvif.configurationService.portName }}
-      port: {{ .Values.onvif.configurationService.port }}
-      protocol: {{ .Values.onvif.configurationService.protocol }}
-      targetPort: {{ .Values.onvif.configurationService.targetPort }}
-  {{- end }}
-  {{- end }}
-  {{- if .Values.onvif.properties }}
-  properties:
-  {{- toYaml . | nindent 4 }}
-  {{- else }}
-  properties: {}
-  {{- end }}
-  capacity: {{ .Values.onvif.capacity }}
-{{- end }}
\ No newline at end of file
diff --git a/deployment/helm/templates/opcua-configuration.yaml b/deployment/helm/templates/opcua-configuration.yaml
new file mode 100644
index 000000000..088ab62ea
--- /dev/null
+++ b/deployment/helm/templates/opcua-configuration.yaml
@@ -0,0 +1,104 @@
+{{- if .Values.opcua.configuration.enabled }}
+apiVersion: {{ printf "%s/%s" .Values.crds.group .Values.crds.version }}
+kind: Configuration
+metadata:
+  name: {{ .Values.opcua.configuration.name }}
+spec:
+  discoveryHandler:
+    name: opcua
+    discoveryDetails: |+
+      opcuaDiscoveryMethod:
+        standard:
+          discoveryUrls:
+          {{- toYaml .Values.opcua.configuration.discoveryDetails.discoveryUrls | nindent 10 }}
+      applicationNames:
+        action: {{ .Values.opcua.configuration.discoveryDetails.applicationNames.action }}
+        {{- if .Values.opcua.configuration.discoveryDetails.applicationNames.items}}
+        items:
+        {{- toYaml .Values.opcua.configuration.discoveryDetails.applicationNames.items | nindent 8 }}
+        {{- else }}
+        items: []
+        {{- end }}
+  {{- if .Values.opcua.configuration.brokerPod.image.repository }}
+  {{- /* Only add broker pod spec if a broker image is provided */}}
+  brokerPodSpec:
+    containers:
+    - name: {{ .Values.opcua.configuration.name }}-broker
+      image: {{ printf "%s:%s" .Values.opcua.configuration.brokerPod.image.repository .Values.opcua.configuration.brokerPod.image.tag | quote }}
+      {{- with .Values.opcua.configuration.brokerPod.image.pullPolicy }}
+      imagePullPolicy: {{ . }}
+      {{- end }}
+      {{- if .Values.opcua.configuration.brokerPod.env }}
+      env:
+        {{- range $key, $val := .Values.opcua.configuration.brokerPod.env }}
+        - name: {{ $key }}
+          value: {{ $val | quote }}
+        {{- end }}
+      {{- end }}
+      resources:
+        requests:
+          {{`"{{PLACEHOLDER}}"`}} : "1"
+          memory: {{ .Values.opcua.configuration.brokerPod.resources.memoryRequest }}
+          cpu: {{ .Values.opcua.configuration.brokerPod.resources.cpuRequest }}
+        limits:
+          {{`"{{PLACEHOLDER}}"`}} : "1"
+          memory: {{ .Values.opcua.configuration.brokerPod.resources.memoryLimit }}
+          cpu: {{ .Values.opcua.configuration.brokerPod.resources.cpuLimit }}
+      {{- if .Values.opcua.configuration.mountCertificates}}
+      volumeMounts:
+      - name: credentials
+        mountPath: "/etc/opcua-certs/client-pki"
+        readOnly: false
+      {{- end }}
+    {{- with .Values.imagePullSecrets }}
+    imagePullSecrets:
+    {{- toYaml . | nindent 6 }}
+    {{- end }}
+    {{- if .Values.opcua.configuration.mountCertificates }}
+    volumes:
+    - name: credentials
+      secret:
+        secretName: opcua-broker-credentials
+        items:
+        - key: client_certificate
+          path: own/certs/AkriBroker.der
+        - key: client_key
+          path: own/private/AkriBroker.pfx
+        - key: ca_certificate
+          path: trusted/certs/SomeCA.der
+        - key: ca_crl
+          path: trusted/crl/SomeCA.crl
+    {{- end }}
+  {{- end }}
+  {{- /* Only add service specs if a broker image was specified and service
+  creation was not disabled */}}
+  {{- if .Values.opcua.configuration.brokerPod.image.repository }}
+  {{- if .Values.opcua.configuration.createInstanceServices }}
+  instanceServiceSpec:
+    type: {{ .Values.opcua.configuration.instanceService.type }}
+    ports:
+    - name: grpc
+      port: {{ .Values.opcua.configuration.instanceService.port }}
+      protocol: {{ .Values.opcua.configuration.instanceService.protocol }}
+      targetPort: {{ .Values.opcua.configuration.instanceService.targetPort }}
+  {{- end }}
+  {{- if .Values.opcua.configuration.createConfigurationService }}
+  configurationServiceSpec:
+    type: {{ .Values.opcua.configuration.configurationService.type }}
+    ports:
+    - name: grpc
+      port: {{ .Values.opcua.configuration.configurationService.port }}
+      protocol: {{ .Values.opcua.configuration.configurationService.protocol }}
+      targetPort: {{ .Values.opcua.configuration.configurationService.targetPort }}
+  {{- end }}
+  {{- end }}
+  {{- if .Values.opcua.configuration.brokerProperties }}
+  brokerProperties:
+  {{- range $key, $val := .Values.opcua.configuration.brokerProperties }}
+  {{- $key | nindent 4 }}: {{ $val | quote }}
+  {{- end }}
+  {{- else }}
+  brokerProperties: {}
+  {{- end }}
+  capacity: {{ .Values.opcua.configuration.capacity }}
+{{- end }}
\ No newline at end of file
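When `opcua.configuration.mountCertificates` is true, the pod above expects a Secret named `opcua-broker-credentials` in the release namespace carrying exactly the four keys the volume maps to file paths. A hedged sketch (key names and paths come from the template; the values are placeholders):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: opcua-broker-credentials   # name hard-coded in the volume above
type: Opaque
data:
  client_certificate: PLACEHOLDER_BASE64_DER_CERT  # mounted at own/certs/AkriBroker.der
  client_key: PLACEHOLDER_BASE64_PFX_KEY           # mounted at own/private/AkriBroker.pfx
  ca_certificate: PLACEHOLDER_BASE64_DER_CA        # mounted at trusted/certs/SomeCA.der
  ca_crl: PLACEHOLDER_BASE64_CRL                   # mounted at trusted/crl/SomeCA.crl
```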
diff --git a/deployment/helm/templates/opcua-discovery-handler.yaml b/deployment/helm/templates/opcua-discovery-handler.yaml
new file mode 100644
index 000000000..5b0ad33a2
--- /dev/null
+++ b/deployment/helm/templates/opcua-discovery-handler.yaml
@@ -0,0 +1,69 @@
+{{- if .Values.opcua.discovery.enabled }}
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: akri-opcua-discovery-daemonset
+spec:
+  selector:
+    matchLabels:
+      name: akri-opcua-discovery
+  template:
+    metadata:
+      labels:
+        name: akri-opcua-discovery
+    spec:
+      containers:
+      - name: akri-opcua-discovery
+        {{- if .Values.useDevelopmentContainers }}
+        {{- if .Values.useLatestContainers }}
+        image: {{ printf "%s:%s" .Values.opcua.discovery.image.repository (default "latest-dev" .Values.opcua.discovery.image.tag) | quote }}
+        {{- else }}
+        image: {{ printf "%s:%s" .Values.opcua.discovery.image.repository (default (printf "v%s-dev" .Chart.AppVersion) .Values.opcua.discovery.image.tag) | quote }}
+        {{- end }}
+        {{- else }}
+        {{- if .Values.useLatestContainers }}
+        image: {{ printf "%s:%s" .Values.opcua.discovery.image.repository (default "latest" .Values.opcua.discovery.image.tag) | quote }}
+        {{- else }}
+        image: {{ printf "%s:%s" .Values.opcua.discovery.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.opcua.discovery.image.tag) | quote }}
+        {{- end }}
+        {{- end }}
+        {{- with .Values.opcua.discovery.image.pullPolicy }}
+        imagePullPolicy: {{ . }}
+        {{- end}}
+        resources:
+          requests:
+            memory: {{ .Values.opcua.discovery.resources.memoryRequest }}
+            cpu: {{ .Values.opcua.discovery.resources.cpuRequest }}
+          limits:
+            memory: {{ .Values.opcua.discovery.resources.memoryLimit }}
+            cpu: {{ .Values.opcua.discovery.resources.cpuLimit }}
+        {{- if .Values.opcua.discovery.useNetworkConnection }}
+        ports:
+        - name: discovery
+          containerPort: {{ .Values.opcua.discovery.port }}
+        {{- end }}
+        env:
+          {{- if .Values.opcua.discovery.useNetworkConnection }}
+          - name: POD_IP
+            valueFrom:
+              fieldRef:
+                fieldPath: status.podIP
+          {{- end }}
+          - name: DISCOVERY_HANDLERS_DIRECTORY
+            value: /var/lib/akri
+        volumeMounts:
+        - name: discovery-handlers
+          mountPath: /var/lib/akri
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- if .Values.opcua.discovery.nodeSelectors }}
+      nodeSelector:
+        {{- toYaml .Values.opcua.discovery.nodeSelectors | nindent 8 }}
+      {{- end }}
+      volumes:
+      - name: discovery-handlers
+        hostPath:
+          path: {{ .Values.agent.host.discoveryHandlers }}
+{{- end }}
\ No newline at end of file
diff --git a/deployment/helm/templates/opcua.yaml b/deployment/helm/templates/opcua.yaml
deleted file mode 100644
index 9ed5b3ffe..000000000
--- a/deployment/helm/templates/opcua.yaml
+++ /dev/null
@@ -1,93 +0,0 @@
-{{- if .Values.opcua.enabled }}
-apiVersion: akri.sh/v0
-kind: Configuration
-metadata:
-  name: {{ .Values.opcua.name }}
-spec:
-  protocol:
-    opcua:
-      opcuaDiscoveryMethod:
-        standard:
-          discoveryUrls:
-          {{- toYaml .Values.opcua.discoveryUrls | nindent 10 }}
-      applicationNames:
-        action: {{ .Values.opcua.applicationNames.action }}
-        {{- if .Values.opcua.applicationNames.items}}
-        items:
-        {{- toYaml .Values.opcua.applicationNames.items | nindent 8 }}
-        {{- else }}
-        items: []
-        {{- end }}
-  {{- if .Values.opcua.brokerPod.image.repository }}
-  {{- /* Only add broker pod spec if a broker image is provided */}}
-  brokerPodSpec:
-    containers:
-    - name: {{ .Values.opcua.name }}-broker
-      image: {{ .Values.opcua.brokerPod.image.repository | quote }}
-      imagePullPolicy: {{ .Values.opcua.brokerPod.image.pullPolicy }}
-      {{- if .Values.opcua.brokerPod.env }}
-      env:
-        {{- range $key, $val := .Values.opcua.brokerPod.env }}
-        - name: {{ $key }}
-          value: {{ $val | quote }}
-        {{- end }}
-      {{- end }}
-      resources:
-        limits:
-          {{`"{{PLACEHOLDER}}"`}} : "1"
-      {{- if .Values.opcua.mountCertificates}}
-      volumeMounts:
-      - name: credentials
-        mountPath: "/etc/opcua-certs/client-pki"
-        readOnly: false
-      {{- end }}
-    {{- with .Values.imagePullSecrets }}
-    imagePullSecrets:
-    {{- toYaml . | nindent 6 }}
-    {{- end }}
-    {{- if .Values.opcua.mountCertificates }}
-    volumes:
-    - name: credentials
-      secret:
-        secretName: opcua-broker-credentials
-        items:
-        - key: client_certificate
-          path: own/certs/AkriBroker.der
-        - key: client_key
-          path: own/private/AkriBroker.pfx
-        - key: ca_certificate
-          path: trusted/certs/SomeCA.der
-        - key: ca_crl
-          path: trusted/crl/SomeCA.crl
-    {{- end }}
-  {{- end }}
-  {{- /* Only add service specs if a broker image was specified and service
-  creation was not disabled */}}
-  {{- if .Values.opcua.brokerPod.image.repository }}
-  {{- if .Values.opcua.createInstanceServices }}
-  instanceServiceSpec:
-    type: {{ .Values.opcua.instanceService.type }}
-    ports:
-    - name: grpc
-      port: {{ .Values.opcua.instanceService.port }}
-      protocol: {{ .Values.opcua.instanceService.protocol }}
-      targetPort: {{ .Values.opcua.instanceService.targetPort }}
-  {{- end }}
-  {{- if .Values.opcua.createConfigurationService }}
-  configurationServiceSpec:
-    type: {{ .Values.opcua.configurationService.type }}
-    ports:
-    - name: grpc
-      port: {{ .Values.opcua.configurationService.port }}
-      protocol: {{ .Values.opcua.configurationService.protocol }}
-      targetPort: {{ .Values.opcua.configurationService.targetPort }}
-  {{- end }}
-  {{- end }}
-  {{- if .Values.opcua.properties }}
-  properties:
-  {{- toYaml . | nindent 4 }}
-  {{- else }}
-  properties: {}
-  {{- end }}
-  capacity: {{ .Values.opcua.capacity }}
-{{- end }}
\ No newline at end of file
diff --git a/deployment/helm/templates/prometheus.yaml b/deployment/helm/templates/prometheus.yaml
new file mode 100644
index 000000000..a50be0767
--- /dev/null
+++ b/deployment/helm/templates/prometheus.yaml
@@ -0,0 +1,31 @@
+{{- if .Values.prometheus.enabled }}
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: akri-agent-metrics
+  namespace: {{ .Release.Namespace }}
+  labels:
+    release: prometheus
+spec:
+  selector:
+    matchLabels:
+      name: akri-agent
+  podMetricsEndpoints:
+  - port: {{ .Values.prometheus.portName | quote }}
+    path: {{ .Values.prometheus.endpoint }}
+---
+apiVersion: monitoring.coreos.com/v1
+kind: PodMonitor
+metadata:
+  name: akri-controller-metrics
+  namespace: {{ .Release.Namespace }}
+  labels:
+    release: prometheus
+spec:
+  selector:
+    matchLabels:
+      app: akri-controller
+  podMetricsEndpoints:
+  - port: {{ .Values.prometheus.portName | quote }}
+    path: {{ .Values.prometheus.endpoint }}
+{{- end }}
\ No newline at end of file
diff --git a/deployment/helm/templates/rbac.yaml b/deployment/helm/templates/rbac.yaml
index 382ea0200..e96378059 100644
--- a/deployment/helm/templates/rbac.yaml
+++ b/deployment/helm/templates/rbac.yaml
@@ -20,10 +20,10 @@ rules:
 - apiGroups: [""]
   resources: ["nodes"]
   verbs: ["get", "list", "watch"]
-- apiGroups: ["akri.sh"]
+- apiGroups: [{{ .Values.crds.group | quote }}]
  resources: ["instances"]
   verbs: ["get", "list", "watch", "update", "patch"]
-- apiGroups: ["akri.sh"]
+- apiGroups: [{{ .Values.crds.group | quote }}]
   resources: ["configurations"]
   verbs: ["get", "list", "watch"]
 ---
@@ -35,10 +35,10 @@ rules:
 - apiGroups: [""]
   resources: ["pods"]
   verbs: ["get", "list", "watch"]
-- apiGroups: ["akri.sh"]
+- apiGroups: [{{ .Values.crds.group | quote }}]
   resources: ["instances"]
   verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
-- apiGroups: ["akri.sh"]
+- apiGroups: [{{ .Values.crds.group | quote }}]
   resources: ["configurations"]
   verbs: ["get", "list", "watch"]
 ---
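These PodMonitors use the `monitoring.coreos.com/v1` API, so they only apply on clusters where the Prometheus Operator CRDs are installed; the `release: prometheus` label assumes a Prometheus instance selecting on that label. Enabling scraping is then a values-only change, using the defaults added to values.yaml below:

```yaml
prometheus:
  enabled: true       # render the two PodMonitors above
  endpoint: /metrics  # path scraped on each pod
  port: 8080          # metrics port exposed by the Agent and Controller
  portName: metrics   # must match the named container port on those pods
```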
diff --git a/deployment/helm/templates/udev-configuration.yaml b/deployment/helm/templates/udev-configuration.yaml
new file mode 100644
index 000000000..a58247530
--- /dev/null
+++ b/deployment/helm/templates/udev-configuration.yaml
@@ -0,0 +1,79 @@
+{{- if .Values.udev.configuration.enabled }}
+apiVersion: {{ printf "%s/%s" .Values.crds.group .Values.crds.version }}
+kind: Configuration
+metadata:
+  name: {{ .Values.udev.configuration.name }}
+spec:
+  discoveryHandler:
+    name: udev
+    discoveryDetails: |+
+      udevRules:
+      {{- required "Please set at least one udev rule with `--set udev.configuration.discoveryDetails.udevRules[0]==\"\"' to specify what you want discovered. See the udev Configuration document at https://github.com/deislabs/akri/blob/main/docs/udev-configuration.md for more information." .Values.udev.configuration.discoveryDetails.udevRules | toYaml | nindent 6 }}
+  {{- if .Values.udev.configuration.brokerPod.image.repository }}
+  {{- /* Only add broker pod spec if a broker image is provided */}}
+  brokerPodSpec:
+    containers:
+    - name: {{ .Values.udev.configuration.name }}-broker
+      image: {{ printf "%s:%s" .Values.udev.configuration.brokerPod.image.repository .Values.udev.configuration.brokerPod.image.tag | quote }}
+      {{- with .Values.udev.configuration.brokerPod.image.pullPolicy }}
+      imagePullPolicy: {{ . }}
+      {{- end }}
+      {{- if .Values.udev.configuration.brokerPod.env }}
+      env:
+        {{- range $key, $val := .Values.udev.configuration.brokerPod.env }}
+        - name: {{ $key }}
+          value: {{ $val | quote }}
+        {{- end }}
+      {{- end }}
+      securityContext:
+        {{- if .Values.udev.configuration.brokerPod.securityContext }}
+        {{- toYaml .Values.udev.configuration.brokerPod.securityContext | nindent 8 }}
+        {{- else}}
+        privileged: true
+        {{- end}}
+      resources:
+        requests:
+          {{`"{{PLACEHOLDER}}"`}} : "1"
+          memory: {{ .Values.udev.configuration.brokerPod.resources.memoryRequest }}
+          cpu: {{ .Values.udev.configuration.brokerPod.resources.cpuRequest }}
+        limits:
+          {{`"{{PLACEHOLDER}}"`}} : "1"
+          memory: {{ .Values.udev.configuration.brokerPod.resources.memoryLimit }}
+          cpu: {{ .Values.udev.configuration.brokerPod.resources.cpuLimit }}
+    {{- with .Values.imagePullSecrets }}
+    imagePullSecrets:
+    {{- toYaml . | nindent 6 }}
+    {{- end }}
+  {{- end }}
+  {{- /* Only add service specs if a broker image was specified and service
+  creation was not disabled */}}
+  {{- if .Values.udev.configuration.brokerPod.image.repository }}
+  {{- if .Values.udev.configuration.createInstanceServices }}
+  instanceServiceSpec:
+    type: {{ .Values.udev.configuration.instanceService.type }}
+    ports:
+    - name: {{ .Values.udev.configuration.instanceService.portName }}
+      port: {{ .Values.udev.configuration.instanceService.port }}
+      protocol: {{ .Values.udev.configuration.instanceService.protocol }}
+      targetPort: {{ .Values.udev.configuration.instanceService.targetPort }}
+  {{- end }}
+  {{- if .Values.udev.configuration.createConfigurationService }}
+  configurationServiceSpec:
+    type: {{ .Values.udev.configuration.configurationService.type }}
+    ports:
+    - name: {{ .Values.udev.configuration.configurationService.portName }}
+      port: {{ .Values.udev.configuration.configurationService.port }}
+      protocol: {{ .Values.udev.configuration.configurationService.protocol }}
+      targetPort: {{ .Values.udev.configuration.configurationService.targetPort }}
+  {{- end }}
+  {{- end }}
+  {{- if .Values.udev.configuration.brokerProperties }}
+  brokerProperties:
+  {{- range $key, $val := .Values.udev.configuration.brokerProperties }}
+  {{- $key | nindent 4 }}: {{ $val | quote }}
+  {{- end }}
+  {{- else }}
+  brokerProperties: {}
+  {{- end }}
+  capacity: {{ .Values.udev.configuration.capacity }}
+{{- end }}
\ No newline at end of file
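The `required` guard refuses to render the chart until at least one udev rule is supplied under `udev.configuration.discoveryDetails.udevRules`. For illustration only (the rule itself is a hypothetical example matching V4L video devices):

```yaml
udev:
  configuration:
    enabled: true
    discoveryDetails:
      udevRules:
      - 'KERNEL=="video[0-9]*"'  # hypothetical rule: discover /dev/video* nodes
```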
diff --git a/deployment/helm/templates/udev-discovery-handler.yaml b/deployment/helm/templates/udev-discovery-handler.yaml
new file mode 100644
index 000000000..4921b032c
--- /dev/null
+++ b/deployment/helm/templates/udev-discovery-handler.yaml
@@ -0,0 +1,80 @@
+{{- if .Values.udev.discovery.enabled }}
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: akri-udev-discovery-daemonset
+spec:
+  selector:
+    matchLabels:
+      name: akri-udev-discovery
+  template:
+    metadata:
+      labels:
+        name: akri-udev-discovery
+    spec:
+      nodeSelector:
+        "kubernetes.io/os": linux
+      containers:
+      - name: akri-udev-discovery
+        {{- if .Values.useDevelopmentContainers }}
+        {{- if .Values.useLatestContainers }}
+        image: {{ printf "%s:%s" .Values.udev.discovery.image.repository (default "latest-dev" .Values.udev.discovery.image.tag) | quote }}
+        {{- else }}
+        image: {{ printf "%s:%s" .Values.udev.discovery.image.repository (default (printf "v%s-dev" .Chart.AppVersion) .Values.udev.discovery.image.tag) | quote }}
+        {{- end }}
+        {{- else }}
+        {{- if .Values.useLatestContainers }}
+        image: {{ printf "%s:%s" .Values.udev.discovery.image.repository (default "latest" .Values.udev.discovery.image.tag) | quote }}
+        {{- else }}
+        image: {{ printf "%s:%s" .Values.udev.discovery.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.udev.discovery.image.tag) | quote }}
+        {{- end }}
+        {{- end }}
+        {{- with .Values.udev.discovery.image.pullPolicy }}
+        imagePullPolicy: {{ . }}
+        {{- end}}
+        resources:
+          requests:
+            memory: {{ .Values.udev.discovery.resources.memoryRequest }}
+            cpu: {{ .Values.udev.discovery.resources.cpuRequest }}
+          limits:
+            memory: {{ .Values.udev.discovery.resources.memoryLimit }}
+            cpu: {{ .Values.udev.discovery.resources.cpuLimit }}
+        {{- if .Values.udev.discovery.useNetworkConnection }}
+        ports:
+        - name: discovery
+          containerPort: {{ .Values.udev.discovery.port }}
+        {{- end }}
+        env:
+          {{- if .Values.udev.discovery.useNetworkConnection }}
+          - name: POD_IP
+            valueFrom:
+              fieldRef:
+                fieldPath: status.podIP
+          {{- end }}
+          - name: DISCOVERY_HANDLERS_DIRECTORY
+            value: /var/lib/akri
+        volumeMounts:
+        - name: discovery-handlers
+          mountPath: /var/lib/akri
+        {{- if .Values.udev.discovery.host.udev }}
+        - name: devices
+          mountPath: /run/udev
+        {{- end }}
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- if .Values.udev.discovery.nodeSelectors }}
+      nodeSelector:
+        {{- toYaml .Values.udev.discovery.nodeSelectors | nindent 8 }}
+      {{- end }}
+      volumes:
+      - name: discovery-handlers
+        hostPath:
+          path: {{ .Values.agent.host.discoveryHandlers }}
+      {{- if .Values.udev.discovery.host.udev }}
+      - name: devices
+        hostPath:
+          path: "{{ .Values.udev.discovery.host.udev }}"
+      {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/deployment/helm/templates/udev.yaml b/deployment/helm/templates/udev.yaml
deleted file mode 100644
index 6b495535f..000000000
--- a/deployment/helm/templates/udev.yaml
+++ /dev/null
@@ -1,68 +0,0 @@
-{{- if .Values.udev.enabled }}
-apiVersion: akri.sh/v0
-kind: Configuration
-metadata:
-  name: {{ .Values.udev.name }}
-spec:
-  protocol:
-    udev:
-      udevRules:
-      {{- required "Please set at least one udev rule with `--set udev.udevRules[0]==\"\"' to specify what you want discovered. See the udev Configuration document at https://github.com/deislabs/akri/blob/main/docs/udev-configuration.md for more information." .Values.udev.udevRules | toYaml | nindent 6 }}
-  {{- if .Values.udev.brokerPod.image.repository }}
-  {{- /* Only add broker pod spec if a broker image is provided */}}
-  brokerPodSpec:
-    containers:
-    - name: {{ .Values.udev.name }}-broker
-      image: {{ .Values.udev.brokerPod.image.repository | quote }}
-      imagePullPolicy: {{ .Values.udev.brokerPod.image.pullPolicy }}
-      {{- if .Values.udev.brokerPod.env }}
-      env:
-        {{- range $key, $val := .Values.udev.brokerPod.env }}
-        - name: {{ $key }}
-          value: {{ $val | quote }}
-        {{- end }}
-      {{- end }}
-      securityContext:
-        {{- if .Values.udev.brokerPod.securityContext }}
-        {{- toYaml .Values.udev.brokerPod.securityContext | nindent 8 }}
-        {{- else}}
-        privileged: true
-        {{- end}}
-      resources:
-        limits:
-          {{`"{{PLACEHOLDER}}"`}} : "1"
-    {{- with .Values.imagePullSecrets }}
-    imagePullSecrets:
-    {{- toYaml . | nindent 6 }}
-    {{- end }}
-  {{- end }}
-  {{- /* Only add service specs if a broker image was specified and service
-  creation was not disabled */}}
-  {{- if .Values.udev.brokerPod.image.repository }}
-  {{- if .Values.udev.createInstanceServices }}
-  instanceServiceSpec:
-    type: {{ .Values.udev.instanceService.type }}
-    ports:
-    - name: {{ .Values.udev.instanceService.portName }}
-      port: {{ .Values.udev.instanceService.port }}
-      protocol: {{ .Values.udev.instanceService.protocol }}
-      targetPort: {{ .Values.udev.instanceService.targetPort }}
-  {{- end }}
-  {{- if .Values.udev.createConfigurationService }}
-  configurationServiceSpec:
-    type: {{ .Values.udev.configurationService.type }}
-    ports:
-    - name: {{ .Values.udev.configurationService.portName }}
-      port: {{ .Values.udev.configurationService.port }}
-      protocol: {{ .Values.udev.configurationService.protocol }}
-      targetPort: {{ .Values.udev.configurationService.targetPort }}
-  {{- end }}
-  {{- end }}
-  {{- if .Values.udev.properties }}
-  properties:
-  {{- toYaml . | nindent 4 }}
-  {{- else }}
-  properties: {}
-  {{- end }}
-  capacity: {{ .Values.udev.capacity }}
-{{- end }}
\ No newline at end of file
diff --git a/deployment/helm/templates/webhook-configuration.yaml b/deployment/helm/templates/webhook-configuration.yaml
new file mode 100644
index 000000000..5b5eae7ac
--- /dev/null
+++ b/deployment/helm/templates/webhook-configuration.yaml
@@ -0,0 +1,149 @@
+{{- if .Values.webhookConfiguration.enabled }}
+apiVersion: v1
+kind: List
+metadata:
+  name: {{ .Values.webhookConfiguration.name }}
+items:
+  - apiVersion: v1
+    kind: ServiceAccount
+    metadata:
+      name: {{ .Values.webhookConfiguration.name }}
+      namespace: {{ .Release.Namespace }}
+  - apiVersion: rbac.authorization.k8s.io/v1
+    kind: Role
+    metadata:
+      name: {{ .Values.webhookConfiguration.name }}
+      namespace: {{ .Release.Namespace }}
+    rules:
+      - apiGroups: [""]
+        resources: ["pods"]
+        verbs: ["get"]
+  - apiVersion: rbac.authorization.k8s.io/v1
+    kind: RoleBinding
+    metadata:
+      name: {{ .Values.webhookConfiguration.name }}
+      namespace: {{ .Release.Namespace }}
+    roleRef:
+      apiGroup: rbac.authorization.k8s.io
+      kind: Role
+      name: {{ .Values.webhookConfiguration.name }}
+    subjects:
+      - kind: ServiceAccount
+        name: {{ .Values.webhookConfiguration.name }}
+        namespace: {{ .Release.Namespace }}
+  - apiVersion: apps/v1
+    kind: Deployment
+    metadata:
+      name: {{ .Values.webhookConfiguration.name }}
+    spec:
+      replicas: 1
+      selector:
+        matchLabels:
+          app: {{ .Values.webhookConfiguration.name }}
+      template:
+        metadata:
+          labels:
+            app: {{ .Values.webhookConfiguration.name }}
+        spec:
+          {{- if .Values.rbac.enabled }}
+          serviceAccountName: {{ .Values.webhookConfiguration.name }}
+          {{- end }}
+          containers:
+            - name: webhook
+              {{- if .Values.useDevelopmentContainers }}
+              {{- if .Values.useLatestContainers }}
+              image: {{ printf "%s:latest-dev" .Values.webhookConfiguration.image.repository | quote }}
+              {{- else }}
+              image: {{ printf "%s:%s" .Values.webhookConfiguration.image.repository (default (printf "v%s-dev" .Chart.AppVersion) .Values.webhookConfiguration.image.tag) | quote }}
+              {{- end }}
+              {{- else }}
+              {{- if .Values.useLatestContainers }}
+              image: {{ printf "%s:latest" .Values.webhookConfiguration.image.repository | quote }}
+              {{- else }}
+              image: {{ printf "%s:%s" .Values.webhookConfiguration.image.repository (default (printf "v%s" .Chart.AppVersion) .Values.webhookConfiguration.image.tag) | quote }}
+              {{- end }}
+              {{- end }}
+              imagePullPolicy: {{ .Values.webhookConfiguration.image.pullPolicy }}
+              resources:
+                requests:
+                  memory: {{ .Values.webhookConfiguration.resources.memoryRequest }}
+                  cpu: {{ .Values.webhookConfiguration.resources.cpuRequest }}
+                limits:
+                  memory: {{ .Values.webhookConfiguration.resources.memoryLimit }}
+                  cpu: {{ .Values.webhookConfiguration.resources.cpuLimit }}
+              command:
+                - /server
+              args:
+                - --tls-crt-file=/secrets/tls.crt
+                - --tls-key-file=/secrets/tls.key
+                - --port=8443
+              volumeMounts:
+                - name: secrets
+                  mountPath: /secrets
+                  readOnly: true
+          volumes:
+            - name: secrets
+              secret:
+                secretName: {{ .Values.webhookConfiguration.name }}
+          {{- with .Values.imagePullSecrets }}
+          imagePullSecrets:
+            {{- toYaml . | nindent 12 }}
+          {{- end }}
+          {{- if .Values.webhookConfiguration.allowOnControlPlane }}
+          tolerations:
+            {{- /* Allow this pod to run on the master. */}}
+            - key: node-role.kubernetes.io/master
+              effect: NoSchedule
+          {{- end }}
+          {{- if or .Values.webhookConfiguration.linuxOnly .Values.webhookConfiguration.onlyOnControlPlane .Values.webhookConfiguration.nodeSelectors }}
+          nodeSelector:
+            {{- if .Values.webhookConfiguration.nodeSelectors }}
+            {{- toYaml .Values.webhookConfiguration.nodeSelectors | nindent 8 }}
+            {{- end }}
+            {{- if .Values.webhookConfiguration.linuxOnly }}
+            "kubernetes.io/os": linux
+            {{- end }}
+            {{- if .Values.webhookConfiguration.onlyOnControlPlane }}
+            node-role.kubernetes.io/master: ""
+            {{- end }}
+          {{- end }}
+  - apiVersion: v1
+    kind: Service
+    metadata:
+      name: {{ .Values.webhookConfiguration.name }}
+    spec:
+      selector:
+        app: {{ .Values.webhookConfiguration.name }}
+      ports:
+        - name: http
+          port: 443
+          targetPort: 8443
+  - apiVersion: admissionregistration.k8s.io/v1
+    kind: ValidatingWebhookConfiguration
+    metadata:
+      name: {{ .Values.webhookConfiguration.name }}
+    webhooks:
+      - name: {{ .Values.webhookConfiguration.name }}.{{ .Release.Namespace }}.svc
+        clientConfig:
+          service:
+            name: {{ .Values.webhookConfiguration.name }}
+            namespace: {{ .Release.Namespace }}
+            port: 443
+            path: "/validate"
+          caBundle: {{ required "please rerun helm install" .Values.webhookConfiguration.caBundle }}
+        rules:
+          - operations:
+              - "CREATE"
+              - "UPDATE"
+            apiGroups:
+              - {{ .Values.crds.group }}
+            apiVersions:
+              - {{ .Values.crds.version }}
+            resources:
+              - "configurations"
+            scope: "*"
+        admissionReviewVersions:
+          - v1
+          - v1beta1
+        sideEffects: None
+{{- end }}
\ No newline at end of file
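Since the template hard-requires `caBundle` ("please rerun helm install"), enabling the webhook means handing the cluster's CA bundle to Helm at install time; how that bundle and the TLS Secret are produced is deployment-specific and not shown here. A sketch of the values involved (the base64 string is a placeholder):

```yaml
webhookConfiguration:
  enabled: true
  name: akri-webhook-configuration
  caBundle: PLACEHOLDER_BASE64_PEM_CA  # required; used by Kubernetes to validate the webhook's certificate
```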
diff --git a/deployment/helm/values.yaml b/deployment/helm/values.yaml
index 0df8ae0fb..1265ea33c 100644
--- a/deployment/helm/values.yaml
+++ b/deployment/helm/values.yaml
@@ -16,10 +16,26 @@ useDevelopmentContainers: true
 # This can be set from the helm command line using `--set imagePullSecrets[0].name="mysecret"`
 imagePullSecrets: []
 
+# generalize references to `apiGroups` and `apiVersion` values for Akri CRDs
+crds:
+  group: akri.sh
+  version: v0
+
 rbac:
   # enabled defines whether to apply rbac to Akri
   enabled: true
 
+prometheus:
+  # enabled defines whether metrics ports are exposed on
+  # the Controller and Agent
+  enabled: false
+  # endpoint is the path at which metrics are exposed
+  endpoint: /metrics
+  # port is the port that the metrics service is exposed on
+  port: 8080
+  # portName is the name of the metrics port
+  portName: metrics
+
 controller:
   # enabled defines whether to apply the Akri Controller
   enabled: true
@@ -27,10 +43,11 @@ controller:
     # repository is the Akri Controller container reference
     repository: ghcr.io/deislabs/akri/controller
     # tag is the Akri Controller container tag
-    # controller.yaml will default to v(AppVersion)-dev
+    # controller.yaml will default to v(AppVersion)[-dev]
+    # with `-dev` added if `useDevelopmentContainers` is specified
     tag:
     # pullPolicy is the Akri Controller pull policy
-    pullPolicy: Always
+    pullPolicy: ""
   # onlyOnControlPlane dictates whether the Akri Controller will only run on nodes with
   # the label with (key, value) of ("node-role.kubernetes.io/master", "")
   onlyOnControlPlane: false
@@ -42,249 +59,612 @@ controller:
   # nodeSelectors is the array of nodeSelectors used to target nodes for the Akri Controller to run on
   # This can be set from the helm command line using `--set controller.nodeSelectors.label="value"`
   nodeSelectors: {}
+  resources:
+    # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+    # for it to be scheduled by the Kubernetes Scheduler
+    memoryRequest: 11Mi
+    # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+    # for it to be scheduled by the Kubernetes Scheduler
+    cpuRequest: 10m
+    # memoryLimit defines the maximum amount of RAM this Pod can consume.
+    memoryLimit: 100Mi
+    # cpuLimit defines the maximum amount of CPU this Pod can consume.
+    cpuLimit: 26m
 
 agent:
   # enabled defines whether to apply the Akri Agent
   enabled: true
+  # full specifies that the `agent-full` image should be used which has embedded Discovery Handlers
+  full: false
   image:
     # repository is the Akri Agent container reference
     repository: ghcr.io/deislabs/akri/agent
+    # fullRepository is the container reference for the Akri Agent with embedded Discovery Handlers
+    fullRepository: ghcr.io/deislabs/akri/agent-full
     # tag is the Akri Agent container tag
-    # agent.yaml will default to v(AppVersion)-dev
+    # agent.yaml will default to v(AppVersion)[-dev]
+    # with `-dev` added if `useDevelopmentContainers` is specified
     tag:
     # pullPolicy is the Akri Agent pull policy
-    pullPolicy: Always
+    pullPolicy: ""
   securityContext: {}
   host:
+    # discoveryHandlers is the location of Akri Discovery Handler sockets and
+    # the agent registration service
+    discoveryHandlers: /var/lib/akri
     # kubeletDevicePlugins is the location of the kubelet device-plugin sockets
     kubeletDevicePlugins: /var/lib/kubelet/device-plugins
     # crictl is the node path to crictl
     crictl: /usr/bin/crictl
     # dockerShimSock is the node path of the docker socket
     dockerShimSock: /var/run/dockershim.sock
-    # udev is the node path of udev
-    udev: /run/udev
+    # udev is the node path of udev, usually at `/run/udev`
+    udev:
   # allowDebugEcho dictates whether the Akri Agent will allow DebugEcho Configurations
   allowDebugEcho: false
   # linuxOnly dictates whether the Akri Agent will only run on a linux node
   linuxOnly: true
+  resources:
+    # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+    # for it to be scheduled by the Kubernetes Scheduler
+    memoryRequest: 11Mi
+    # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+    # for it to be scheduled by the Kubernetes Scheduler
+    cpuRequest: 10m
+    # memoryLimit defines the maximum amount of RAM this Pod can consume.
+    memoryLimit: 79Mi
+    # cpuLimit defines the maximum amount of CPU this Pod can consume.
+    cpuLimit: 26m
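The new `agent.full` switch is what the per-handler `*.discovery` sections pair with: the default slim image expects external Discovery Handler DaemonSets, while `agent-full` embeds all handlers. An assumed override selecting the full Agent and permitting debugEcho Configurations:

```yaml
agent:
  full: true            # use ghcr.io/deislabs/akri/agent-full instead of the slim image
  allowDebugEcho: true  # must be set before any debugEcho Configuration is honored
```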
+
+custom:
+  configuration:
+    # enabled defines whether to load a custom configuration
+    enabled: false
+    # name is the Kubernetes resource name that will be created for this
+    # custom configuration
+    name: akri-custom
+    # discoveryHandlerName is the name of the Discovery Handler the Configuration is using
+    discoveryHandlerName:
+    # brokerProperties is a map of properties that will be passed to any instances
+    # created as a result of applying this custom configuration
+    brokerProperties: {}
+    # capacity is the capacity for any instances created as a result of
+    # applying this custom configuration
+    capacity: 1
+    # discoveryDetails is the string of discovery details that is
+    # passed to a Discovery Handler which can parse it into an expected format.
+    discoveryDetails: ""
+    brokerPod:
+      image:
+        # repository is the custom broker container reference
+        repository:
+        # tag is the custom broker image tag
+        tag: latest
+        # pullPolicy is the custom pull policy
+        pullPolicy: ""
+      resources:
+        # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+        # for it to be scheduled by the Kubernetes Scheduler
+        memoryRequest: 11Mi
+        # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+        # for it to be scheduled by the Kubernetes Scheduler
+        cpuRequest: 10m
+        # memoryLimit defines the maximum amount of RAM this Pod can consume.
+        memoryLimit: 24Mi
+        # cpuLimit defines the maximum amount of CPU this Pod can consume.
+        cpuLimit: 24m
+    # createInstanceServices is specified if a service should automatically be
+    # created for each broker pod
+    createInstanceServices: true
+    instanceService:
+      # name is the description of the instance service
+      name: akri-custom-instance-service
+      # type is the service type of the instance service
+      type: ClusterIP
+      # port is the service port of the instance service
+      port: 6052
+      # targetPort is the service targetPort of the instance service
+      targetPort: 6052
+      # protocol is the service protocol of the instance service
+      protocol: TCP
+    # createConfigurationService is specified if a single service should automatically be
+    # created for all broker pods of a Configuration
+    createConfigurationService: true
+    configurationService:
+      # name is the description of the configuration service
+      name: akri-custom-configuration-service
+      # type is the service type of the instance service
+      type: ClusterIP
+      # port is the service port of the instance service
+      port: 6052
+      # targetPort is the service targetPort of the instance service
+      targetPort: 6052
+      # protocol is the service protocol of the instance service
+      protocol: TCP
+  # discovery defines a set of values for a custom discovery handler DaemonSet
+  discovery:
+    # enabled defines whether discovery handler pods will be deployed in a slim Agent scenario
+    enabled: false
+    # name is the Kubernetes resource name that will be created for this
+    # custom Discovery Handler DaemonSet
+    name: akri-custom-discovery
+    image:
+      # repository is the custom broker container reference
+      repository:
+      # tag is the custom broker image tag
+      tag: latest
+      # pullPolicy is the pull policy
+      pullPolicy: ""
+    # nodeSelectors is the array of nodeSelectors used to target nodes for the discovery handler to run on
+    # This can be set from the helm command line using `--set custom.discovery.nodeSelectors.label="value"`
+    nodeSelectors: {}
+    resources:
+      # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      memoryRequest: 11Mi
+      # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      cpuRequest: 10m
+      # memoryLimit defines the maximum amount of RAM this Pod can consume.
+      memoryLimit: 24Mi
+      # cpuLimit defines the maximum amount of CPU this Pod can consume.
+      cpuLimit: 24m
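Because `discoveryDetails` is an opaque string handed to whichever handler `discoveryHandlerName` names, a custom Configuration needs only those two values plus an optional broker image. A hypothetical override (handler name, details, and image are illustrative):

```yaml
custom:
  configuration:
    enabled: true
    discoveryHandlerName: my-handler       # hypothetical Discovery Handler name
    discoveryDetails: |
      someKey: someValue                   # free-form; parsed by the handler itself
    brokerPod:
      image:
        repository: ghcr.io/example/my-broker  # hypothetical broker image
```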
 
 debugEcho:
-  # enabled defines whether to load a debugEcho configuration
-  enabled: false
-  # name is the Kubernetes resource name that will be created for this
-  # debugEcho configuration
-  name: akri-debug-echo-foo
-  # properties is a map of properties that will be passed to any instances
-  # created as a result of applying this debugEcho configuration
-  properties:
-  # capacity is the capacity for any instances created as a result of
-  # applying this debugEcho configuration
-  capacity: 1
-  # descriptions is the list of instances created as a result of
-  # applying this debugEcho configuration
-  descriptions:
-  - "foo0"
-  - "foo1"
-  # shared defines whether instances created as a result of
-  # applying this debugEcho configuration as shared
-  shared: true
-  brokerPod:
-    # container used by debugEcho
-    image: nginx:latest
-    # pullPolicy is the debugEcho pull policy
-    pullPolicy: Always
-  instanceService:
-    # name is the description of the instance service
-    name: akri-debug-echo-foo-instance-service
-    # type is the service type of the instance service
-    type: ClusterIP
-    # port is the service port of the instance service
-    port: 6052
-    # targetPort is the service targetPort of the instance service
-    targetPort: 6052
-    # protocol is the service protocol of the instance service
-    protocol: TCP
-  configurationService:
-    # name is the description of the configuration service
-    name: akri-debug-echo-foo-configuration-service
-    # type is the service type of the instance service
-    type: ClusterIP
-    # port is the service port of the instance service
-    port: 6052
-    # targetPort is the service targetPort of the instance service
-    targetPort: 6052
-    # protocol is the service protocol of the instance service
-    protocol: TCP
+  configuration:
+    # enabled defines whether to load a debugEcho configuration
+    enabled: false
+    # name is the Kubernetes resource name that will be created for this
+    # debugEcho configuration
+    name: akri-debug-echo
+    # brokerProperties is a map of properties that will be passed to any instances
+    # created as a result of applying this debugEcho configuration
+    brokerProperties: {}
+    # capacity is the capacity for any instances created as a result of
+    # applying this debugEcho configuration
+    capacity: 1
+    discoveryDetails:
+      # descriptions is the list of instances created as a result of
+      # applying this debugEcho configuration
+      descriptions:
+      - "foo0"
+      - "foo1"
+    # shared defines whether instances created as a result of
+    # applying this debugEcho configuration are shared
+    shared: true
+    brokerPod:
+      # container used by debugEcho
+      image:
+        # repository is the debugEcho broker container reference
+        repository: nginx
+        # tag is the debugEcho broker image tag
+        tag: stable-alpine
+        # pullPolicy is the debugEcho pull policy
+        pullPolicy: ""
+      resources:
+        # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+        # for it to be scheduled by the Kubernetes Scheduler
+        memoryRequest: 10Mi
+        # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+        # for it to be scheduled by the Kubernetes Scheduler
+        cpuRequest: 10m
+        # memoryLimit defines the maximum amount of RAM this Pod can consume.
+        memoryLimit: 30Mi
+        # cpuLimit defines the maximum amount of CPU this Pod can consume.
+        cpuLimit: 29m
+    # createInstanceServices is specified if a service should automatically be
+    # created for each broker pod
+    createInstanceServices: true
+    instanceService:
+      # name is the description of the instance service
+      name: akri-debug-echo-foo-instance-service
+      # type is the service type of the instance service
+      type: ClusterIP
+      # port is the service port of the instance service
+      port: 6052
+      # targetPort is the service targetPort of the instance service
+      targetPort: 6052
+      # protocol is the service protocol of the instance service
+      protocol: TCP
+    # createConfigurationService is specified if a single service should automatically be
+    # created for all broker pods of a Configuration
+    createConfigurationService: true
+    configurationService:
+      # name is the description of the configuration service
+      name: akri-debug-echo-foo-configuration-service
+      # type is the service type of the instance service
+      type: ClusterIP
+      # port is the service port of the instance service
+      port: 6052
+      # targetPort is the service targetPort of the instance service
+      targetPort: 6052
+      # protocol is the service protocol of the instance service
+      protocol: TCP
+  # discovery defines a set of values for a debugEcho discovery handler DaemonSet
+  discovery:
+    # enabled defines whether discovery handler pods will be deployed in a slim Agent scenario
+    enabled: false
+    image:
+      # repository is the container reference
+      repository: ghcr.io/deislabs/akri/debug-echo-discovery
+      # tag is the container tag
+      # debug-echo-configuration.yaml will default to v(AppVersion)[-dev]
+      # with `-dev` added if `useDevelopmentContainers` is specified
+      tag:
+      # pullPolicy is the pull policy
+      pullPolicy: ""
+    # useNetworkConnection specifies whether the discovery handler should make a networked connection
+    # with Agents, using its pod IP address when registering
+    useNetworkConnection: false
+    # port specifies (when useNetworkConnection is true) the port on which the discovery handler advertises its discovery service
+    port: 10000
+    # nodeSelectors is the array of nodeSelectors used to target nodes for the discovery handler to run on
+    # This can be set from the helm command line using `--set debugEcho.discovery.nodeSelectors.label="value"`
+    nodeSelectors: {}
+    resources:
+      # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      memoryRequest: 11Mi
+      # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      cpuRequest: 10m
+      # memoryLimit defines the maximum amount of RAM this Pod can consume.
+      memoryLimit: 24Mi
+      # cpuLimit defines the maximum amount of CPU this Pod can consume.
+      cpuLimit: 26m
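Putting the mock pieces together: the Agent must opt in via `agent.allowDebugEcho`, after which the relocated `debugEcho.configuration` block drives discovery of the two fake instances. A minimal sketch:

```yaml
agent:
  allowDebugEcho: true
debugEcho:
  configuration:
    enabled: true   # discovers the mock instances foo0 and foo1
    shared: true    # mock devices appear shared across nodes
```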
 
 onvif:
-  # enabled defines whether to load a onvif configuration
-  enabled: false
-  # name is the Kubernetes resource name that will be created for this
-  # onvif configuration
-  name: akri-onvif
-  # properties is a map of properties that will be passed to any instances
-  # created as a result of applying this onvif configuration
-  properties:
-  ipAddresses:
-    action: Exclude
-    items: []
-  macAddresses:
-    action: Exclude
-    items: []
-  scopes:
-    action: Exclude
-    items: []
-  discoveryTimeoutSeconds: 1
-  # capacity is the capacity for any instances created as a result of
-  # applying this onvif configuration
-  capacity: 1
-  brokerPod:
+  configuration:
+    # enabled defines whether to load an onvif configuration
+    enabled: false
+    # name is the Kubernetes resource name that will be created for this
+    # onvif configuration
+    name: akri-onvif
+    # brokerProperties is a map of properties that will be passed to any instances
+    # created as a result of applying this onvif configuration
+    brokerProperties: {}
+    discoveryDetails:
+      ipAddresses:
+        action: Exclude
+        items: []
+      macAddresses:
+        action: Exclude
+        items: []
+      scopes:
+        action: Exclude
+        items: []
+      discoveryTimeoutSeconds: 1
+    # capacity is the capacity for any instances created as a result of
+    # applying this onvif configuration
+    capacity: 1
+    brokerPod:
+      image:
+        # repository is the onvif broker container reference
+        repository:
+        # tag is the onvif broker image tag
+        tag: latest
+        # pullPolicy is the Akri onvif broker pull policy
+        pullPolicy: ""
+      resources:
+        # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+        # for it to be scheduled by the Kubernetes Scheduler
+        memoryRequest: 98Mi
+        # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+        # for it to be scheduled by the Kubernetes Scheduler
+        cpuRequest: 134m
+        # memoryLimit defines the maximum amount of RAM this Pod can consume.
+        memoryLimit: 400Mi
+        # cpuLimit defines the maximum amount of CPU this Pod can consume.
+        cpuLimit: 2800m
+    # createInstanceServices is specified if a service should automatically be
+    # created for each broker pod
+    createInstanceServices: true
+    instanceService:
+      # name is the description of the instance service
+      name: akri-onvif-instance-service
+      # type is the service type of the instance service
+      type: ClusterIP
+      # portName is the name of the port
+      portName: grpc
+      # port is the service port of the instance service
+      port: 80
+      # targetPort is the service targetPort of the instance service
+      targetPort: 8083
+      # protocol is the service protocol of the instance service
+      protocol: TCP
+    # createConfigurationService is specified if a single service should automatically be
+    # created for all broker pods of a Configuration
+    createConfigurationService: true
+    configurationService:
+      # name is the description of the configuration service
+      name: akri-onvif-configuration-service
+      # type is the service type of the instance service
+      type: ClusterIP
+      # portName is the name of the port
+      portName: grpc
+      # port is the service port of the instance service
+      port: 80
+      # targetPort is the service targetPort of the instance service
+      targetPort: 8083
+      # protocol is the service protocol of the instance service
+      protocol: TCP
+  # discovery defines a set of values for an onvif discovery handler DaemonSet
+  discovery:
+    # enabled defines whether discovery handler pods will be deployed in a slim Agent scenario
+    enabled: false
     image:
       # repository is the container reference
-      repository:
-      # pullPolicy is the Akri onvif broker pull policy
-      pullPolicy: Always
-  # createInstanceServices is specified if a service should automatically be
-  # created for each broker pod
-  createInstanceServices: true
-  instanceService:
-    # name is the description of the instance service
-    name: akri-onvif-instance-service
-    # type is the service type of the instance service
-    type: ClusterIP
-    # portName is the name of the port
-    portName: grpc
-    # port is the service port of the instance service
-    port: 80
-    # targetPort is the service targetPort of the instance service
-    targetPort: 8083
-    # protocol is the service protocol of the instance service
-    protocol: TCP
-  # createConfigurationService is specified if a single service should automatically be
-  # created for all broker pods of a Configuration
-  createConfigurationService: true
-  configurationService:
-    # name is the description of the configuration service
-    name: akri-onvif-configuration-service
-    # type is the service type of the instance service
-    type: ClusterIP
-    # portName is the name of the port
-    portName: grpc
-    # port is the service port of the instance service
-    port: 80
-    # targetPort is the service targetPort of the instance service
-    targetPort: 8083
-    # protocol is the service protocol of the instance service
-    protocol: TCP
+      repository: ghcr.io/deislabs/akri/onvif-discovery
+      # tag is the container tag
+      # onvif-configuration.yaml will default to v(AppVersion)[-dev]
+      # with `-dev` added if `useDevelopmentContainers` is specified
+      tag:
+      # pullPolicy is the pull policy
+      pullPolicy: ""
+    # useNetworkConnection specifies whether the discovery handler should make a networked connection
+    # with Agents, using its pod IP address when registering
+    useNetworkConnection: false
+    # port specifies (when useNetworkConnection is true) the port on which the discovery handler advertises its discovery service
+    port: 10000
+    # nodeSelectors is the array of nodeSelectors used to target nodes for the discovery handler to run on
+    # This can be set from the helm command line using `--set onvif.discovery.nodeSelectors.label="value"`
+    nodeSelectors: {}
+    resources:
+      # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      memoryRequest: 11Mi
+      # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      cpuRequest: 10m
+      # memoryLimit defines the maximum amount of RAM this Pod can consume.
+      memoryLimit: 24Mi
+      # cpuLimit defines the maximum amount of CPU this Pod can consume.
+      cpuLimit: 24m
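The Include/Exclude filters travel inside `discoveryDetails`, so scoping ONVIF discovery to known cameras is a values change. A hypothetical override pinning discovery to two addresses:

```yaml
onvif:
  configuration:
    enabled: true
    discoveryDetails:
      ipAddresses:
        action: Include   # discover only the listed addresses
        items:
        - 10.0.0.11       # hypothetical camera IPs
        - 10.0.0.12
```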
 
 opcua:
-  # enabled defines whether to load an OPC UA configuration
-  enabled: false
-  # name is the Kubernetes resource name that will be created for this
-  # OPC UA configuration
-  name: akri-opcua
-  # properties is a map of properties that will be passed to any instances
-  # created as a result of applying this OPC UA configuration
-  properties:
-  opcuaDiscoveryMethod: standard
-  # discoveryUrls is a list of DiscoveryUrls for OPC UA servers
-  discoveryUrls:
-  - "opc.tcp://localhost:4840/"
-  # mountCertificates determines whether to mount into the broker pods k8s Secrets
-  # containing OPC UA client credentials for connecting to OPC UA severs with the
-  # same signing certificate authority.
-  # If set to false, the brokers will attempt to make an insecure connection with the servers.
-  mountCertificates: false
-  # applicationNames is a filter applied to the discovered OPC UA servers to either exclusively
-  # include or exclude servers with application names in the applicationNames list.
-  applicationNames:
-    action: Exclude
-    items: []
-  # capacity is the capacity for any instances created as a result of
-  # applying this OPC UA configuration
-  capacity: 1
-  brokerPod:
+  configuration:
+    # enabled defines whether to load an OPC UA configuration
+    enabled: false
+    # name is the Kubernetes resource name that will be created for this
+    # OPC UA configuration
+    name: akri-opcua
+    # brokerProperties is a map of properties that will be passed to any instances
+    # created as a result of applying this OPC UA configuration
+    brokerProperties: {}
+    discoveryDetails:
+      # discoveryUrls is a list of DiscoveryUrls for OPC UA servers
+      discoveryUrls:
+      - "opc.tcp://localhost:4840/"
+      # applicationNames is a filter applied to the discovered OPC UA servers to either exclusively
+      # include or exclude servers with application names in the applicationNames list.
+      applicationNames:
+        action: Exclude
+        items: []
+    # mountCertificates determines whether to mount into the broker pods k8s Secrets
+    # containing OPC UA client credentials for connecting to OPC UA servers with the
+    # same signing certificate authority.
+    # If set to false, the brokers will attempt to make an insecure connection with the servers.
+    mountCertificates: false
+    # capacity is the capacity for any instances created as a result of
+    # applying this OPC UA configuration
+    capacity: 1
+    brokerPod:
+      image:
+        # repository is the OPC UA broker container reference
+        repository:
+        # tag is the OPC UA broker image tag
+        tag: latest
+        # pullPolicy is the OPC UA broker pull policy
+        pullPolicy: ""
+      resources:
+        # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+        # for it to be scheduled by the Kubernetes Scheduler
+        memoryRequest: 76Mi
+        # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+        # for it to be scheduled by the Kubernetes Scheduler
+        cpuRequest: 9m
+        # memoryLimit defines the maximum amount of RAM this Pod can consume.
+        memoryLimit: 200Mi
+        # cpuLimit defines the maximum amount of CPU this Pod can consume.
+        cpuLimit: 30m
+    # createInstanceServices is specified if a service should automatically be
+    # created for each broker pod
+    createInstanceServices: true
+    instanceService:
+      # name is the description of the instance service
+      name: akri-opcua-instance-service
+      # type is the service type of the instance service
+      type: ClusterIP
+      # port is the service port of the instance service
+      port: 80
+      # targetPort is the service targetPort of the instance service
+      targetPort: 8083
+      # protocol is the service protocol of the instance service
+      protocol: TCP
+    # createConfigurationService is specified if a single service should automatically be
+    # created for all broker pods of a Configuration
+    createConfigurationService: true
+    configurationService:
+      # name is the description of the configuration service
+      name: akri-opcua-configuration-service
+      # type is the service type of the instance service
+      type: ClusterIP
+      # port is the service port of the instance service
+      port: 80
+      # targetPort is the service targetPort of the instance service
+      targetPort: 8083
+      # protocol is the service protocol of the instance service
+      protocol: TCP
+  # discovery defines a set of values for an opcua discovery handler DaemonSet
+  discovery:
+    # enabled defines whether discovery handler pods will be deployed in a slim Agent scenario
+    enabled: false
     image:
-      # repository is the Akri OPC UA broker container reference
-      repository:
-      # pullPolicy is the Akri OPC UA broker pull policy
-      pullPolicy: Always
-  # createInstanceServices is specified if a service should automatically be
-  # created for each broker pod
-  createInstanceServices: true
-  instanceService:
-    # name is the description of the instance service
-    name: akri-opcua-instance-service
-    # type is the service type of the instance service
-    type: ClusterIP
-    # port is the service port of the instance service
-    port: 80
-    # targetPort is the service targetPort of the instance service
-    targetPort: 8083
-    # protocol is the service protocol of the instance service
-    protocol: TCP
-  # createConfigurationService is specified if a single service should automatically be
-  # created for all broker pods of a Configuration
-  createConfigurationService: true
-  configurationService:
-    # name is the description of the configuration service
-    name: akri-opcua-configuration-service
-    # type is the service type of the instance service
-    type: ClusterIP
-    # port is the service port of the instance service
-    port: 80
-    # targetPort is the service targetPort of the instance service
-    targetPort: 8083
-    # protocol is the service protocol of the instance service
-    protocol: TCP
+      # repository is the container reference
+      repository: ghcr.io/deislabs/akri/opcua-discovery
+      # tag is the container tag
+      # opcua-configuration.yaml will default to v(AppVersion)[-dev]
+      # with `-dev` added if `useDevelopmentContainers` is specified
+      tag:
+      # pullPolicy is the pull policy
+      pullPolicy: ""
+    # useNetworkConnection specifies whether the discovery handler should make a networked connection
+    # with Agents, using its pod IP address when registering
+    useNetworkConnection: false
+    # port specifies (when useNetworkConnection is true) the port on which the discovery handler advertises its discovery service
+    port: 10000
+    # nodeSelectors is the array of nodeSelectors used to target nodes for the discovery handler to run on
+    # This can be set from the helm command line using `--set opcua.discovery.nodeSelectors.label="value"`
+    nodeSelectors: {}
+    resources:
+      # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      memoryRequest: 11Mi
+      # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      cpuRequest: 10m
+      # memoryLimit defines the maximum amount of RAM this Pod can consume.
+      memoryLimit: 24Mi
+      # cpuLimit defines the maximum amount of CPU this Pod can consume.
+      cpuLimit: 24m
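OPC UA discovery starts from the `discoveryUrls` list, typically pointing at a Local Discovery Server rather than the localhost default. An assumed override (the address is illustrative):

```yaml
opcua:
  configuration:
    enabled: true
    discoveryDetails:
      discoveryUrls:
      - "opc.tcp://10.0.0.50:4840/"  # hypothetical Local Discovery Server
    mountCertificates: false         # insecure channel; see the credentials Secret sketch above for the secure path
```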
 
 udev:
-  # enabled defines whether to load a udev configuration
-  enabled: false
-  # name is the Kubernetes resource name that will be created for this
-  # udev configuration
-  name: akri-udev
-  # properties is a map of properties that will be passed to any instances
-  # created as a result of applying this udev configuration
-  properties:
-  # udevRules is the list of udev rules used to find instances created as a result of
-  # applying this udev configuration
-  udevRules:
-  # capacity is the capacity for any instances created as a result of
-  # applying this udev configuration
-  capacity: 1
-  brokerPod:
+  configuration:
+    # enabled defines whether to load a udev configuration
+    enabled: false
+    # name is the Kubernetes resource name that will be created for this
+    # udev configuration
+    name: akri-udev
+    # brokerProperties is a map of properties that will be passed to any instances
+    # created as a result of applying this udev configuration
+    brokerProperties: {}
+    discoveryDetails:
+      # udevRules is the list of udev rules used to find instances created as a result of
+      # applying this udev configuration
+      udevRules:
+    # capacity is the capacity for any instances created as a result of
+    # applying this udev configuration
+    capacity: 1
+    brokerPod:
+      image:
+        # repository is the udev broker container reference
+        repository:
+        # tag is the udev broker image tag
+        tag: latest
+        # pullPolicy is the udev broker pull policy
+        pullPolicy: ""
+      securityContext: {}
+    # createInstanceServices is specified if a service should automatically be
+    # created for each broker pod
+    createInstanceServices: true
+    instanceService:
+      # portName is the name of the port
+      portName: grpc
+      # type is the service type of the instance service
+      type: ClusterIP
+      # port is the service port of the instance service
+      port: 80
+      # targetPort is the service targetPort of the instance service
+      targetPort: 8083
+      # protocol is the service protocol of the instance service
+      protocol: TCP
+    # createConfigurationService is specified if a single service should automatically be
+    # created for all broker pods of a Configuration
+    createConfigurationService: true
+    configurationService:
+      # portName is the name of the port
+      portName: grpc
+      # type is the service type of the instance service
+      type: ClusterIP
+      # port is the service port of the instance service
+      port: 80
+      # targetPort is the service targetPort of the instance service
+      targetPort: 8083
+      # protocol is the service protocol of the instance service
+      protocol: TCP
+  # discovery defines a set of values for a udev discovery handler DaemonSet
+  discovery:
+    # enabled defines whether discovery handler pods will be deployed in a slim Agent scenario
+    enabled: false
     image:
       # repository is the container reference
-      repository:
-      pullPolicy: Always
-    securityContext: {}
-  # createInstanceServices is specified if a service should automatically be
-  # created for each broker pod
-  createInstanceServices: true
-  instanceService:
-    # portName is the name of the port
-    portName: grpc
-    # type is the service type of the instance service
-    type: ClusterIP
-    # port is the service port of the instance service
-    port: 80
-    # targetPort is the service targetPort of the instance service
-    targetPort: 8083
-    # protocol is the service protocol of the instance service
-    protocol: TCP
-  # createConfigurationService is specified if a single service should automatically be
-  # created for all broker pods of a Configuration
-  createConfigurationService: true
-  configurationService:
-    # portName is the name of the port
-    portName: grpc
-    # type is the service type of the instance service
-    type: ClusterIP
-    # port is the service port of the instance service
-    port: 80
-    # targetPort is the service targetPort of the instance service
-    targetPort: 8083
-    # protocol is the service protocol of the instance service
-    protocol: TCP
\ No newline at end of file
+      repository: ghcr.io/deislabs/akri/udev-discovery
+      # tag is the container tag
+      # udev-configuration.yaml will default to v(AppVersion)[-dev]
+      # with `-dev` added if `useDevelopmentContainers` is specified
+      tag:
+      # pullPolicy is the pull policy
+      pullPolicy: ""
+    resources:
+      # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      memoryRequest: 10Mi
+      # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      cpuRequest: 10m
+      # memoryLimit defines the maximum amount of RAM this Pod can consume.
+      memoryLimit: 30Mi
+      # cpuLimit defines the maximum amount of CPU this Pod can consume.
+      cpuLimit: 29m
+    # useNetworkConnection specifies whether the discovery handler should make a networked connection
+    # with Agents, using its pod IP address when registering
+    useNetworkConnection: false
+    # port specifies (when useNetworkConnection is true) the port on which the discovery handler advertises its discovery service
+    port: 10000
+    # nodeSelectors is the array of nodeSelectors used to target nodes for the discovery handler to run on
+    # This can be set from the helm command line using `--set udev.discovery.nodeSelectors.label="value"`
+    nodeSelectors: {}
+    host:
+      # udev is the node path of udev, usually at `/run/udev`
+      udev:
+    resources:
+      # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      memoryRequest: 11Mi
+      # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+      # for it to be scheduled by the Kubernetes Scheduler
+      cpuRequest: 10m
+      # memoryLimit defines the maximum amount of RAM this Pod can consume.
+      memoryLimit: 24Mi
+      # cpuLimit defines the maximum amount of CPU this Pod can consume.
+      cpuLimit: 24m
+      memoryLimit: 24Mi
+      # cpuLimit defines the maximum amount of CPU this Pod can consume.
+      cpuLimit: 24m
+
+# Admission Controllers (Webhooks)
+webhookConfiguration:
+  # enabled defines whether to apply the Akri Admission Controller (Webhook) for Akri Configurations
+  enabled: false
+  # name of the webhook
+  name: akri-webhook-configuration
+  # base64-encoded CA certificate (PEM) used by Kubernetes to validate the Webhook's certificate
+  caBundle: null
+  image:
+    # repository is the Akri Webhook for Configurations image reference
+    repository: ghcr.io/deislabs/akri/webhook-configuration
+    # tag is the container tag
+    # webhook-configuration.yaml will default to v(AppVersion)[-dev]
+    # with `-dev` added if `useDevelopmentContainers` is specified
+    tag:
+    # pullPolicy is the Akri Webhook pull policy
+    pullPolicy: Always
+  # onlyOnControlPlane dictates whether the Akri Webhook will only run on nodes with
+  # the label with (key, value) of ("node-role.kubernetes.io/master", "")
+  onlyOnControlPlane: false
+  # allowOnControlPlane dictates whether a toleration will be added to allow the Akri Webhook
+  # to run on the control plane node
+  allowOnControlPlane: true
+  # linuxOnly dictates whether the Akri Webhook will only run on a Linux node
+  linuxOnly: true
+  # nodeSelectors is the array of nodeSelectors used to target nodes for the Akri Webhook to run on
+  # This can be set from the helm command line using `--set webhookConfiguration.nodeSelectors.label="value"`
+  nodeSelectors: {}
+  resources:
+    # memoryRequest defines the minimum amount of RAM that must be available to this Pod
+    # for it to be scheduled by the Kubernetes Scheduler
+    memoryRequest: 100Mi
+    # cpuRequest defines the minimum amount of CPU that must be available to this Pod
+    # for it to be scheduled by the Kubernetes Scheduler
+    cpuRequest: 15m
+    # memoryLimit defines the maximum amount of RAM this Pod can consume.
+    memoryLimit: 100Mi
+    # cpuLimit defines the maximum amount of CPU this Pod can consume.
+ cpuLimit: 26m diff --git a/discovery-handler-modules/debug-echo-discovery-handler/Cargo.toml b/discovery-handler-modules/debug-echo-discovery-handler/Cargo.toml new file mode 100644 index 000000000..26c5b7100 --- /dev/null +++ b/discovery-handler-modules/debug-echo-discovery-handler/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "debug-echo-discovery-handler" +version = "0.6.5" +authors = ["Kate Goldenring "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +akri-discovery-utils = { path = "../../discovery-utils" } +akri-debug-echo = { path = "../../discovery-handlers/debug-echo" } +env_logger = "0.8.3" +log = "0.4" +tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } diff --git a/discovery-handler-modules/debug-echo-discovery-handler/src/main.rs b/discovery-handler-modules/debug-echo-discovery-handler/src/main.rs new file mode 100644 index 000000000..1a58ef943 --- /dev/null +++ b/discovery-handler-modules/debug-echo-discovery-handler/src/main.rs @@ -0,0 +1,29 @@ +use akri_debug_echo::{ + discovery_handler::DiscoveryHandlerImpl, DEBUG_ECHO_INSTANCES_SHARED_LABEL, + DISCOVERY_HANDLER_NAME, +}; +use akri_discovery_utils::discovery::discovery_handler::{ + run_discovery_handler, REGISTER_AGAIN_CHANNEL_CAPACITY, +}; +use log::info; +#[tokio::main] +async fn main() -> Result<(), Box> { + env_logger::try_init()?; + info!("main - debugEcho discovery handler started"); + let (register_sender, register_receiver) = + tokio::sync::mpsc::channel(REGISTER_AGAIN_CHANNEL_CAPACITY); + let discovery_handler = DiscoveryHandlerImpl::new(Some(register_sender)); + let shared: bool = std::env::var(DEBUG_ECHO_INSTANCES_SHARED_LABEL) + .unwrap() + .parse() + .unwrap(); + run_discovery_handler( + discovery_handler, + register_receiver, + DISCOVERY_HANDLER_NAME, + shared, + ) + .await?; + info!("main - debugEcho discovery handler ended"); + Ok(()) +} diff --git a/discovery-handler-modules/http-discovery-handler/Cargo.toml b/discovery-handler-modules/http-discovery-handler/Cargo.toml new file mode 100644 index 000000000..c629462cc --- /dev/null +++ b/discovery-handler-modules/http-discovery-handler/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "http-discovery-handler" +version = "0.1.0" +authors = ["Kate Goldenring "] +edition = "2018" + +[dependencies] +# TODO: Change this to Akri main when the library is merged +akri-discovery-utils = { git = "https://github.com/deislabs/akri", branch = "main", package = "akri-discovery-utils" } +async-trait = "0.1.0" +tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } +tonic = {version = "0.1.0", features = ["tls"] } +anyhow = "1.0.38" +reqwest = "0.10.8" +env_logger = "0.6.1" +log = "0.4" \ No newline at end of file diff --git a/discovery-handler-modules/http-discovery-handler/src/discovery_handler.rs b/discovery-handler-modules/http-discovery-handler/src/discovery_handler.rs new file mode 100644 index 000000000..3eafc8f12 --- /dev/null +++ b/discovery-handler-modules/http-discovery-handler/src/discovery_handler.rs @@ -0,0 +1,68 @@ +use akri_discovery_utils::discovery::{ + v0::{discovery_handler_server::DiscoveryHandler, Device, DiscoverRequest, DiscoverResponse}, + DiscoverStream, +}; +use async_trait::async_trait; +use tokio::sync::mpsc; +use tonic::{Response, Status}; +use reqwest::get; +use std::collections::HashMap; +use log::info; + +const BROKER_NAME: &str = "AKRI_HTTP"; 
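+// Reviewer note (addition, not in the original patch): these two keys are attached
+// to every discovered Device's `properties` map below. Akri exposes Instance
+// properties to broker Pods as environment variables, so a broker can read
+// `AKRI_HTTP_DEVICE_ENDPOINT` to learn which `host:port` it should query.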
+const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT";
+
+pub struct DiscoveryHandlerImpl {
+    register_sender: tokio::sync::mpsc::Sender<()>,
+}
+
+impl DiscoveryHandlerImpl {
+    pub fn new(register_sender: tokio::sync::mpsc::Sender<()>) -> Self {
+        DiscoveryHandlerImpl { register_sender }
+    }
+}
+
+#[async_trait]
+impl DiscoveryHandler for DiscoveryHandlerImpl {
+    type DiscoverStream = DiscoverStream;
+    async fn discover(
+        &self,
+        request: tonic::Request<DiscoverRequest>,
+    ) -> Result<Response<Self::DiscoverStream>, Status> {
+        info!("discover - entered");
+        // Get the discovery url from the `DiscoverRequest`
+        let url = request.get_ref().discovery_details.clone();
+        // Create a channel for sending and receiving device updates
+        let (mut stream_sender, stream_receiver) = mpsc::channel(4);
+        let mut register_sender = self.register_sender.clone();
+        tokio::spawn(async move {
+            loop {
+                let resp = get(&url).await.unwrap();
+                // Response is a newline separated list of devices (host:port) or empty
+                let device_list = &resp.text().await.unwrap();
+                let devices = device_list
+                    .lines()
+                    .map(|endpoint| {
+                        let mut properties = HashMap::new();
+                        properties.insert(BROKER_NAME.to_string(), "http".to_string());
+                        properties.insert(DEVICE_ENDPOINT.to_string(), endpoint.to_string());
+                        Device {
+                            id: endpoint.to_string(),
+                            properties,
+                            mounts: Vec::default(),
+                            device_specs: Vec::default(),
+                        }
+                    })
+                    .collect::<Vec<Device>>();
+                // Send the Agent the list of devices.
+                if stream_sender
+                    .send(Ok(DiscoverResponse { devices }))
+                    .await
+                    .is_err()
+                {
+                    // Agent dropped its end of the stream. Stop discovering and signal to try to re-register.
+                    register_sender.send(()).await.unwrap();
+                    break;
+                }
+            }
+        });
+        // Send the agent one end of the channel to receive device updates
+        Ok(Response::new(stream_receiver))
+    }
+}
diff --git a/discovery-handler-modules/http-discovery-handler/src/main.rs b/discovery-handler-modules/http-discovery-handler/src/main.rs
new file mode 100644
index 000000000..044d3e2e0
--- /dev/null
+++ b/discovery-handler-modules/http-discovery-handler/src/main.rs
@@ -0,0 +1,28 @@
+mod discovery_handler;
+
+use akri_discovery_utils::discovery::discovery_handler::run_discovery_handler;
+use discovery_handler::DiscoveryHandlerImpl;
+use log::info;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+    env_logger::try_init()?;
+    info!("main - HTTP discovery handler started");
+    // Specify the name of this DiscoveryHandler. A discovery handler is usually, but not necessarily,
+    // identified by the protocol it uses.
+    let name = "http";
+    // Specify whether the devices discovered by this discovery handler are locally attached (or embedded)
+    // to nodes or are network based and usable/sharable by multiple nodes.
+    let shared = true;
+    // A DiscoveryHandler must handle the Agent dropping a connection due to a Configuration that utilizes this
+    // DiscoveryHandler being deleted or the Agent erroring. It is impossible to determine the cause of the
+    // disconnection, so in case the Agent did error out, the Discovery Handler should try to re-register.
+    let (register_sender, register_receiver) = tokio::sync::mpsc::channel(2);
+    // Create a DiscoveryHandler
+    let discovery_handler = DiscoveryHandlerImpl::new(register_sender);
+    // This function will register the DiscoveryHandler with the Agent's registration socket
+    // and serve its discover service over UDS at the socket path
+    // `format!("{}/{}.sock", env::var("DISCOVERY_HANDLERS_DIRECTORY").unwrap(), name)`.
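+    // Illustrative sketch (reviewer addition; the directory value is an assumption,
+    // not taken from this PR): if DISCOVERY_HANDLERS_DIRECTORY were `/var/lib/akri`,
+    // the service would be served at `/var/lib/akri/http.sock`, i.e. roughly:
+    //     let socket_path = format!(
+    //         "{}/{}.sock",
+    //         std::env::var("DISCOVERY_HANDLERS_DIRECTORY")?,
+    //         name
+    //     );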
+ run_discovery_handler(discovery_handler, register_receiver, name, shared).await?; + Ok(()) +} diff --git a/discovery-handler-modules/onvif-discovery-handler/Cargo.toml b/discovery-handler-modules/onvif-discovery-handler/Cargo.toml new file mode 100644 index 000000000..564ee651b --- /dev/null +++ b/discovery-handler-modules/onvif-discovery-handler/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "onvif-discovery-handler" +version = "0.6.5" +authors = ["Kate Goldenring "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +akri-discovery-utils = { path = "../../discovery-utils" } +akri-onvif = { path = "../../discovery-handlers/onvif" } +env_logger = "0.8.3" +log = "0.4" +tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } \ No newline at end of file diff --git a/discovery-handler-modules/onvif-discovery-handler/src/main.rs b/discovery-handler-modules/onvif-discovery-handler/src/main.rs new file mode 100644 index 000000000..2def2feba --- /dev/null +++ b/discovery-handler-modules/onvif-discovery-handler/src/main.rs @@ -0,0 +1,22 @@ +use akri_discovery_utils::discovery::discovery_handler::{ + run_discovery_handler, REGISTER_AGAIN_CHANNEL_CAPACITY, +}; +use akri_onvif::{discovery_handler::DiscoveryHandlerImpl, DISCOVERY_HANDLER_NAME, SHARED}; +use log::info; +#[tokio::main] +async fn main() -> Result<(), Box> { + env_logger::try_init()?; + info!("main - onvif discovery handler started"); + let (register_sender, register_receiver) = + tokio::sync::mpsc::channel(REGISTER_AGAIN_CHANNEL_CAPACITY); + let discovery_handler = DiscoveryHandlerImpl::new(Some(register_sender)); + run_discovery_handler( + discovery_handler, + register_receiver, + DISCOVERY_HANDLER_NAME, + SHARED, + ) + .await?; + info!("main - onvif discovery handler ended"); + Ok(()) +} diff --git a/discovery-handler-modules/opcua-discovery-handler/Cargo.toml b/discovery-handler-modules/opcua-discovery-handler/Cargo.toml new file mode 100644 index 000000000..d63f449c5 --- /dev/null +++ b/discovery-handler-modules/opcua-discovery-handler/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "opcua-discovery-handler" +version = "0.6.5" +authors = ["Kate Goldenring "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +akri-discovery-utils = { path = "../../discovery-utils" } +akri-opcua = { path = "../../discovery-handlers/opcua" } +env_logger = "0.8.3" +log = "0.4" +tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } \ No newline at end of file diff --git a/discovery-handler-modules/opcua-discovery-handler/src/main.rs b/discovery-handler-modules/opcua-discovery-handler/src/main.rs new file mode 100644 index 000000000..25360df6f --- /dev/null +++ b/discovery-handler-modules/opcua-discovery-handler/src/main.rs @@ -0,0 +1,22 @@ +use akri_discovery_utils::discovery::discovery_handler::{ + run_discovery_handler, REGISTER_AGAIN_CHANNEL_CAPACITY, +}; +use akri_opcua::{discovery_handler::DiscoveryHandlerImpl, DISCOVERY_HANDLER_NAME, SHARED}; +use log::info; +#[tokio::main] +async fn main() -> Result<(), Box> { + env_logger::try_init()?; + info!("main - opcua discovery handler started"); + let (register_sender, register_receiver) = + tokio::sync::mpsc::channel(REGISTER_AGAIN_CHANNEL_CAPACITY); + let discovery_handler = DiscoveryHandlerImpl::new(Some(register_sender)); + 
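+    // Note (reviewer addition): DISCOVERY_HANDLER_NAME and SHARED are re-exported by
+    // the `akri-opcua` library crate, so this standalone module and a handler
+    // embedded in the full Agent register with the Agent under identical settings.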
run_discovery_handler( + discovery_handler, + register_receiver, + DISCOVERY_HANDLER_NAME, + SHARED, + ) + .await?; + info!("main - opcua discovery handler ended"); + Ok(()) +} diff --git a/discovery-handler-modules/udev-discovery-handler/Cargo.toml b/discovery-handler-modules/udev-discovery-handler/Cargo.toml new file mode 100644 index 000000000..f084b8269 --- /dev/null +++ b/discovery-handler-modules/udev-discovery-handler/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "udev-discovery-handler" +version = "0.6.5" +authors = ["Kate Goldenring "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +akri-discovery-utils = { path = "../../discovery-utils" } +akri-udev = { path = "../../discovery-handlers/udev" } +env_logger = "0.8.3" +log = "0.4" +tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } diff --git a/discovery-handler-modules/udev-discovery-handler/src/main.rs b/discovery-handler-modules/udev-discovery-handler/src/main.rs new file mode 100644 index 000000000..ccd4b9a76 --- /dev/null +++ b/discovery-handler-modules/udev-discovery-handler/src/main.rs @@ -0,0 +1,22 @@ +use akri_discovery_utils::discovery::discovery_handler::{ + run_discovery_handler, REGISTER_AGAIN_CHANNEL_CAPACITY, +}; +use akri_udev::{discovery_handler::DiscoveryHandlerImpl, DISCOVERY_HANDLER_NAME, SHARED}; +use log::info; +#[tokio::main] +async fn main() -> Result<(), Box> { + env_logger::try_init()?; + info!("main - udev discovery handler started"); + let (register_sender, register_receiver) = + tokio::sync::mpsc::channel(REGISTER_AGAIN_CHANNEL_CAPACITY); + let discovery_handler = DiscoveryHandlerImpl::new(Some(register_sender)); + run_discovery_handler( + discovery_handler, + register_receiver, + DISCOVERY_HANDLER_NAME, + SHARED, + ) + .await?; + info!("main - udev discovery handler ended"); + Ok(()) +} diff --git a/discovery-handlers/debug-echo/Cargo.toml b/discovery-handlers/debug-echo/Cargo.toml new file mode 100644 index 000000000..1eb79968a --- /dev/null +++ b/discovery-handlers/debug-echo/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "akri-debug-echo" +version = "0.6.5" +authors = ["Kate Goldenring "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +akri-discovery-utils = { path = "../../discovery-utils" } +anyhow = "1.0.38" +async-trait = "0.1.0" +env_logger = "0.8.3" +futures-util = "0.3" +log = "0.4" +serde = "1.0.104" +serde_json = "1.0.45" +serde_yaml = "0.8.11" +serde_derive = "1.0.104" +tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } +tonic = {version = "0.1.0", features = ["tls"] } + +[dev-dependencies] +akri-shared = { path = "../../shared" } \ No newline at end of file diff --git a/discovery-handlers/debug-echo/src/discovery_handler.rs b/discovery-handlers/debug-echo/src/discovery_handler.rs new file mode 100644 index 000000000..a5be894c6 --- /dev/null +++ b/discovery-handlers/debug-echo/src/discovery_handler.rs @@ -0,0 +1,202 @@ +use akri_discovery_utils::discovery::{ + discovery_handler::{deserialize_discovery_details, DISCOVERED_DEVICES_CHANNEL_CAPACITY}, + v0::{discovery_handler_server::DiscoveryHandler, Device, DiscoverRequest, DiscoverResponse}, + DiscoverStream, +}; +use async_trait::async_trait; +use log::{error, info, trace}; +use std::time::Duration; +use std::{collections::HashMap, fs}; +use 
tokio::sync::mpsc; +use tokio::time::delay_for; +use tonic::{Response, Status}; + +// TODO: make this configurable +pub const DISCOVERY_INTERVAL_SECS: u64 = 10; + +/// File acting as an environment variable for testing discovery. +/// To mimic an instance going offline, kubectl exec into the pod running this discovery handler +/// and echo "OFFLINE" > /tmp/debug-echo-availability.txt. +/// To mimic a device coming back online, remove the word "OFFLINE" from the file +/// ie: echo "" > /tmp/debug-echo-availability.txt. +pub const DEBUG_ECHO_AVAILABILITY_CHECK_PATH: &str = "/tmp/debug-echo-availability.txt"; +/// String to write into DEBUG_ECHO_AVAILABILITY_CHECK_PATH to make Other devices undiscoverable +pub const OFFLINE: &str = "OFFLINE"; + +/// DebugEchoDiscoveryDetails describes the necessary information needed to discover and filter debug echo devices. +/// Specifically, it contains a list (`descriptions`) of fake devices to be discovered. +/// This information is expected to be serialized in the discovery details map sent during Discover requests. +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct DebugEchoDiscoveryDetails { + pub descriptions: Vec, +} + +/// The DiscoveryHandlerImpl discovers a list of devices, named in its `descriptions`. +/// It mocks discovering the devices by inspecting the contents of the file at `DEBUG_ECHO_AVAILABILITY_CHECK_PATH`. +/// If the file contains "OFFLINE", it won't discover any of the devices, else it discovers them all. +pub struct DiscoveryHandlerImpl { + register_sender: Option>, +} + +impl DiscoveryHandlerImpl { + pub fn new(register_sender: Option>) -> Self { + DiscoveryHandlerImpl { register_sender } + } +} + +#[async_trait] +impl DiscoveryHandler for DiscoveryHandlerImpl { + type DiscoverStream = DiscoverStream; + async fn discover( + &self, + request: tonic::Request, + ) -> Result, Status> { + info!("discover - called for debug echo protocol"); + let register_sender = self.register_sender.clone(); + let discover_request = request.get_ref(); + let (mut discovered_devices_sender, discovered_devices_receiver) = + mpsc::channel(DISCOVERED_DEVICES_CHANNEL_CAPACITY); + let discovery_handler_config: DebugEchoDiscoveryDetails = + deserialize_discovery_details(&discover_request.discovery_details) + .map_err(|e| tonic::Status::new(tonic::Code::InvalidArgument, format!("{}", e)))?; + let descriptions = discovery_handler_config.descriptions; + let mut offline = fs::read_to_string(DEBUG_ECHO_AVAILABILITY_CHECK_PATH) + .unwrap_or_default() + .contains(OFFLINE); + let mut first_loop = true; + tokio::spawn(async move { + loop { + let availability = + fs::read_to_string(DEBUG_ECHO_AVAILABILITY_CHECK_PATH).unwrap_or_default(); + trace!( + "discover -- debugEcho devices are online? 
{}", + !availability.contains(OFFLINE) + ); + if (availability.contains(OFFLINE) && !offline) || offline && first_loop { + if first_loop { + first_loop = false; + } + // If the device is now offline, return an empty list of instance info + offline = true; + if let Err(e) = discovered_devices_sender + .send(Ok(DiscoverResponse { + devices: Vec::new(), + })) + .await + { + error!("discover - for debugEcho failed to send discovery response with error {}", e); + if let Some(mut sender) = register_sender { + sender.send(()).await.unwrap(); + } + break; + } + } else if (!availability.contains(OFFLINE) && offline) || !offline && first_loop { + if first_loop { + first_loop = false; + } + offline = false; + let devices = descriptions + .iter() + .map(|description| { + let mut properties = HashMap::new(); + properties.insert( + super::DEBUG_ECHO_DESCRIPTION_LABEL.to_string(), + description.clone(), + ); + Device { + id: description.clone(), + properties, + mounts: Vec::default(), + device_specs: Vec::default(), + } + }) + .collect::>(); + if let Err(e) = discovered_devices_sender + .send(Ok(DiscoverResponse { devices })) + .await + { + // TODO: consider re-registering here + error!("discover - for debugEcho failed to send discovery response with error {}", e); + if let Some(mut sender) = register_sender { + sender.send(()).await.unwrap(); + } + break; + } + } + delay_for(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; + } + }); + Ok(Response::new(discovered_devices_receiver)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use akri_discovery_utils::discovery::v0::DiscoverRequest; + use akri_shared::akri::configuration::DiscoveryHandlerInfo; + + #[test] + fn test_deserialize_discovery_details_empty() { + let dh_config: Result = + deserialize_discovery_details(""); + assert!(dh_config.is_err()); + + let dh_config: DebugEchoDiscoveryDetails = + deserialize_discovery_details("descriptions: []").unwrap(); + assert!(dh_config.descriptions.is_empty()); + let serialized = serde_json::to_string(&dh_config).unwrap(); + let expected_deserialized = r#"{"descriptions":[]}"#; + assert_eq!(expected_deserialized, serialized); + } + + #[test] + fn test_deserialize_discovery_details_detailed() { + let yaml = r#" + descriptions: + - "foo1" + "#; + let dh_config: DebugEchoDiscoveryDetails = deserialize_discovery_details(yaml).unwrap(); + assert_eq!(dh_config.descriptions.len(), 1); + assert_eq!(&dh_config.descriptions[0], "foo1"); + } + + #[tokio::test] + async fn test_discover_online_devices() { + // Make devices "online" + fs::write(DEBUG_ECHO_AVAILABILITY_CHECK_PATH, "").unwrap(); + let debug_echo_yaml = r#" + name: debugEcho + discoveryDetails: |+ + descriptions: + - "foo1" + "#; + let deserialized: DiscoveryHandlerInfo = serde_yaml::from_str(&debug_echo_yaml).unwrap(); + let discovery_handler = DiscoveryHandlerImpl::new(None); + let properties: HashMap = [( + super::super::DEBUG_ECHO_DESCRIPTION_LABEL.to_string(), + "foo1".to_string(), + )] + .iter() + .cloned() + .collect(); + let device = akri_discovery_utils::discovery::v0::Device { + id: "foo1".to_string(), + properties, + mounts: Vec::default(), + device_specs: Vec::default(), + }; + let discover_request = tonic::Request::new(DiscoverRequest { + discovery_details: deserialized.discovery_details.clone(), + }); + let mut stream = discovery_handler + .discover(discover_request) + .await + .unwrap() + .into_inner(); + let devices = stream.recv().await.unwrap().unwrap().devices; + assert_eq!(1, devices.len()); + assert_eq!(devices[0], device); + } +} 
diff --git a/discovery-handlers/debug-echo/src/lib.rs b/discovery-handlers/debug-echo/src/lib.rs new file mode 100644 index 000000000..13da4868a --- /dev/null +++ b/discovery-handlers/debug-echo/src/lib.rs @@ -0,0 +1,13 @@ +pub mod discovery_handler; + +#[macro_use] +extern crate serde_derive; + +/// Name debugEcho discovery handlers use when registering with the Agent +pub const DISCOVERY_HANDLER_NAME: &str = "debugEcho"; +/// Label of the environment variable in debugEcho discovery handlers that sets whether debug echo registers +/// as discovering local instances on nodes rather than ones visible to multiple nodes +pub const DEBUG_ECHO_INSTANCES_SHARED_LABEL: &str = "DEBUG_ECHO_INSTANCES_SHARED"; +/// Name of environment variable that is set in debug echo brokers. Contains the description of +/// the device. +pub const DEBUG_ECHO_DESCRIPTION_LABEL: &str = "DEBUG_ECHO_DESCRIPTION"; diff --git a/discovery-handlers/onvif/Cargo.toml b/discovery-handlers/onvif/Cargo.toml new file mode 100644 index 000000000..75efa2dd1 --- /dev/null +++ b/discovery-handlers/onvif/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "akri-onvif" +version = "0.6.5" +authors = ["Kate Goldenring "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +akri-discovery-utils = { path = "../../discovery-utils" } +akri-shared = { path = "../../shared" } +anyhow = "1.0.38" +async-trait = "0.1.0" +bytes = "0.5" +env_logger = "0.8.3" +futures-util = "0.3" +hyper = { version = "0.13.5", package = "hyper" } +log = "0.4" +serde = "1.0.104" +serde_json = "1.0.45" +serde_yaml = "0.8.11" +serde_derive = "1.0.104" +sxd-document = "0.3.0" +sxd-xpath = "0.4.0" +tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } +tonic = {version = "0.1.0", features = ["tls"] } +uuid = { version = "0.8.1", features = ["v4"] } +xml-rs = { version = "0.8.0" } +yaserde = "0.3.13" +yaserde_derive = "0.3.13" + +[dev-dependencies] +mockall = "0.9.0" \ No newline at end of file diff --git a/discovery-handlers/onvif/src/discovery_handler.rs b/discovery-handlers/onvif/src/discovery_handler.rs new file mode 100644 index 000000000..c575e2424 --- /dev/null +++ b/discovery-handlers/onvif/src/discovery_handler.rs @@ -0,0 +1,589 @@ +use super::discovery_impl::util; +use super::discovery_utils::{ + OnvifQuery, OnvifQueryImpl, ONVIF_DEVICE_IP_ADDRESS_LABEL_ID, + ONVIF_DEVICE_MAC_ADDRESS_LABEL_ID, ONVIF_DEVICE_SERVICE_URL_LABEL_ID, +}; +use akri_discovery_utils::{ + discovery::{ + discovery_handler::{deserialize_discovery_details, DISCOVERED_DEVICES_CHANNEL_CAPACITY}, + v0::{ + discovery_handler_server::DiscoveryHandler, Device, DiscoverRequest, DiscoverResponse, + }, + DiscoverStream, + }, + filtering::{FilterList, FilterType}, +}; +use async_trait::async_trait; +use log::{error, info, trace}; +use std::{collections::HashMap, time::Duration}; +use tokio::{sync::mpsc, time::delay_for}; +use tonic::{Response, Status}; + +// TODO: make this configurable +pub const DISCOVERY_INTERVAL_SECS: u64 = 10; + +/// This defines the ONVIF data stored in the Configuration +/// CRD +/// +/// The ONVIF discovery handler is structured to store a filter list for +/// ip addresses, mac addresses, and ONVIF scopes. 
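+/// A hedged example (reviewer addition) of discovery details this struct would
+/// deserialize, assuming `FilterList`'s `action`/`items` field names as exercised
+/// in the tests below:
+///
+/// ```yaml
+/// ipAddresses:
+///   action: Include
+///   items:
+///   - "10.0.0.1"
+/// discoveryTimeoutSeconds: 2
+/// ```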
+#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct OnvifDiscoveryDetails { + #[serde(default, skip_serializing_if = "Option::is_none")] + pub ip_addresses: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub mac_addresses: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub scopes: Option, + #[serde(default = "default_discovery_timeout_seconds")] + pub discovery_timeout_seconds: i32, +} + +fn default_discovery_timeout_seconds() -> i32 { + 1 +} + +/// `DiscoveryHandlerImpl` discovers the onvif instances as described by the filters `discover_handler_config.ip_addresses`, +/// `discover_handler_config.mac_addresses`, and `discover_handler_config.scopes`. +/// The instances it discovers are always shared. +pub struct DiscoveryHandlerImpl { + register_sender: Option>, +} + +impl DiscoveryHandlerImpl { + pub fn new(register_sender: Option>) -> Self { + DiscoveryHandlerImpl { register_sender } + } +} + +#[async_trait] +impl DiscoveryHandler for DiscoveryHandlerImpl { + type DiscoverStream = DiscoverStream; + async fn discover( + &self, + request: tonic::Request, + ) -> Result, Status> { + info!("discover - called for ONVIF protocol"); + let register_sender = self.register_sender.clone(); + let discover_request = request.get_ref(); + let (mut discovered_devices_sender, discovered_devices_receiver) = + mpsc::channel(DISCOVERED_DEVICES_CHANNEL_CAPACITY); + let discovery_handler_config: OnvifDiscoveryDetails = + deserialize_discovery_details(&discover_request.discovery_details) + .map_err(|e| tonic::Status::new(tonic::Code::InvalidArgument, format!("{}", e)))?; + let mut cameras: Vec = Vec::new(); + tokio::spawn(async move { + loop { + let onvif_query = OnvifQueryImpl {}; + + trace!("discover - filters:{:?}", &discovery_handler_config,); + let discovered_onvif_cameras = util::simple_onvif_discover(Duration::from_secs( + discovery_handler_config.discovery_timeout_seconds as u64, + )) + .await + .unwrap(); + trace!("discover - discovered:{:?}", &discovered_onvif_cameras,); + // apply_filters never returns an error -- safe to unwrap + let filtered_onvif_cameras = apply_filters( + &discovery_handler_config, + discovered_onvif_cameras, + &onvif_query, + ) + .await + .unwrap(); + trace!("discover - filtered:{:?}", &filtered_onvif_cameras); + let mut changed_camera_list = false; + let mut matching_camera_count = 0; + filtered_onvif_cameras.iter().for_each(|camera| { + if !cameras.contains(camera) { + changed_camera_list = true; + } else { + matching_camera_count += 1; + } + }); + if changed_camera_list || matching_camera_count != cameras.len() { + trace!("discover - sending updated device list"); + cameras = filtered_onvif_cameras.clone(); + if let Err(e) = discovered_devices_sender + .send(Ok(DiscoverResponse { + devices: filtered_onvif_cameras, + })) + .await + { + error!( + "discover - for ONVIF failed to send discovery response with error {}", + e + ); + if let Some(mut sender) = register_sender { + sender.send(()).await.unwrap(); + } + break; + } + } + delay_for(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; + } + }); + Ok(Response::new(discovered_devices_receiver)) + } +} + +fn execute_filter(filter_list: Option<&FilterList>, filter_against: &[String]) -> bool { + if filter_list.is_none() { + return false; + } + let filter_action = filter_list.as_ref().unwrap().action.clone(); + let filter_count = filter_list + .unwrap() + .items + .iter() + .filter(|pattern| { + filter_against + .iter() + 
.filter(|filter_against_item| filter_against_item.contains(*pattern)) + .count() + > 0 + }) + .count(); + + if FilterType::Include == filter_action { + filter_count == 0 + } else { + filter_count != 0 + } +} + +async fn apply_filters( + discovery_handler_config: &OnvifDiscoveryDetails, + device_service_uris: Vec, + onvif_query: &impl OnvifQuery, +) -> Result, anyhow::Error> { + let mut result = Vec::new(); + for device_service_url in device_service_uris.iter() { + trace!("apply_filters - device service url {}", &device_service_url); + let (ip_address, mac_address) = match onvif_query + .get_device_ip_and_mac_address(&device_service_url) + .await + { + Ok(ip_and_mac) => ip_and_mac, + Err(e) => { + error!("apply_filters - error getting ip and mac address: {}", e); + continue; + } + }; + + // Evaluate camera ip address against ip filter if provided + let ip_address_as_vec = vec![ip_address.clone()]; + if execute_filter( + discovery_handler_config.ip_addresses.as_ref(), + &ip_address_as_vec, + ) { + continue; + } + + // Evaluate camera mac address against mac filter if provided + let mac_address_as_vec = vec![mac_address.clone()]; + if execute_filter( + discovery_handler_config.mac_addresses.as_ref(), + &mac_address_as_vec, + ) { + continue; + } + + let ip_and_mac_joined = format!("{}-{}", &ip_address, &mac_address); + + // Evaluate camera scopes against scopes filter if provided + let device_scopes = match onvif_query.get_device_scopes(&device_service_url).await { + Ok(scopes) => scopes, + Err(e) => { + error!("apply_filters - error getting scopes: {}", e); + continue; + } + }; + if execute_filter(discovery_handler_config.scopes.as_ref(), &device_scopes) { + continue; + } + + let mut properties = HashMap::new(); + properties.insert( + ONVIF_DEVICE_SERVICE_URL_LABEL_ID.to_string(), + device_service_url.to_string(), + ); + properties.insert(ONVIF_DEVICE_IP_ADDRESS_LABEL_ID.into(), ip_address); + properties.insert(ONVIF_DEVICE_MAC_ADDRESS_LABEL_ID.into(), mac_address); + + trace!( + "apply_filters - returns DiscoveryResult ip/mac: {:?}, props: {:?}", + &ip_and_mac_joined, + &properties + ); + result.push(Device { + id: ip_and_mac_joined, + properties, + mounts: Vec::default(), + device_specs: Vec::default(), + }) + } + Ok(result) +} + +#[cfg(test)] +mod tests { + use super::super::discovery_utils::MockOnvifQuery; + use super::*; + + struct IpAndMac { + mock_uri: &'static str, + mock_ip: &'static str, + mock_mac: &'static str, + } + + struct Scope { + mock_uri: &'static str, + mock_scope: &'static str, + } + + fn configure_scenario( + mock: &mut MockOnvifQuery, + ip_and_mac: Option, + scope: Option, + ) { + if let Some(ip_and_mac_) = ip_and_mac { + configure_get_device_ip_and_mac_address( + mock, + &ip_and_mac_.mock_uri, + &ip_and_mac_.mock_ip, + &ip_and_mac_.mock_mac, + ) + } + if let Some(scope_) = scope { + configure_get_device_scopes(mock, &scope_.mock_uri, &scope_.mock_scope) + } + } + + fn configure_get_device_ip_and_mac_address( + mock: &mut MockOnvifQuery, + uri: &'static str, + ip: &'static str, + mac: &'static str, + ) { + mock.expect_get_device_ip_and_mac_address() + .times(1) + .withf(move |u| u == uri) + .returning(move |_| Ok((ip.to_string(), mac.to_string()))); + } + + fn configure_get_device_scopes( + mock: &mut MockOnvifQuery, + uri: &'static str, + scope: &'static str, + ) { + mock.expect_get_device_scopes() + .times(1) + .withf(move |u| u == uri) + .returning(move |_| Ok(vec![scope.to_string()])); + } + + #[test] + fn test_deserialize_discovery_details() { + let dh_config: 
OnvifDiscoveryDetails = deserialize_discovery_details("{}").unwrap(); + let serialized = serde_json::to_string(&dh_config).unwrap(); + let expected_deserialized = r#"{"discoveryTimeoutSeconds":1}"#; + assert_eq!(expected_deserialized, serialized); + } + + #[tokio::test] + async fn test_apply_filters_no_filters() { + let mock_uri = "device_uri"; + + let mut mock = MockOnvifQuery::new(); + configure_scenario( + &mut mock, + Some(IpAndMac { + mock_uri: "device_uri", + mock_ip: "mock.ip", + mock_mac: "mock:mac", + }), + Some(Scope { + mock_uri: "device_uri", + mock_scope: "mock.scope", + }), + ); + + let onvif_config = OnvifDiscoveryDetails { + ip_addresses: None, + mac_addresses: None, + scopes: None, + discovery_timeout_seconds: 1, + }; + let instances = apply_filters(&onvif_config, vec![mock_uri.to_string()], &mock) + .await + .unwrap(); + + assert_eq!(1, instances.len()); + } + + #[tokio::test] + async fn test_apply_filters_include_ip_exist() { + let mock_uri = "device_uri"; + let mock_ip = "mock.ip"; + + let mut mock = MockOnvifQuery::new(); + configure_scenario( + &mut mock, + Some(IpAndMac { + mock_uri, + mock_ip, + mock_mac: "mock:mac", + }), + Some(Scope { + mock_uri, + mock_scope: "mock.scope", + }), + ); + + let onvif_config = OnvifDiscoveryDetails { + ip_addresses: Some(FilterList { + action: FilterType::Include, + items: vec![mock_ip.to_string()], + }), + mac_addresses: None, + scopes: None, + discovery_timeout_seconds: 1, + }; + let instances = apply_filters(&onvif_config, vec![mock_uri.to_string()], &mock) + .await + .unwrap(); + + assert_eq!(1, instances.len()); + } + + #[tokio::test] + async fn test_apply_filters_include_ip_nonexist() { + let mock_uri = "device_uri"; + + let mut mock = MockOnvifQuery::new(); + configure_scenario( + &mut mock, + Some(IpAndMac { + mock_uri, + mock_ip: "mock.ip", + mock_mac: "mock:mac", + }), + None, + ); + + let onvif_config = OnvifDiscoveryDetails { + ip_addresses: Some(FilterList { + action: FilterType::Include, + items: vec!["nonexist.ip".to_string()], + }), + mac_addresses: None, + scopes: None, + discovery_timeout_seconds: 1, + }; + let instances = apply_filters(&onvif_config, vec![mock_uri.to_string()], &mock) + .await + .unwrap(); + + assert_eq!(0, instances.len()); + } + + #[tokio::test] + async fn test_apply_filters_exclude_ip_nonexist() { + let mock_uri = "device_uri"; + + let mut mock = MockOnvifQuery::new(); + configure_scenario( + &mut mock, + Some(IpAndMac { + mock_uri, + mock_ip: "mock.ip", + mock_mac: "mock:mac", + }), + Some(Scope { + mock_uri, + mock_scope: "mock.scope", + }), + ); + + let onvif_config = OnvifDiscoveryDetails { + ip_addresses: Some(FilterList { + action: FilterType::Exclude, + items: vec!["nonexist.ip".to_string()], + }), + mac_addresses: None, + scopes: None, + discovery_timeout_seconds: 1, + }; + let instances = apply_filters(&onvif_config, vec![mock_uri.to_string()], &mock) + .await + .unwrap(); + + assert_eq!(1, instances.len()); + } + + #[tokio::test] + async fn test_apply_filters_exclude_ip_exist() { + let mock_uri = "device_uri"; + let mock_ip = "mock.ip"; + + let mut mock = MockOnvifQuery::new(); + configure_scenario( + &mut mock, + Some(IpAndMac { + mock_uri, + mock_ip, + mock_mac: "mock:mac", + }), + None, + ); + + let onvif_config = OnvifDiscoveryDetails { + ip_addresses: Some(FilterList { + action: FilterType::Exclude, + items: vec![mock_ip.to_string()], + }), + mac_addresses: None, + scopes: None, + discovery_timeout_seconds: 1, + }; + let instances = apply_filters(&onvif_config, 
vec![mock_uri.to_string()], &mock) + .await + .unwrap(); + + assert_eq!(0, instances.len()); + } + + #[tokio::test] + async fn test_apply_filters_include_mac_exist() { + let mock_uri = "device_uri"; + let mock_mac = "mock:mac"; + + let mut mock = MockOnvifQuery::new(); + configure_scenario( + &mut mock, + Some(IpAndMac { + mock_uri, + mock_ip: "mock.ip", + mock_mac, + }), + Some(Scope { + mock_uri, + mock_scope: "mock.scope", + }), + ); + + let onvif_config = OnvifDiscoveryDetails { + ip_addresses: None, + mac_addresses: Some(FilterList { + action: FilterType::Include, + items: vec![mock_mac.to_string()], + }), + scopes: None, + discovery_timeout_seconds: 1, + }; + let instances = apply_filters(&onvif_config, vec![mock_uri.to_string()], &mock) + .await + .unwrap(); + + assert_eq!(1, instances.len()); + } + + #[tokio::test] + async fn test_apply_filters_include_mac_nonexist() { + let mock_uri = "device_uri"; + + let mut mock = MockOnvifQuery::new(); + configure_scenario( + &mut mock, + Some(IpAndMac { + mock_uri, + mock_ip: "mock.ip", + mock_mac: "mock:mac", + }), + None, + ); + + let onvif_config = OnvifDiscoveryDetails { + ip_addresses: None, + mac_addresses: Some(FilterList { + action: FilterType::Include, + items: vec!["nonexist:mac".to_string()], + }), + scopes: None, + discovery_timeout_seconds: 1, + }; + let instances = apply_filters(&onvif_config, vec![mock_uri.to_string()], &mock) + .await + .unwrap(); + + assert_eq!(0, instances.len()); + } + + #[tokio::test] + async fn test_apply_filters_exclude_mac_nonexist() { + let mock_uri = "device_uri"; + + let mut mock = MockOnvifQuery::new(); + configure_scenario( + &mut mock, + Some(IpAndMac { + mock_uri, + mock_ip: "mock.ip", + mock_mac: "mock:mac", + }), + Some(Scope { + mock_uri, + mock_scope: "mock.scope", + }), + ); + + let onvif_config = OnvifDiscoveryDetails { + ip_addresses: None, + mac_addresses: Some(FilterList { + action: FilterType::Exclude, + items: vec!["nonexist:mac".to_string()], + }), + scopes: None, + discovery_timeout_seconds: 1, + }; + let instances = apply_filters(&onvif_config, vec![mock_uri.to_string()], &mock) + .await + .unwrap(); + + assert_eq!(1, instances.len()); + } + + #[tokio::test] + async fn test_apply_filters_exclude_mac_exist() { + let mock_uri = "device_uri"; + let mock_mac = "mock:mac"; + + let mut mock = MockOnvifQuery::new(); + configure_scenario( + &mut mock, + Some(IpAndMac { + mock_uri, + mock_ip: "mock.ip", + mock_mac, + }), + None, + ); + + let onvif_config = OnvifDiscoveryDetails { + ip_addresses: None, + mac_addresses: Some(FilterList { + action: FilterType::Exclude, + items: vec![mock_mac.to_string()], + }), + scopes: None, + discovery_timeout_seconds: 1, + }; + let instances = apply_filters(&onvif_config, vec![mock_uri.to_string()], &mock) + .await + .unwrap(); + + assert_eq!(0, instances.len()); + } +} diff --git a/agent/src/protocols/onvif/discovery_impl.rs b/discovery-handlers/onvif/src/discovery_impl.rs similarity index 99% rename from agent/src/protocols/onvif/discovery_impl.rs rename to discovery-handlers/onvif/src/discovery_impl.rs index e506dc18f..840cd57a1 100644 --- a/agent/src/protocols/onvif/discovery_impl.rs +++ b/discovery-handlers/onvif/src/discovery_impl.rs @@ -1,4 +1,4 @@ -mod to_serialize { +pub mod to_serialize { use super::common::*; use std::io::Write; use yaserde::YaSerialize; @@ -42,7 +42,7 @@ mod to_serialize { } } -mod to_deserialize { +pub mod to_deserialize { use super::common::*; use std::io::Read; use yaserde::YaDeserialize; @@ -91,7 +91,7 @@ pub mod probe_types 
{ pub const NETWORK_VIDEO_TRANSMITTER: &str = "netwsdl:NetworkVideoTransmitter"; } -mod common { +pub mod common { use std::io::{Read, Write}; use yaserde::{YaDeserialize, YaSerialize}; @@ -225,7 +225,7 @@ pub mod util { } } - pub async fn simple_onvif_discover(timeout: Duration) -> Result, failure::Error> { + pub async fn simple_onvif_discover(timeout: Duration) -> Result, anyhow::Error> { let (mut discovery_timeout_tx, mut discovery_timeout_rx) = mpsc::channel(2); let (mut discovery_cancel_tx, mut discovery_cancel_rx) = mpsc::channel(2); let shared_devices = Arc::new(Mutex::new(Vec::new())); diff --git a/discovery-handlers/onvif/src/discovery_utils.rs b/discovery-handlers/onvif/src/discovery_utils.rs new file mode 100644 index 000000000..56b9d2432 --- /dev/null +++ b/discovery-handlers/onvif/src/discovery_utils.rs @@ -0,0 +1,656 @@ +use async_trait::async_trait; +use futures_util::stream::TryStreamExt; +use hyper::Request; +use log::trace; +#[cfg(test)] +use mockall::{automock, predicate::*}; +use std::io::{Error, ErrorKind}; +use sxd_document::{parser, Package}; +use sxd_xpath::Value; + +pub const ONVIF_DEVICE_SERVICE_URL_LABEL_ID: &str = "ONVIF_DEVICE_SERVICE_URL"; +pub const ONVIF_DEVICE_IP_ADDRESS_LABEL_ID: &str = "ONVIF_DEVICE_IP_ADDRESS"; +pub const ONVIF_DEVICE_MAC_ADDRESS_LABEL_ID: &str = "ONVIF_DEVICE_MAC_ADDRESS"; +pub const MEDIA_WSDL: &str = "http://www.onvif.org/ver10/media/wsdl"; +pub const DEVICE_WSDL: &str = "http://www.onvif.org/ver10/device/wsdl"; + +/// OnvifQuery can access ONVIF properties given an ONVIF camera's device service url. +/// +/// An implementation of an onvif query can retrieve the camera's ip/mac address, scopes, profiles and streaming uri. +#[cfg_attr(test, automock)] +#[async_trait] +pub trait OnvifQuery { + async fn get_device_ip_and_mac_address( + &self, + service_url: &str, + ) -> Result<(String, String), anyhow::Error>; + async fn get_device_scopes(&self, url: &str) -> Result, anyhow::Error>; + async fn get_device_service_uri( + &self, + url: &str, + service: &str, + ) -> Result; + async fn get_device_profiles(&self, url: &str) -> Result, anyhow::Error>; + async fn get_device_profile_streaming_uri( + &self, + url: &str, + profile_token: &str, + ) -> Result; +} + +pub struct OnvifQueryImpl {} + +#[async_trait] +impl OnvifQuery for OnvifQueryImpl { + /// Gets the ip and mac address of a given ONVIF camera + async fn get_device_ip_and_mac_address( + &self, + service_url: &str, + ) -> Result<(String, String), anyhow::Error> { + let http = HttpRequest {}; + inner_get_device_ip_and_mac_address(service_url, &http).await + } + + /// Gets the list of scopes for a given ONVIF camera + async fn get_device_scopes(&self, url: &str) -> Result, anyhow::Error> { + let http = HttpRequest {}; + inner_get_device_scopes(url, &http).await + } + + /// Gets specific service, like media, from a given ONVIF camera + async fn get_device_service_uri( + &self, + url: &str, + service: &str, + ) -> Result { + let http = HttpRequest {}; + inner_get_device_service_uri(url, service, &http).await + } + + /// Gets the list of streaming profiles for a given ONVIF camera + async fn get_device_profiles(&self, url: &str) -> Result, anyhow::Error> { + let http = HttpRequest {}; + inner_get_device_profiles(url, &http).await + } + + /// Gets the streaming uri for a given ONVIF camera's profile + async fn get_device_profile_streaming_uri( + &self, + url: &str, + profile_token: &str, + ) -> Result { + let http = HttpRequest {}; + inner_get_device_profile_streaming_uri(url, profile_token, 
&http).await + } +} + +/// Http can send an HTTP::Post. +/// +/// An implementation of http can send an HTTP::Post. +#[cfg_attr(test, automock)] +#[async_trait] +trait Http { + async fn post(&self, url: &str, mime_action: &str, msg: &str) + -> Result; +} + +struct HttpRequest {} + +impl HttpRequest { + /// This converts an http response body into an sxd_document::Package + fn handle_request_body(body: &str) -> Result { + let xml_as_tree = match parser::parse(&body) { + Ok(xml_as_tree) => xml_as_tree, + Err(e) => return Err(Error::new(ErrorKind::InvalidData, e).into()), + }; + trace!( + "handle_request_body - response as xmltree: {:?}", + xml_as_tree + ); + Ok(xml_as_tree) + } +} + +#[async_trait] +impl Http for HttpRequest { + /// This sends an HTTP::Post and converts the response body into an sxd_document::Package + async fn post( + &self, + url: &str, + mime_action: &str, + msg: &str, + ) -> Result { + trace!( + "post - url:{}, mime_action:{}, msg:{}", + &url, + &mime_action, + &msg + ); + + let full_mime = format!( + "{}; {}; {};", + "application/soap+xml", "charset=utf-8", mime_action + ); + let request = Request::post(url) + .header("CONTENT-TYPE", full_mime) + .body(msg.to_string().into()) + .expect("infallible"); + let response = hyper::Client::new().request(request).await.unwrap(); + if response.status() != 200 { + return Err(anyhow::format_err!("failure")); + } + let response_body = response + .into_body() + .try_fold(bytes::BytesMut::new(), |mut acc, chunk| async { + acc.extend(chunk); + Ok(acc) + }) + .await? + .freeze(); + let response_body_str = std::str::from_utf8(&response_body)?; + match HttpRequest::handle_request_body(&response_body_str) { + Ok(dom) => Ok(dom), + Err(e) => { + trace!( + "post - failure to handle response: {:?}", + &response_body_str + ); + Err(Error::new(ErrorKind::InvalidData, e).into()) + } + } + } +} + +/// Creates a SOAP mime action +fn get_action(wsdl: &str, function: &str) -> String { + format!("action=\"{}/{}\"", wsdl, function) +} + +/// Gets the ip and mac address for a given ONVIF camera +async fn inner_get_device_ip_and_mac_address( + service_url: &str, + http: &impl Http, +) -> Result<(String, String), anyhow::Error> { + let network_interfaces_xml = match http + .post( + service_url, + &get_action(DEVICE_WSDL, "GetNetworkInterfaces"), + &GET_NETWORK_INTERFACES_TEMPLATE.to_string(), + ) + .await + { + Ok(xml) => xml, + Err(e) => { + return Err(anyhow::format_err!( + "failed to get network interfaces from device: {:?}", + e + )) + } + }; + let network_interfaces_doc = network_interfaces_xml.as_document(); + let ip_address = match sxd_xpath::evaluate_xpath( + &network_interfaces_doc, + "//*[local-name()='GetNetworkInterfacesResponse']/*[local-name()='NetworkInterfaces']/*[local-name()='IPv4']/*[local-name()='Config']/*/*[local-name()='Address']/text()" + ) { + Ok(Value::String(ip)) => ip, + Ok(Value::Nodeset(ns)) => match ns.into_iter().map(|x| x.string_value()).collect::>().first() { + Some(first) => first.to_string(), + None => return Err(anyhow::format_err!("Failed to get ONVIF ip address: none specified in response")) + }, + Ok(Value::Boolean(_)) | + Ok(Value::Number(_)) => return Err(anyhow::format_err!("Failed to get ONVIF ip address: unexpected type")), + Err(e) => return Err(anyhow::format_err!("Failed to get ONVIF ip address: {}", e)) + }; + trace!( + "inner_get_device_ip_and_mac_address - network interfaces (ip address): {:?}", + ip_address + ); + let mac_address = match sxd_xpath::evaluate_xpath( + &network_interfaces_doc, + 
"//*[local-name()='GetNetworkInterfacesResponse']/*[local-name()='NetworkInterfaces']/*[local-name()='Info']/*[local-name()='HwAddress']/text()" + ) { + Ok(Value::String(mac)) => mac, + Ok(Value::Nodeset(ns)) => match ns.iter().map(|x| x.string_value()).collect::>().first() { + Some(first) => first.to_string(), + None => return Err(anyhow::format_err!("Failed to get ONVIF mac address: none specified in response")) + }, + Ok(Value::Boolean(_)) | + Ok(Value::Number(_)) => return Err(anyhow::format_err!("Failed to get ONVIF mac address: unexpected type")), + Err(e) => return Err(anyhow::format_err!("Failed to get ONVIF mac address: {}", e)) + }; + trace!( + "inner_get_device_ip_and_mac_address - network interfaces (mac address): {:?}", + mac_address + ); + Ok((ip_address, mac_address)) +} + +/// Gets the list of scopes for a given ONVIF camera +async fn inner_get_device_scopes( + url: &str, + http: &impl Http, +) -> Result, anyhow::Error> { + let scopes_xml = match http + .post( + &url, + &get_action(DEVICE_WSDL, "GetScopes"), + &GET_SCOPES_TEMPLATE.to_string(), + ) + .await + { + Ok(xml) => xml, + Err(e) => { + return Err(anyhow::format_err!( + "failed to get scopes from device: {:?}", + e + )) + } + }; + let scopes_doc = scopes_xml.as_document(); + let scopes_query = sxd_xpath::evaluate_xpath( + &scopes_doc, + "//*[local-name()='GetScopesResponse']/*[local-name()='Scopes']/*[local-name()='ScopeItem']/text()" + ); + let scopes = match scopes_query { + Ok(Value::Nodeset(scope_items)) => scope_items + .iter() + .map(|scope_item| scope_item.string_value()) + .collect::>(), + Ok(Value::Boolean(_)) | Ok(Value::Number(_)) | Ok(Value::String(_)) => { + return Err(anyhow::format_err!( + "Failed to get ONVIF scopes: unexpected type" + )) + } + Err(e) => return Err(anyhow::format_err!("Failed to get ONVIF scopes: {}", e)), + }; + trace!("inner_get_device_scopes - scopes: {:?}", scopes); + Ok(scopes) +} + +/// SOAP request body for getting the network interfaces for an ONVIF camera +const GET_NETWORK_INTERFACES_TEMPLATE: &str = r#" + + + + + "#; + +/// SOAP request body for getting scopes for an ONVIF camera +const GET_SCOPES_TEMPLATE: &str = r#" + + + + + "#; + +/// Gets a specific service (like media) uri from an ONVIF camera +async fn inner_get_device_service_uri( + url: &str, + service: &str, + http: &impl Http, +) -> Result { + let services_xml = match http + .post( + &url, + &get_action(DEVICE_WSDL, "GetServices"), + &GET_SERVICES_TEMPLATE.to_string(), + ) + .await + { + Ok(xml) => xml, + Err(e) => { + return Err(anyhow::format_err!( + "failed to get services from device: {:?}", + e + )) + } + }; + let services_doc = services_xml.as_document(); + let service_xpath_query = format!( + "//*[local-name()='GetServicesResponse']/*[local-name()='Service' and *[local-name()='Namespace']/text() ='{}']/*[local-name()='XAddr']/text()", + service + ); + let requested_device_service_uri = + match sxd_xpath::evaluate_xpath(&services_doc, service_xpath_query.as_str()) { + Ok(uri) => uri.string(), + Err(e) => { + return Err(anyhow::format_err!( + "failed to get servuce uri from resoinse: {:?}", + e + )) + } + }; + trace!( + "inner_get_device_service_uri - service ({}) uris: {:?}", + service, + requested_device_service_uri + ); + Ok(requested_device_service_uri) +} + +/// SOAP request body for getting the supported services' uris for an ONVIF camera +const GET_SERVICES_TEMPLATE: &str = r#" + + + + + "#; + +/// Gets list of media profiles for a given ONVIF camera +async fn inner_get_device_profiles( + url: &str, + 
+    http: &impl Http,
+) -> Result<Vec<String>, anyhow::Error> {
+    let action = get_action(MEDIA_WSDL, "GetProfiles");
+    let message = GET_PROFILES_TEMPLATE.to_string();
+    let profiles_xml = match http.post(&url, &action, &message).await {
+        Ok(xml) => xml,
+        Err(e) => {
+            return Err(anyhow::format_err!(
+                "failed to get profiles from device: {:?}",
+                e
+            ))
+        }
+    };
+    let profiles_doc = profiles_xml.as_document();
+    let profiles_query = sxd_xpath::evaluate_xpath(
+        &profiles_doc,
+        "//*[local-name()='GetProfilesResponse']/*[local-name()='Profiles']/@token",
+    );
+    let profiles = match profiles_query {
+        Ok(Value::Nodeset(profiles_items)) => profiles_items
+            .iter()
+            .map(|profile_item| profile_item.string_value())
+            .collect::<Vec<String>>(),
+        Ok(Value::Boolean(_)) | Ok(Value::Number(_)) | Ok(Value::String(_)) => {
+            return Err(anyhow::format_err!(
+                "Failed to get ONVIF profiles: unexpected type"
+            ))
+        }
+        Err(e) => return Err(anyhow::format_err!("Failed to get ONVIF profiles: {}", e)),
+    };
+    trace!("inner_get_device_profiles - profiles: {:?}", profiles);
+    Ok(profiles)
+}
+
+/// Gets the streaming uri for a given profile for an ONVIF camera
+async fn inner_get_device_profile_streaming_uri(
+    url: &str,
+    profile_token: &str,
+    http: &impl Http,
+) -> Result<String, anyhow::Error> {
+    let stream_soap = get_stream_uri_message(&profile_token);
+    let stream_uri_xml = match http
+        .post(&url, &get_action(MEDIA_WSDL, "GetStreamUri"), &stream_soap)
+        .await
+    {
+        Ok(xml) => xml,
+        Err(e) => {
+            return Err(anyhow::format_err!(
+                "failed to get streaming uri from device: {:?}",
+                e
+            ))
+        }
+    };
+    let stream_uri_doc = stream_uri_xml.as_document();
+    let stream_uri = match sxd_xpath::evaluate_xpath(
+        &stream_uri_doc,
+        "//*[local-name()='GetStreamUriResponse']/*[local-name()='MediaUri']/*[local-name()='Uri']/text()"
+    ) {
+        Ok(stream) => stream.string(),
+        Err(e) => {
+            return Err(anyhow::format_err!(
+                "failed to get stream uri from response: {:?}",
+                e
+            ))
+        }
+    };
+    Ok(stream_uri)
+}
+
+/// Gets SOAP request body for getting the streaming uri for a specific profile for an ONVIF camera
+fn get_stream_uri_message(profile: &str) -> String {
+    format!(
+        r#"
+
+
+
+
+RTP-Unicast
+
+RTSP
+
+
+{}
+
+
+;"#,
+        profile
+    )
+}
+
+/// SOAP request body for getting the media profiles for an ONVIF camera
+const GET_PROFILES_TEMPLATE: &str = r#"
+
+
+
+
+"#;
+
+// const GET_DEVICE_INFORMATION_TEMPLATE: &str = r#"
+//
+//
+//
+//
+// "#;
+
+// const GET_HOSTNAME_TEMPLATE: &str = r#"
+//
+//
+//
+//
+// "#;
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    fn configure_post(mock: &mut MockHttp, url: &str, mime: &str, msg: &str, output_xml: &str) {
+        let inner_url = url.to_string();
+        let inner_mime = mime.to_string();
+        let inner_msg = msg.to_string();
+        let inner_output_xml = output_xml.to_string();
+        trace!("mock.expect_post url:{}, mime:{}, msg:{}", url, mime, msg);
+        mock.expect_post()
+            .times(1)
+            .withf(move |actual_url, actual_mime, actual_msg| {
+                actual_url == inner_url && actual_mime == inner_mime && actual_msg == inner_msg
+            })
+            .returning(move |_, _, _| {
+                let xml_as_tree = parser::parse(&inner_output_xml).unwrap();
+                Ok(xml_as_tree)
+            });
+    }
+
+    #[tokio::test]
+    async fn test_inner_get_device_ip_and_mac_address_ip_in_manual() {
+        let _ = env_logger::builder().is_test(true).try_init();
+
+        let mut mock = MockHttp::new();
+        let response = "\ntrueeth000:12:41:5c:a1:a51500false10Fullfalse10Full0true192.168.1.3624false";
+        configure_post(
+            &mut mock,
+            &"test_inner_get_device_ip_and_mac_address-url".to_string(),
&get_action(DEVICE_WSDL, "GetNetworkInterfaces"), + &GET_NETWORK_INTERFACES_TEMPLATE.to_string(), + &response.to_string(), + ); + assert_eq!( + ("192.168.1.36".to_string(), "00:12:41:5c:a1:a5".to_string()), + inner_get_device_ip_and_mac_address( + &"test_inner_get_device_ip_and_mac_address-url".to_string(), + &mock + ) + .await + .unwrap() + ); + } + + #[tokio::test] + async fn test_inner_get_device_ip_and_mac_address_ip_in_from_dhcp() { + let _ = env_logger::builder().is_test(true).try_init(); + + let mut mock = MockHttp::new(); + let response = "\ntrueeth000:FC:DA:B1:69:CC1500true10.137.185.208010.137.185.20823true\r\n"; + configure_post( + &mut mock, + &"test_inner_get_device_ip_and_mac_address-url".to_string(), + &get_action(DEVICE_WSDL, "GetNetworkInterfaces"), + &GET_NETWORK_INTERFACES_TEMPLATE.to_string(), + &response.to_string(), + ); + assert_eq!( + ( + "10.137.185.208".to_string(), + "00:FC:DA:B1:69:CC".to_string() + ), + inner_get_device_ip_and_mac_address( + &"test_inner_get_device_ip_and_mac_address-url".to_string(), + &mock + ) + .await + .unwrap() + ); + } + + #[tokio::test] + async fn test_inner_get_device_scopes() { + let _ = env_logger::builder().is_test(true).try_init(); + + let mut mock = MockHttp::new(); + let response = "\nFixedonvif://www.onvif.org/type/video_encoderFixedonvif://www.onvif.org/type/audio_encoderFixedonvif://www.onvif.org/hardware/IPC-modelFixedonvif://www.onvif.org/location/country/chinaFixedonvif://www.onvif.org/name/NVTFixedonvif://www.onvif.org/Profile/StreamingConfigurableodm:name:fjEvtevision"; + configure_post( + &mut mock, + &"test_inner_get_device_scopes-url".to_string(), + &get_action(DEVICE_WSDL, "GetScopes"), + &GET_SCOPES_TEMPLATE.to_string(), + &response.to_string(), + ); + + let mut expected = [ + "onvif://www.onvif.org/type/audio_encoder".to_string(), + "onvif://www.onvif.org/type/video_encoder".to_string(), + "onvif://www.onvif.org/Profile/Streaming".to_string(), + "odm:name:fjEvtevision".to_string(), + "onvif://www.onvif.org/hardware/IPC-model".to_string(), + "onvif://www.onvif.org/location/country/china".to_string(), + "onvif://www.onvif.org/name/NVT".to_string(), + ] + .to_vec(); + expected.sort(); + + let mut actual = + inner_get_device_scopes(&"test_inner_get_device_scopes-url".to_string(), &mock) + .await + .unwrap(); + actual.sort(); + + assert_eq!(expected, actual); + } + + #[tokio::test] + async fn test_inner_get_device_service_uri() { + let _ = env_logger::builder().is_test(true).try_init(); + + let mut mock = MockHttp::new(); + let response = "\nhttp://www.onvif.org/ver10/device/wsdlhttp://192.168.1.35:8899/onvif/device_service241http://www.onvif.org/ver10/media/wsdlhttp://192.168.1.35:8899/onvif/Media241http://www.onvif.org/ver10/events/wsdlhttp://192.168.1.35:8899/onvif/Events241http://www.onvif.org/ver20/imaging/wsdlhttp://192.168.1.35:8899/onvif/Imaging241http://www.onvif.org/ver20/ptz/wsdlhttp://192.168.1.35:8899/onvif/PTZ241"; + configure_post( + &mut mock, + &"test_inner_get_device_service_uri-url".to_string(), + &get_action(DEVICE_WSDL, "GetServices"), + &GET_SERVICES_TEMPLATE.to_string(), + &response.to_string(), + ); + assert_eq!( + "http://192.168.1.35:8899/onvif/Media".to_string(), + inner_get_device_service_uri( + &"test_inner_get_device_service_uri-url".to_string(), + &MEDIA_WSDL.to_string(), + &mock + ) + .await + .unwrap() + ); + } + + #[tokio::test] + async fn test_inner_get_device_profiles() { + let _ = env_logger::builder().is_test(true).try_init(); + + let mut mock = MockHttp::new(); + // GetProfiles is 
the first call + { + let response = "\nProfile_000VideoS_0003000Audio_0002000VideoE_0001H2641280720525125602HighIPv4224.1.2.300falsePT10SAudioE_0002G711648IPv4224.1.2.300falsePT10SAnalytics_0002PTZ_0002000http://www.onvif.org/ver10/tptz/PanTiltSpaces/TranslationGenericSpacehttp://www.onvif.org/ver10/tptz/ZoomSpaces/TranslationGenericSpacehttp://www.onvif.org/ver10/tptz/PanTiltSpaces/VelocityGenericSpacehttp://www.onvif.org/ver10/tptz/ZoomSpaces/VelocityGenericSpacePT1Shttp://www.onvif.org/ver10/tptz/PanTiltSpaces/PositionGenericSpace-11-11http://www.onvif.org/ver10/tptz/ZoomSpaces/PositionGenericSpace-11Profile_001VideoS_0003000Audio_0002000VideoE_0011H264704576525110242HighIPv4224.1.2.300falsePT10SAudioE_0002G711648IPv4224.1.2.300falsePT10SAnalytics_0002PTZ_0002000http://www.onvif.org/ver10/tptz/PanTiltSpaces/TranslationGenericSpacehttp://www.onvif.org/ver10/tptz/ZoomSpaces/TranslationGenericSpacehttp://www.onvif.org/ver10/tptz/PanTiltSpaces/VelocityGenericSpacehttp://www.onvif.org/ver10/tptz/ZoomSpaces/VelocityGenericSpacePT1Shttp://www.onvif.org/ver10/tptz/PanTiltSpaces/PositionGenericSpace-11-11http://www.onvif.org/ver10/tptz/ZoomSpaces/PositionGenericSpace-11Profile_002VideoS_0003000VideoE_0021JPEG7045764-36001512IPv4224.1.2.300falsePT10S"; + configure_post( + &mut mock, + &"test_inner_get_device_profiles-url".to_string(), + &get_action(MEDIA_WSDL, "GetProfiles"), + &GET_PROFILES_TEMPLATE.to_string(), + &response.to_string(), + ); + } + let mut actual_profiles = + inner_get_device_profiles(&"test_inner_get_device_profiles-url".to_string(), &mock) + .await + .unwrap(); + actual_profiles.sort(); + assert_eq!( + vec!["000".to_string(), "001".to_string(), "002".to_string()], + actual_profiles, + ); + } + + #[tokio::test] + async fn test_inner_get_device_profile_streaming_uri() { + let _ = env_logger::builder().is_test(true).try_init(); + + let expected_result = vec![ + "rtsp://192.168.0.36:554/user=admin_password=tlJwpbo6_channel=1_stream=0.sdp?real_stream".to_string(), + "rtsp://192.168.1.36:554/user=admin_password=tlJwpbo6_channel=1_stream=0.sdp?real_stream".to_string(), + "rtsp://192.168.2.36:554/user=admin_password=tlJwpbo6_channel=1_stream=0.sdp?real_stream".to_string() + ]; + + for (i, expected_uri) in expected_result.iter().enumerate().take(3) { + let mut mock = MockHttp::new(); + let profile = format!("00{}", i).to_string(); + let message = get_stream_uri_message(&profile); + let response = format!( + "\nrtsp://192.168.{}.36:554/user=admin_password=tlJwpbo6_channel=1_stream=0.sdp?real_streamfalsefalsePT10S", + i + ); + configure_post( + &mut mock, + &"test_inner_get_device_profile_streaming_uri-url".to_string(), + &get_action(MEDIA_WSDL, "GetStreamUri"), + &message, + &response.to_string(), + ); + + assert_eq!( + expected_uri.to_string(), + inner_get_device_profile_streaming_uri( + &"test_inner_get_device_profile_streaming_uri-url".to_string(), + &profile, + &mock + ) + .await + .unwrap() + ); + } + } + + #[test] + fn test_http_handle_request_body_no_panic() { + assert!(HttpRequest::handle_request_body("\r\n").is_err()); + } +} diff --git a/discovery-handlers/onvif/src/lib.rs b/discovery-handlers/onvif/src/lib.rs new file mode 100644 index 000000000..2fe844b10 --- /dev/null +++ b/discovery-handlers/onvif/src/lib.rs @@ -0,0 +1,13 @@ +pub mod discovery_handler; +mod discovery_impl; +mod discovery_utils; + +#[macro_use] +extern crate serde_derive; +#[macro_use] +extern crate yaserde_derive; + +/// Name that onvif discovery handlers use when registering with the Agent +pub const 
DISCOVERY_HANDLER_NAME: &str = "onvif";
+/// Defines whether this discovery handler discovers local devices on nodes rather than ones visible to multiple nodes
+pub const SHARED: bool = true;
diff --git a/discovery-handlers/opcua/Cargo.toml b/discovery-handlers/opcua/Cargo.toml
new file mode 100644
index 000000000..d2b66ff0e
--- /dev/null
+++ b/discovery-handlers/opcua/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "akri-opcua"
+version = "0.6.5"
+authors = ["Kate Goldenring "]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+akri-discovery-utils = { path = "../../discovery-utils" }
+akri-shared = { path = "../../shared" }
+anyhow = "1.0.38"
+async-trait = "0.1.0"
+env_logger = "0.8.3"
+futures-util = "0.3"
+log = "0.4"
+opcua-client = "0.8.0"
+prost = "0.6"
+serde = "1.0.104"
+serde_json = "1.0.45"
+serde_yaml = "0.8.11"
+serde_derive = "1.0.1"
+tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] }
+tonic = {version = "0.1.0", features = ["tls"] }
+url = "2.2.0"
+
+[dev-dependencies]
+mockall = "0.9.0"
\ No newline at end of file
diff --git a/discovery-handlers/opcua/src/discovery_handler.rs b/discovery-handlers/opcua/src/discovery_handler.rs
new file mode 100644
index 000000000..a59f22132
--- /dev/null
+++ b/discovery-handlers/opcua/src/discovery_handler.rs
@@ -0,0 +1,192 @@
+use super::{discovery_impl::do_standard_discovery, OPCUA_DISCOVERY_URL_LABEL};
+use akri_discovery_utils::{
+    discovery::{
+        discovery_handler::{deserialize_discovery_details, DISCOVERED_DEVICES_CHANNEL_CAPACITY},
+        v0::{
+            discovery_handler_server::DiscoveryHandler, Device, DiscoverRequest, DiscoverResponse,
+        },
+        DiscoverStream,
+    },
+    filtering::FilterList,
+};
+use async_trait::async_trait;
+use log::{error, info, trace};
+use std::time::Duration;
+use tokio::sync::mpsc;
+use tokio::time::delay_for;
+use tonic::{Response, Status};
+
+// TODO: make this configurable
+pub const DISCOVERY_INTERVAL_SECS: u64 = 10;
+
+/// Methods for discovering OPC UA Servers
+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub enum OpcuaDiscoveryMethod {
+    Standard(StandardOpcuaDiscovery),
+    // TODO: add scan
+}
+
+/// Discovers OPC UA Servers and/or LocalDiscoveryServers at specified DiscoveryURLs.
+/// If the DiscoveryURL is for a LocalDiscoveryServer, it will discover all Servers
+/// that have registered with that LocalDiscoveryServer.
+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct StandardOpcuaDiscovery {
+    #[serde(default = "lds_discovery_url", skip_serializing_if = "Vec::is_empty")]
+    pub discovery_urls: Vec<String>,
+}
+
+/// If no DiscoveryURLs are specified, uses the OPC UA default DiscoveryURL
+/// for the LocalDiscoveryServer running on the host
+fn lds_discovery_url() -> Vec<String> {
+    vec!["opc.tcp://localhost:4840/".to_string()]
+}
+
+/// This defines the OPC UA data stored in the Configuration
+/// CRD
+///
+/// The OPC UA discovery handler is designed to support multiple methods
+/// for discovering OPC UA servers and stores a filter list for
+/// application names.
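+///
+/// As exercised by the detailed test at the bottom of this file, discovery
+/// details of the following shape (YAML) deserialize into this struct:
+///
+/// ```yaml
+/// opcuaDiscoveryMethod:
+///   standard:
+///     discoveryUrls:
+///       - opc.tcp://127.0.0.1:4855/
+/// applicationNames:
+///   action: Include
+///   items:
+///     - "Some application name"
+/// ```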
+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct OpcuaDiscoveryDetails {
+    pub opcua_discovery_method: OpcuaDiscoveryMethod,
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub application_names: Option<FilterList>,
+}
+
+/// `DiscoveryHandlerImpl` discovers OPC UA servers at the DiscoveryURLs specified in
+/// `discovery_handler_config.opcua_discovery_method`, filtered by `application_names`.
+pub struct DiscoveryHandlerImpl {
+    register_sender: Option<mpsc::Sender<()>>,
+}
+
+impl DiscoveryHandlerImpl {
+    pub fn new(register_sender: Option<mpsc::Sender<()>>) -> Self {
+        DiscoveryHandlerImpl { register_sender }
+    }
+}
+
+#[async_trait]
+impl DiscoveryHandler for DiscoveryHandlerImpl {
+    type DiscoverStream = DiscoverStream;
+    async fn discover(
+        &self,
+        request: tonic::Request<DiscoverRequest>,
+    ) -> Result<Response<Self::DiscoverStream>, Status> {
+        info!("discover - called for OPC UA protocol");
+        let register_sender = self.register_sender.clone();
+        let discover_request = request.get_ref();
+        let (mut discovered_devices_sender, discovered_devices_receiver) =
+            mpsc::channel(DISCOVERED_DEVICES_CHANNEL_CAPACITY);
+        let discovery_handler_config: OpcuaDiscoveryDetails =
+            deserialize_discovery_details(&discover_request.discovery_details)
+                .map_err(|e| tonic::Status::new(tonic::Code::InvalidArgument, format!("{}", e)))?;
+        let mut previously_discovered_devices: Vec<Device> = Vec::new();
+        tokio::spawn(async move {
+            let discovery_method = discovery_handler_config.opcua_discovery_method.clone();
+            let application_names = discovery_handler_config.application_names.clone();
+            loop {
+                let discovery_urls: Vec<String> = match discovery_method.clone() {
+                    OpcuaDiscoveryMethod::Standard(standard_opcua_discovery) => {
+                        do_standard_discovery(
+                            standard_opcua_discovery.discovery_urls.clone(),
+                            application_names.clone(),
+                        )
+                    } // No other discovery methods implemented yet
+                };
+
+                // Build DiscoveryResult for each server discovered
+                let discovered_devices = discovery_urls
+                    .into_iter()
+                    .map(|discovery_url| {
+                        let mut properties = std::collections::HashMap::new();
+                        trace!(
+                            "discover - found OPC UA server at DiscoveryURL {}",
+                            discovery_url
+                        );
+                        properties
+                            .insert(OPCUA_DISCOVERY_URL_LABEL.to_string(), discovery_url.clone());
+                        Device {
+                            id: discovery_url,
+                            properties,
+                            mounts: Vec::default(),
+                            device_specs: Vec::default(),
+                        }
+                    })
+                    .collect::<Vec<Device>>();
+                let mut changed_device_list = false;
+                let mut matching_device_count = 0;
+                discovered_devices.iter().for_each(|device| {
+                    if !previously_discovered_devices.contains(device) {
+                        changed_device_list = true;
+                    } else {
+                        matching_device_count += 1;
+                    }
+                });
+                if changed_device_list
+                    || matching_device_count != previously_discovered_devices.len()
+                {
+                    trace!("discover - for OPC UA, sending updated device list");
+                    previously_discovered_devices = discovered_devices.clone();
+                    if let Err(e) = discovered_devices_sender
+                        .send(Ok(DiscoverResponse {
+                            devices: discovered_devices,
+                        }))
+                        .await
+                    {
+                        error!(
+                            "discover - for OPC UA failed to send discovery response with error {}",
+                            e
+                        );
+                        if let Some(mut sender) = register_sender {
+                            sender.send(()).await.unwrap();
+                        }
+                        break;
+                    }
+                }
+                delay_for(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await;
+            }
+        });
+        Ok(Response::new(discovered_devices_receiver))
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_deserialize_discovery_details_empty() {
+        // Check that if no DiscoveryUrls are provided, the default LDS url is used.
+ let yaml = r#" + opcuaDiscoveryMethod: + standard: {} + "#; + let dh_config: OpcuaDiscoveryDetails = deserialize_discovery_details(&yaml).unwrap(); + let serialized = serde_json::to_string(&dh_config).unwrap(); + let expected_deserialized = r#"{"opcuaDiscoveryMethod":{"standard":{"discoveryUrls":["opc.tcp://localhost:4840/"]}}}"#; + assert_eq!(expected_deserialized, serialized); + } + + #[test] + fn test_deserialize_discovery_details_detailed() { + // Test standard discovery + let yaml = r#" + opcuaDiscoveryMethod: + standard: + discoveryUrls: + - opc.tcp://127.0.0.1:4855/ + applicationNames: + action: Include + items: + - "Some application name" + "#; + let dh_config: OpcuaDiscoveryDetails = deserialize_discovery_details(&yaml).unwrap(); + let serialized = serde_json::to_string(&dh_config).unwrap(); + let expected_serialized = r#"{"opcuaDiscoveryMethod":{"standard":{"discoveryUrls":["opc.tcp://127.0.0.1:4855/"]}},"applicationNames":{"items":["Some application name"],"action":"Include"}}"#; + assert_eq!(expected_serialized, serialized); + } +} diff --git a/agent/src/protocols/opcua/discovery_impl.rs b/discovery-handlers/opcua/src/discovery_impl.rs similarity index 93% rename from agent/src/protocols/opcua/discovery_impl.rs rename to discovery-handlers/opcua/src/discovery_impl.rs index 34df08920..262421ee5 100644 --- a/agent/src/protocols/opcua/discovery_impl.rs +++ b/discovery-handlers/opcua/src/discovery_impl.rs @@ -1,7 +1,10 @@ -use super::opcua_client_wrapper::{create_opcua_discovery_client, OpcuaClient}; -use super::tcp_stream_wrapper::{TcpStream, TcpStreamImpl}; +use super::wrappers::{ + opcua_client_wrapper::{create_opcua_discovery_client, OpcuaClient}, + tcp_stream_wrapper::{TcpStream, TcpStreamImpl}, +}; use ::url::Url; -use akri_shared::akri::configuration::{should_include, FilterList}; +use akri_discovery_utils::filtering::{should_include, FilterList}; +use log::{error, info, trace}; use opcua_client::prelude::*; use std::{ net::{SocketAddr, ToSocketAddrs}, @@ -26,14 +29,14 @@ pub fn do_standard_discovery( discovery_urls: Vec, filter_list: Option, ) -> Vec { - trace!( + info!( "do_standard_discovery - for DiscoveryUrls {:?}", discovery_urls ); - let mut discovery_client = create_opcua_discovery_client(); + let mut discovery_handler_client = create_opcua_discovery_client(); let tcp_stream = TcpStreamImpl {}; get_discovery_urls( - &mut discovery_client, + &mut discovery_handler_client, discovery_urls, filter_list, tcp_stream, @@ -45,7 +48,7 @@ pub fn do_standard_discovery( /// (2) discover other servers registered with a Local Discovery Server in the case that the DiscoveryURL is for an LDS /// (3) determine whether the application at that URL should be included according to `ApplicationType` and the `application_names` filter fn get_discovery_urls( - discovery_client: &mut impl OpcuaClient, + discovery_handler_client: &mut impl OpcuaClient, lds_urls: Vec, filter_list: Option, tcp_stream: impl TcpStream, @@ -53,13 +56,12 @@ fn get_discovery_urls( let mut discovery_urls: Vec = Vec::new(); lds_urls.iter().for_each(|url| { if let Err(e) = test_tcp_connection(url, &tcp_stream) { - trace!( + error!( "get_discovery_urls - failed to make tcp connection with url {} with error {:?}", - url, - e + url, e ); } else { - match discovery_client.find_servers(url) { + match discovery_handler_client.find_servers(url) { Ok(applications) => { trace!( "get_discovery_urls - Server at {} responded with {} Applications", @@ -94,14 +96,14 @@ fn get_discovery_urls( /// The Rust OPC UA implementation of 
FindServers does not use a timeout when connecting with a Server over TCP /// So, an unsuccessful attempt can take over 2 minutes. /// Therefore, this tests the connection using a timeout before calling FindServers on the DiscoveryURL. -fn test_tcp_connection(url: &str, tcp_stream: &impl TcpStream) -> Result<(), failure::Error> { +fn test_tcp_connection(url: &str, tcp_stream: &impl TcpStream) -> Result<(), anyhow::Error> { let socket_addr = get_socket_addr(url)?; match tcp_stream.connect_timeout( &socket_addr, Duration::from_secs(TCP_CONNECTION_TEST_TIMEOUT_SECS), ) { Ok(_stream) => Ok(()), - Err(e) => Err(failure::format_err!("{:?}", e)), + Err(e) => Err(anyhow::format_err!("{:?}", e)), } } @@ -158,10 +160,10 @@ fn get_discovery_url_from_application_description( } /// This returns a socket address for the OPC UA DiscoveryURL else an error if not properly formatted -fn get_socket_addr(url: &str) -> Result { - let url = Url::parse(&url).map_err(|_| failure::format_err!("could not parse url"))?; +fn get_socket_addr(url: &str) -> Result { + let url = Url::parse(&url).map_err(|_| anyhow::format_err!("could not parse url"))?; if url.scheme() != OPC_TCP_SCHEME { - return Err(failure::format_err!( + return Err(anyhow::format_err!( "format of OPC UA url {} is not valid", url )); @@ -169,7 +171,7 @@ fn get_socket_addr(url: &str) -> Result { let host = url.host_str().unwrap(); let port = url .port() - .ok_or_else(|| failure::format_err!("provided discoveryURL is missing port"))?; + .ok_or_else(|| anyhow::format_err!("provided discoveryURL is missing port"))?; // Convert host and port to socket address let addr_str = format!("{}:{}", host, port); @@ -180,8 +182,9 @@ fn get_socket_addr(url: &str) -> Result { #[cfg(test)] mod tests { - use super::super::opcua_client_wrapper::MockOpcuaClient; - use super::super::tcp_stream_wrapper::MockTcpStream; + use super::super::wrappers::{ + opcua_client_wrapper::MockOpcuaClient, tcp_stream_wrapper::MockTcpStream, + }; use super::*; use mockall::Sequence; diff --git a/discovery-handlers/opcua/src/lib.rs b/discovery-handlers/opcua/src/lib.rs new file mode 100644 index 000000000..e65c2e587 --- /dev/null +++ b/discovery-handlers/opcua/src/lib.rs @@ -0,0 +1,14 @@ +#[macro_use] +extern crate serde_derive; + +pub mod discovery_handler; +mod discovery_impl; +mod wrappers; + +/// Name of the environment variable that will be mounted into the OPC UA broker pods. +/// Holds the DiscoveryURL for the OPC UA Server the broker is to connect to. +pub const OPCUA_DISCOVERY_URL_LABEL: &str = "OPCUA_DISCOVERY_URL"; +/// Name that OPC UA discovery handlers use when registering with the Agent +pub const DISCOVERY_HANDLER_NAME: &str = "opcua"; +/// Defines whether this discovery handler discovers local devices on nodes rather than ones visible to multiple nodes +pub const SHARED: bool = true; diff --git a/agent/src/protocols/opcua/mod.rs b/discovery-handlers/opcua/src/wrappers.rs similarity index 79% rename from agent/src/protocols/opcua/mod.rs rename to discovery-handlers/opcua/src/wrappers.rs index 329446c0e..00d0bd68d 100644 --- a/agent/src/protocols/opcua/mod.rs +++ b/discovery-handlers/opcua/src/wrappers.rs @@ -1,18 +1,10 @@ -mod discovery_handler; -mod discovery_impl; -pub use self::discovery_handler::OpcuaDiscoveryHandler; - -/// Name of the environment variable that will be mounted into the OPC UA broker pods. -/// Holds the DiscoveryURL for the OPC UA Server the broker is to connect to. 
-pub const OPCUA_DISCOVERY_URL_LABEL: &str = "OPCUA_DISCOVERY_URL"; - /// Wrapper to enable mocking of OPC UA Client pub mod opcua_client_wrapper { - use mockall::predicate::*; - use mockall::*; + #[cfg(test)] + use mockall::{automock, predicate::*}; use opcua_client::prelude::*; - #[automock] + #[cfg_attr(test, automock)] pub trait OpcuaClient { fn find_servers( &mut self, @@ -58,23 +50,23 @@ pub mod opcua_client_wrapper { // Do not try to create a session again let session_retry_limit = 0; OpcuaClientImpl::new( - "DiscoveryClient", - "urn:DiscoveryClient", + "DiscoveryHandlerClient", + "urn:DiscoveryHandlerClient", create_sample_keypair, session_retry_limit, ) } } pub mod tcp_stream_wrapper { - use mockall::predicate::*; - use mockall::*; + #[cfg(test)] + use mockall::{automock, predicate::*}; use std::{ io, net::{SocketAddr, TcpStream as StdTcpStream}, time::Duration, }; - #[automock] + #[cfg_attr(test, automock)] pub trait TcpStream { fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()>; } @@ -84,7 +76,7 @@ pub mod tcp_stream_wrapper { impl TcpStream for TcpStreamImpl { fn connect_timeout(&self, addr: &SocketAddr, timeout: Duration) -> io::Result<()> { // Do not need to return the stream since it is not used, so map success to Ok(()) - StdTcpStream::connect_timeout(addr, timeout).and_then(|_| Ok(())) + StdTcpStream::connect_timeout(addr, timeout).map(|_| ()) } } } diff --git a/discovery-handlers/udev/Cargo.toml b/discovery-handlers/udev/Cargo.toml new file mode 100644 index 000000000..7cba92200 --- /dev/null +++ b/discovery-handlers/udev/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "akri-udev" +version = "0.6.5" +authors = ["Kate Goldenring "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +akri-discovery-utils = { path = "../../discovery-utils" } +anyhow = "1.0.38" +async-trait = "0.1.0" +env_logger = "0.8.3" +futures-util = "0.3" +log = "0.4" +pest = "2.0" +pest_derive = "2.0" +prost = "0.6" +regex = "1" +serde = "1.0.104" +serde_json = "1.0.45" +serde_yaml = "0.8.11" +serde_derive = "1.0.104" +tokio = { version = "0.2", features = ["rt-threaded", "sync", "time", "stream", "fs", "macros", "uds"] } +tonic = {version = "0.1.0", features = ["tls"] } +udev = "0.5" + +[dev-dependencies] +mockall = "0.9.0" \ No newline at end of file diff --git a/discovery-handlers/udev/src/discovery_handler.rs b/discovery-handlers/udev/src/discovery_handler.rs new file mode 100644 index 000000000..ee217f334 --- /dev/null +++ b/discovery-handlers/udev/src/discovery_handler.rs @@ -0,0 +1,159 @@ +use super::{discovery_impl::do_parse_and_find, wrappers::udev_enumerator}; +use akri_discovery_utils::discovery::{ + discovery_handler::{deserialize_discovery_details, DISCOVERED_DEVICES_CHANNEL_CAPACITY}, + v0::{ + discovery_handler_server::DiscoveryHandler, Device, DiscoverRequest, DiscoverResponse, + Mount, + }, + DiscoverStream, +}; +use async_trait::async_trait; +use log::{error, info, trace}; +use std::collections::HashSet; +use std::time::Duration; +use tokio::sync::mpsc; +use tokio::time::delay_for; +use tonic::{Response, Status}; + +// TODO: make this configurable +pub const DISCOVERY_INTERVAL_SECS: u64 = 10; + +/// This defines the udev data stored in the Configuration +/// CRD DiscoveryDetails +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct UdevDiscoveryDetails { + pub udev_rules: Vec, +} + +/// `DiscoveryHandlerImpl` discovers udev 
instances by parsing the udev rules in `discovery_handler_config.udev_rules`. +pub struct DiscoveryHandlerImpl { + register_sender: Option>, +} + +impl DiscoveryHandlerImpl { + pub fn new(register_sender: Option>) -> Self { + DiscoveryHandlerImpl { register_sender } + } +} + +#[async_trait] +impl DiscoveryHandler for DiscoveryHandlerImpl { + type DiscoverStream = DiscoverStream; + async fn discover( + &self, + request: tonic::Request, + ) -> Result, Status> { + info!("discover - called for udev protocol"); + let register_sender = self.register_sender.clone(); + let discover_request = request.get_ref(); + let (mut discovered_devices_sender, discovered_devices_receiver) = + mpsc::channel(DISCOVERED_DEVICES_CHANNEL_CAPACITY); + let discovery_handler_config: UdevDiscoveryDetails = + deserialize_discovery_details(&discover_request.discovery_details) + .map_err(|e| tonic::Status::new(tonic::Code::InvalidArgument, format!("{}", e)))?; + let mut previously_discovered_devices: Vec = Vec::new(); + tokio::spawn(async move { + let udev_rules = discovery_handler_config.udev_rules.clone(); + loop { + trace!("discover - for udev rules {:?}", udev_rules); + let mut devpaths: HashSet = HashSet::new(); + udev_rules.iter().for_each(|rule| { + let enumerator = udev_enumerator::create_enumerator(); + let paths = do_parse_and_find(enumerator, &rule).unwrap(); + paths.into_iter().for_each(|path| { + devpaths.insert(path); + }); + }); + trace!( + "discover - mapping and returning devices at devpaths {:?}", + devpaths + ); + let discovered_devices = devpaths + .into_iter() + .map(|path| { + let mut properties = std::collections::HashMap::new(); + properties.insert(super::UDEV_DEVNODE_LABEL_ID.to_string(), path.clone()); + let mount = Mount { + container_path: path.clone(), + host_path: path.clone(), + read_only: true, + }; + // TODO: use device spec + Device { + id: path, + properties, + mounts: vec![mount], + device_specs: Vec::default(), + } + }) + .collect::>(); + let mut changed_device_list = false; + let mut matching_device_count = 0; + discovered_devices.iter().for_each(|device| { + if !previously_discovered_devices.contains(device) { + changed_device_list = true; + } else { + matching_device_count += 1; + } + }); + if changed_device_list + || matching_device_count != previously_discovered_devices.len() + { + info!("discover - sending updated device list"); + previously_discovered_devices = discovered_devices.clone(); + if let Err(e) = discovered_devices_sender + .send(Ok(DiscoverResponse { + devices: discovered_devices, + })) + .await + { + error!( + "discover - for udev failed to send discovery response with error {}", + e + ); + if let Some(mut sender) = register_sender { + sender.send(()).await.unwrap(); + } + break; + } + } + delay_for(Duration::from_secs(DISCOVERY_INTERVAL_SECS)).await; + } + }); + Ok(Response::new(discovered_devices_receiver)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_deserialize_discovery_details_empty() { + // Check that udev errors if no udev rules passed in + let udev_dh_config: Result = + deserialize_discovery_details(""); + assert!(udev_dh_config.is_err()); + + let yaml = r#" + udevRules: [] + "#; + let udev_dh_config: UdevDiscoveryDetails = deserialize_discovery_details(yaml).unwrap(); + assert!(udev_dh_config.udev_rules.is_empty()); + let serialized = serde_json::to_string(&udev_dh_config).unwrap(); + let expected_deserialized = r#"{"udevRules":[]}"#; + assert_eq!(expected_deserialized, serialized); + } + + #[test] + fn 
test_deserialize_discovery_details_detailed() { + let yaml = r#" + udevRules: + - 'KERNEL=="video[0-9]*"' + "#; + let udev_dh_config: UdevDiscoveryDetails = deserialize_discovery_details(yaml).unwrap(); + assert_eq!(udev_dh_config.udev_rules.len(), 1); + assert_eq!(&udev_dh_config.udev_rules[0], "KERNEL==\"video[0-9]*\""); + } +} diff --git a/agent/src/protocols/udev/discovery_impl.rs b/discovery-handlers/udev/src/discovery_impl.rs similarity index 98% rename from agent/src/protocols/udev/discovery_impl.rs rename to discovery-handlers/udev/src/discovery_impl.rs index 93d1adc86..a1f3ce117 100644 --- a/agent/src/protocols/udev/discovery_impl.rs +++ b/discovery-handlers/udev/src/discovery_impl.rs @@ -1,10 +1,11 @@ -extern crate udev; - -use super::udev_device::{ - get_attribute_value, get_devnode, get_devpath, get_driver, get_parent, get_property_value, - get_subsystem, get_sysname, DeviceExt, +use super::wrappers::{ + udev_device::{ + get_attribute_value, get_devnode, get_devpath, get_driver, get_parent, get_property_value, + get_subsystem, get_sysname, DeviceExt, + }, + udev_enumerator::Enumerator, }; -use super::udev_enumerator::Enumerator; +use log::{error, info, trace}; use pest::iterators::Pair; use pest::Parser; use regex::Regex; @@ -12,7 +13,7 @@ use regex::Regex; const TAGS: &str = "TAGS"; #[derive(Parser)] -#[grammar = "protocols/udev/udev_rule_grammar.pest"] +#[grammar = "udev_rule_grammar.pest"] pub struct UdevRuleParser; #[derive(Debug, PartialEq)] @@ -26,7 +27,7 @@ pub struct UdevFilter<'a> { pub fn do_parse_and_find( enumerator: impl Enumerator, udev_rule_string: &str, -) -> Result, failure::Error> { +) -> Result, anyhow::Error> { let udev_filters = parse_udev_rule(udev_rule_string)?; let devpaths = find_devices(enumerator, udev_filters)?; trace!( @@ -44,7 +45,7 @@ pub fn do_parse_and_find( /// Udev discovery is only interested in match operations ("==", "!="), so all action ("=" , "+=" , "-=" , ":=") operations /// will be ignored. 
/// Udev discovery is only interested in match fields, so all action fields, such as TEST, are ignored -fn parse_udev_rule(udev_rule_string: &str) -> Result, failure::Error> { +fn parse_udev_rule(udev_rule_string: &str) -> Result, anyhow::Error> { info!( "parse_udev_rule - enter for udev rule string {}", udev_rule_string @@ -69,7 +70,7 @@ fn parse_udev_rule(udev_rule_string: &str) -> Result, failure::E let field_pair = inner_rules.next().unwrap(); let inner_field = field_pair.into_inner().next().unwrap(); if inner_field.as_rule() == Rule::unsupported_field { - return Err(failure::format_err!( + return Err(anyhow::format_err!( "parse_udev_rule - unsupported field {}", inner_field.into_inner().next().unwrap().as_str() )); @@ -91,7 +92,7 @@ fn parse_udev_rule(udev_rule_string: &str) -> Result, failure::E value: value.to_string(), }); } else { - return Err(failure::format_err!("parse_udev_rule - unsupported action operation for rule with field [{}], operation [{:?}], and value[{}]", + return Err(anyhow::format_err!("parse_udev_rule - unsupported action operation for rule with field [{}], operation [{:?}], and value[{}]", inner_field.into_inner().as_str(), operation, value)); } } @@ -546,7 +547,7 @@ fn device_or_parents_have_tag(device: &impl DeviceExt, value_regex: &Regex) -> b #[cfg(test)] mod discovery_tests { - use super::super::udev_enumerator::{create_enumerator, MockEnumerator}; + use super::super::wrappers::udev_enumerator::{create_enumerator, MockEnumerator}; use super::*; use std::{ collections::HashMap, @@ -667,7 +668,7 @@ mod discovery_tests { #[test] fn test_parse_udev_rule_from_file() { let _ = env_logger::builder().is_test(true).try_init(); - let file_path = "../test/example.rules"; + let file_path = "../../test/example.rules"; let file = File::open(file_path).expect("no such file"); let buf = BufReader::new(file); let mut num_udev_filters: Vec = Vec::new(); @@ -689,7 +690,7 @@ mod discovery_tests { #[test] fn test_parse_unsupported_udev_rule_from_file() { let _ = env_logger::builder().is_test(true).try_init(); - let file_path = "../test/example-unsupported.rules"; + let file_path = "../../test/example-unsupported.rules"; let file = File::open(file_path).expect("no such file"); let buf = BufReader::new(file); buf.lines().for_each(|line| { diff --git a/discovery-handlers/udev/src/lib.rs b/discovery-handlers/udev/src/lib.rs new file mode 100644 index 000000000..38a85da6d --- /dev/null +++ b/discovery-handlers/udev/src/lib.rs @@ -0,0 +1,18 @@ +extern crate pest; +#[macro_use] +extern crate pest_derive; +extern crate udev; +#[macro_use] +extern crate serde_derive; + +pub mod discovery_handler; +mod discovery_impl; +mod wrappers; + +/// Name of environment variable that is set in udev brokers. Contains devnode for udev device +/// the broker should connect to. 
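+///
+/// As a usage sketch (illustrative, not code from this module), a udev broker
+/// could locate its device by reading this variable and opening the devnode:
+///
+/// ```ignore
+/// let devnode = std::env::var("UDEV_DEVNODE")?;
+/// let device = std::fs::File::open(&devnode)?;
+/// ```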
+pub const UDEV_DEVNODE_LABEL_ID: &str = "UDEV_DEVNODE"; +/// Name that udev discovery handlers use when registering with the Agent +pub const DISCOVERY_HANDLER_NAME: &str = "udev"; +/// Defines whether this discovery handler discovers local devices on nodes rather than ones visible to multiple nodes +pub const SHARED: bool = false; diff --git a/agent/src/protocols/udev/udev_rule_grammar.pest b/discovery-handlers/udev/src/udev_rule_grammar.pest similarity index 100% rename from agent/src/protocols/udev/udev_rule_grammar.pest rename to discovery-handlers/udev/src/udev_rule_grammar.pest diff --git a/agent/src/protocols/udev/mod.rs b/discovery-handlers/udev/src/wrappers.rs similarity index 95% rename from agent/src/protocols/udev/mod.rs rename to discovery-handlers/udev/src/wrappers.rs index 37afd0de0..5fbecdc01 100644 --- a/agent/src/protocols/udev/mod.rs +++ b/discovery-handlers/udev/src/wrappers.rs @@ -1,8 +1,3 @@ -mod discovery_handler; -mod discovery_impl; -pub use self::discovery_handler::UdevDiscoveryHandler; -pub const UDEV_DEVNODE_LABEL_ID: &str = "UDEV_DEVNODE"; - pub mod udev_device { extern crate udev; use std::{ffi::OsStr, path::Path}; @@ -89,11 +84,11 @@ pub mod udev_device { pub mod udev_enumerator { extern crate udev; - use mockall::predicate::*; - use mockall::*; + #[cfg(test)] + use mockall::{automock, predicate::*}; /// Wrap udev::Enumerator functions in a trait to enable mocking for testing. - #[automock] + #[cfg_attr(test, automock)] pub trait Enumerator { fn match_subsystem(&mut self, value: &str) -> std::io::Result<()>; fn nomatch_subsystem(&mut self, value: &str) -> std::io::Result<()>; diff --git a/discovery-utils/Cargo.toml b/discovery-utils/Cargo.toml new file mode 100644 index 000000000..590732dd1 --- /dev/null +++ b/discovery-utils/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "akri-discovery-utils" +version = "0.6.5" +authors = ["Kate Goldenring "] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +prost = "0.6" +tonic = { version = "0.1.0", features = ["tls"] } +akri-shared = { path = "../shared" } +anyhow = "1.0.38" +async-trait = { version = "0.1.0", optional = true } +futures = { version = "0.3.1", package = "futures" } +log = "0.4" +serde = "1.0" +serde_derive = "1.0" +serde_yaml = "0.8.11" +tempfile = { version = "3.1.0", optional = true } +tokio = { version = "0.2", features = ["time", "net", "sync"] } +tower = "0.3" + +[features] +mock-discovery-handler = ["async-trait", "tempfile"] + +[dev-dependencies] +async-trait = "0.1.0" +tempfile = "3.1.0" + +[build-dependencies] +tonic-build = "0.1.1" \ No newline at end of file diff --git a/discovery-utils/build.rs b/discovery-utils/build.rs new file mode 100644 index 000000000..6d5f169a3 --- /dev/null +++ b/discovery-utils/build.rs @@ -0,0 +1,6 @@ +fn main() { + tonic_build::configure() + .out_dir("./src/discovery") + .compile(&["proto/discovery.proto"], &["proto"]) + .expect("failed to compile protos"); +} diff --git a/discovery-utils/proto/discovery.proto b/discovery-utils/proto/discovery.proto new file mode 100644 index 000000000..cfc14d968 --- /dev/null +++ b/discovery-utils/proto/discovery.proto @@ -0,0 +1,85 @@ +syntax = "proto3"; + +package v0; + +// Registration is the service advertised by the Akri Agent. +// Any `DiscoveryHandler` can register with the Akri Agent. 
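+//
+// As a sketch of the expected flow (field values are examples; the socket
+// path is hypothetical), a udev discovery handler would register with:
+//
+//   RegisterDiscoveryHandlerRequest {
+//     name: "udev",
+//     endpoint: "/var/lib/akri/udev.sock",
+//     endpoint_type: UDS,
+//     shared: false,
+//   }
+//
+// after which the Agent connects to that endpoint and calls Discover on the
+// DiscoveryHandler service defined below.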
service Registration {
+    rpc RegisterDiscoveryHandler(RegisterDiscoveryHandlerRequest) returns (Empty) {}
+}
+
+
+message RegisterDiscoveryHandlerRequest {
+    // Name of the `DiscoveryHandler`. This name is specified in an
+    // Akri Configuration, to request devices discovered by this `DiscoveryHandler`.
+    string name = 1;
+    // Endpoint for the registering `DiscoveryHandler`
+    string endpoint = 2;
+    // Specifies the type of endpoint.
+    enum EndpointType {
+        UDS = 0;
+        NETWORK = 1;
+    }
+    EndpointType endpoint_type = 3;
+    // Specifies whether this device could be used by multiple nodes (e.g. an IP camera)
+    // or can only ever be discovered by a single node (e.g. a local USB device)
+    bool shared = 4;
+}
+
+message Empty {
+}
+
+service DiscoveryHandler {
+    rpc Discover (DiscoverRequest) returns (stream DiscoverResponse);
+}
+
+message DiscoverRequest {
+    // String containing all the details (such as filtering options)
+    // the `DiscoveryHandler` needs to find a set of devices.
+    string discovery_details = 1;
+}
+
+message DiscoverResponse {
+    // List of discovered devices
+    repeated Device devices = 1;
+}
+
+message Device {
+    // Identifier for this device
+    string id = 1;
+    // Properties that identify the device. These are stored in the device's instance
+    // and set as environment variables in the device's broker Pods. May be information
+    // about where to find the device such as an RTSP URL or a device node (e.g. `/dev/video1`)
+    map<string, string> properties = 2;
+    // Optionally specify mounts for Pods that request this device as a resource
+    repeated Mount mounts = 3;
+    // Optionally specify device information to be mounted for Pods that request this device as a resource
+    repeated DeviceSpec device_specs = 4;
+}
+
+// From Device Plugin API
+// Mount specifies a host volume to mount into a container.
+// where device library or tools are installed on host and container
+message Mount {
+    // Path of the mount within the container.
+    string container_path = 1;
+    // Path of the mount on the host.
+    string host_path = 2;
+    // If set, the mount is read-only.
+    bool read_only = 3;
+}
+
+// From Device Plugin API
+// DeviceSpec specifies a host device to mount into a container.
+message DeviceSpec {
+    // Path of the device within the container.
+    string container_path = 1;
+    // Path of the device on the host.
+    string host_path = 2;
+    // Cgroups permissions of the device, candidates are one or more of
+    // * r - allows container to read from the specified device.
+    // * w - allows container to write to the specified device.
+    // * m - allows container to create device files that do not yet exist.
+ string permissions = 3; +} + diff --git a/discovery-utils/src/discovery/mod.rs b/discovery-utils/src/discovery/mod.rs new file mode 100644 index 000000000..30a3244a4 --- /dev/null +++ b/discovery-utils/src/discovery/mod.rs @@ -0,0 +1,312 @@ +/// Akri's Discovery API code, which is auto-generated by `build.rs` from `proto/discovery.proto` +pub mod v0; + +/// Definition of the DiscoverStream type expected for supported embedded Akri DiscoveryHandlers +pub type DiscoverStream = tokio::sync::mpsc::Receiver>; + +pub mod discovery_handler { + use super::super::registration_client::{ + register_discovery_handler, register_discovery_handler_again, + }; + use super::{ + server::run_discovery_server, + v0::{ + discovery_handler_server::DiscoveryHandler, + register_discovery_handler_request::EndpointType, RegisterDiscoveryHandlerRequest, + }, + }; + use log::trace; + use tokio::sync::mpsc; + + const DISCOVERY_PORT: i16 = 10000; + + /// Capacity of channel over which a message is sent by `DiscoveryHandler::discover` that its `DiscoveryHandler` + /// should re-register due to the Agent dropping its end of the current connection. + pub const REGISTER_AGAIN_CHANNEL_CAPACITY: usize = 1; + + /// Capacity of channel over which discovery handlers send updates to clients about currently discovered devices. It + /// is assumed that clients are always listening for updates; however, the size is increased to account for any delays + /// in receiving. + pub const DISCOVERED_DEVICES_CHANNEL_CAPACITY: usize = 4; + + pub async fn run_discovery_handler( + discovery_handler: impl DiscoveryHandler, + register_receiver: mpsc::Receiver<()>, + protocol_name: &str, + shared: bool, + ) -> Result<(), Box> { + let mut use_uds = true; + let mut endpoint: String = match std::env::var("POD_IP") { + Ok(pod_ip) => { + trace!("run_discovery_handler - registering with Agent with IP endpoint"); + use_uds = false; + format!("{}:{}", pod_ip, DISCOVERY_PORT) + } + Err(_) => { + trace!("run_discovery_handler - registering with Agent with uds endpoint"); + format!( + "{}/{}.sock", + std::env::var(super::super::DISCOVERY_HANDLERS_DIRECTORY_LABEL).unwrap(), + protocol_name + ) + } + }; + let endpoint_clone = endpoint.clone(); + let discovery_handle = tokio::spawn(async move { + run_discovery_server(discovery_handler, &endpoint_clone) + .await + .unwrap(); + }); + let endpoint_type = if !use_uds { + endpoint.insert_str(0, "http://"); + EndpointType::Network + } else { + EndpointType::Uds + }; + let register_request = RegisterDiscoveryHandlerRequest { + name: protocol_name.to_string(), + endpoint, + endpoint_type: endpoint_type as i32, + shared, + }; + register_discovery_handler(®ister_request).await?; + let registration_handle = tokio::spawn(async move { + register_discovery_handler_again(register_receiver, ®ister_request).await; + }); + tokio::try_join!(discovery_handle, registration_handle)?; + Ok(()) + } + + /// This obtains the expected type `T` from a discovery details String by running it through function `f` which will + /// attempt to deserialize the String. 
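+    ///
+    /// Usage sketch with a hypothetical `MyDetails` type, mirroring how the
+    /// udev and OPC UA handlers deserialize their Configuration details:
+    ///
+    /// ```ignore
+    /// use serde_derive::Deserialize;
+    ///
+    /// #[derive(Deserialize)]
+    /// #[serde(rename_all = "camelCase")]
+    /// struct MyDetails {
+    ///     udev_rules: Vec<String>,
+    /// }
+    ///
+    /// let details: MyDetails = deserialize_discovery_details("udevRules: []")?;
+    /// ```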
+ pub fn deserialize_discovery_details(discovery_details: &str) -> Result + where + T: serde::de::DeserializeOwned, + { + let discovery_handler_config: T = serde_yaml::from_str(discovery_details).map_err(|e| { + anyhow::format_err!( + "Configuration discovery details improperly configured with error {:?}", + e + ) + })?; + Ok(discovery_handler_config) + } +} + +#[cfg(any(feature = "mock-discovery-handler", test))] +pub mod mock_discovery_handler { + use super::v0::{ + discovery_handler_server::DiscoveryHandler, Device, DiscoverRequest, DiscoverResponse, + }; + use akri_shared::uds::unix_stream; + use async_trait::async_trait; + use tempfile::Builder; + use tokio::sync::mpsc; + + /// Simple discovery handler for tests + /// Has fields for specifying that it return an error or a set of devices. + pub struct MockDiscoveryHandler { + pub return_error: bool, + pub devices: Vec, + } + + #[async_trait] + impl DiscoveryHandler for MockDiscoveryHandler { + type DiscoverStream = super::DiscoverStream; + async fn discover( + &self, + _: tonic::Request, + ) -> Result, tonic::Status> { + let (mut discovered_devices_sender, discovered_devices_receiver) = + mpsc::channel(super::discovery_handler::DISCOVERED_DEVICES_CHANNEL_CAPACITY); + let devices = self.devices.clone(); + tokio::spawn(async move { + discovered_devices_sender + .send(Ok(DiscoverResponse { devices })) + .await + .unwrap(); + }); + // Conditionally return error if specified + if self.return_error { + Err(tonic::Status::invalid_argument( + "mock discovery handler error", + )) + } else { + Ok(tonic::Response::new(discovered_devices_receiver)) + } + } + } + + pub fn get_mock_discovery_handler_dir_and_endpoint(socket_name: &str) -> (String, String) { + let discovery_handler_temp_dir = Builder::new() + .prefix("discovery-handlers") + .tempdir() + .unwrap(); + let discovery_handler_temp_dir_path = discovery_handler_temp_dir.path().join(socket_name); + ( + discovery_handler_temp_dir + .path() + .to_str() + .unwrap() + .to_string(), + discovery_handler_temp_dir_path + .to_str() + .unwrap() + .to_string(), + ) + } + + pub async fn run_mock_discovery_handler( + discovery_handler_dir: &str, + discovery_handler_endpoint: &str, + return_error: bool, + devices: Vec, + ) -> tokio::task::JoinHandle<()> { + let discovery_handler = MockDiscoveryHandler { + return_error, + devices, + }; + let discovery_handler_dir_string = discovery_handler_dir.to_string(); + let discovery_handler_endpoint_string = discovery_handler_endpoint.to_string(); + let handle = tokio::spawn(async move { + super::server::internal_run_discovery_server( + discovery_handler, + &discovery_handler_endpoint_string, + &discovery_handler_dir_string, + ) + .await + .unwrap(); + }); + + // Try to connect in loop until first thread has served Discovery Handler + unix_stream::try_connect(discovery_handler_endpoint) + .await + .unwrap(); + handle + } +} + +pub mod server { + use super::v0::discovery_handler_server::{DiscoveryHandler, DiscoveryHandlerServer}; + use akri_shared::uds::unix_stream; + use futures::stream::TryStreamExt; + use log::info; + use std::path::Path; + use tokio::net::UnixListener; + use tonic::transport::Server; + + pub async fn run_discovery_server( + discovery_handler: impl DiscoveryHandler, + discovery_endpoint: &str, + ) -> Result<(), Box> { + internal_run_discovery_server( + discovery_handler, + discovery_endpoint, + &std::env::var(super::super::DISCOVERY_HANDLERS_DIRECTORY_LABEL).unwrap(), + ) + .await + } + + /// Creates a DiscoveryHandlerServer for the given Discovery 
Handler at the specified endpoint Verifies the endpoint + /// by checking that it is in the discovery handler directory if it is UDS or that it is a valid IP address and + /// port. + pub async fn internal_run_discovery_server( + discovery_handler: impl DiscoveryHandler, + discovery_endpoint: &str, + discovery_handler_directory: &str, + ) -> Result<(), Box> { + info!("internal_run_discovery_server - entered"); + + if discovery_endpoint.starts_with(discovery_handler_directory) { + tokio::fs::create_dir_all(Path::new(&discovery_endpoint[..]).parent().unwrap()).await?; + // Delete socket if it already exists + std::fs::remove_file(discovery_endpoint).unwrap_or(()); + let mut uds = UnixListener::bind(discovery_endpoint)?; + Server::builder() + .add_service(DiscoveryHandlerServer::new(discovery_handler)) + .serve_with_incoming(uds.incoming().map_ok(unix_stream::UnixStream)) + .await?; + std::fs::remove_file(discovery_endpoint).unwrap_or(()); + } else { + let addr = discovery_endpoint.parse()?; + Server::builder() + .add_service(DiscoveryHandlerServer::new(discovery_handler)) + .serve(addr) + .await?; + } + info!("internal_run_discovery_server - finished"); + Ok(()) + } + + #[cfg(test)] + pub mod tests { + use super::super::{ + mock_discovery_handler::{ + get_mock_discovery_handler_dir_and_endpoint, run_mock_discovery_handler, + MockDiscoveryHandler, + }, + v0::{discovery_handler_client::DiscoveryHandlerClient, DiscoverRequest}, + }; + use super::*; + use std::convert::TryFrom; + use tempfile::Builder; + use tokio::net::UnixStream; + use tonic::{ + transport::{Endpoint, Uri}, + Request, + }; + + #[tokio::test] + async fn test_run_discovery_server_uds() { + let (discovery_handler_dir, discovery_handler_socket) = + get_mock_discovery_handler_dir_and_endpoint("protocol.sock"); + let _handle: tokio::task::JoinHandle<()> = run_mock_discovery_handler( + &discovery_handler_dir, + &discovery_handler_socket, + false, + Vec::new(), + ) + .await; + let channel = Endpoint::try_from("dummy://[::]:50051") + .unwrap() + .connect_with_connector(tower::service_fn(move |_: Uri| { + UnixStream::connect(discovery_handler_socket.clone()) + })) + .await + .unwrap(); + let mut discovery_handler_client = DiscoveryHandlerClient::new(channel); + let mut stream = discovery_handler_client + .discover(Request::new(DiscoverRequest { + discovery_details: String::new(), + })) + .await + .unwrap() + .into_inner(); + assert!(stream.message().await.unwrap().unwrap().devices.is_empty()); + } + + // Test when improper socket path or IP address is given as an endpoint + #[tokio::test] + async fn test_run_discovery_server_error_invalid_ip_addr() { + let discovery_handler = MockDiscoveryHandler { + return_error: false, + devices: Vec::new(), + }; + let discovery_handler_temp_dir = Builder::new() + .prefix("discovery-handlers") + .tempdir() + .unwrap(); + if let Err(e) = internal_run_discovery_server( + discovery_handler, + "random", + discovery_handler_temp_dir.path().to_str().unwrap(), + ) + .await + { + assert!((*e).to_string().contains("invalid IP address syntax")) + } else { + panic!("should be invalid IP address error") + } + } + } +} diff --git a/discovery-utils/src/discovery/v0.rs b/discovery-utils/src/discovery/v0.rs new file mode 100644 index 000000000..699178fa5 --- /dev/null +++ b/discovery-utils/src/discovery/v0.rs @@ -0,0 +1,426 @@ +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RegisterDiscoveryHandlerRequest { + /// Name of the `DiscoveryHandler`. 
This name is specified in an + /// Akri Configuration, to request devices discovered by this `DiscoveryHandler`. + #[prost(string, tag = "1")] + pub name: std::string::String, + /// Endpoint for the registering `DiscoveryHandler` + #[prost(string, tag = "2")] + pub endpoint: std::string::String, + #[prost( + enumeration = "register_discovery_handler_request::EndpointType", + tag = "3" + )] + pub endpoint_type: i32, + /// Specifies whether this device could be used by multiple nodes (e.g. an IP camera) + /// or can only be ever be discovered by a single node (e.g. a local USB device) + #[prost(bool, tag = "4")] + pub shared: bool, +} +pub mod register_discovery_handler_request { + /// Specifies the type of endpoint. + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum EndpointType { + Uds = 0, + Network = 1, + } +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Empty {} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DiscoverRequest { + /// String containing all the details (such as filtering options) + /// the `DiscoveryHandler` needs to find a set of devices. + #[prost(string, tag = "1")] + pub discovery_details: std::string::String, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DiscoverResponse { + /// List of discovered devices + #[prost(message, repeated, tag = "1")] + pub devices: ::std::vec::Vec, +} +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Device { + /// Identifier for this device + #[prost(string, tag = "1")] + pub id: std::string::String, + /// Properties that identify the device. These are stored in the device's instance + /// and set as environment variables in the device's broker Pods. May be information + /// about where to find the device such as an RTSP URL or a device node (e.g. `/dev/video1`) + #[prost(map = "string, string", tag = "2")] + pub properties: ::std::collections::HashMap, + /// Optionally specify mounts for Pods that request this device as a resource + #[prost(message, repeated, tag = "3")] + pub mounts: ::std::vec::Vec, + /// Optionally specify device information to be mounted for Pods that request this device as a resource + #[prost(message, repeated, tag = "4")] + pub device_specs: ::std::vec::Vec, +} +/// From Device Plugin API +/// Mount specifies a host volume to mount into a container. +/// where device library or tools are installed on host and container +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Mount { + /// Path of the mount within the container. + #[prost(string, tag = "1")] + pub container_path: std::string::String, + /// Path of the mount on the host. + #[prost(string, tag = "2")] + pub host_path: std::string::String, + /// If set, the mount is read-only. + #[prost(bool, tag = "3")] + pub read_only: bool, +} +/// From Device Plugin API +/// DeviceSpec specifies a host device to mount into a container. +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeviceSpec { + /// Path of the device within the container. + #[prost(string, tag = "1")] + pub container_path: std::string::String, + /// Path of the device on the host. + #[prost(string, tag = "2")] + pub host_path: std::string::String, + /// Cgroups permissions of the device, candidates are one or more of + /// * r - allows container to read from the specified device. + /// * w - allows container to write to the specified device. + /// * m - allows container to create device files that do not yet exist. 
+ #[prost(string, tag = "3")] + pub permissions: std::string::String, +} +#[doc = r" Generated client implementations."] +pub mod registration_client { + #![allow(unused_variables, dead_code, missing_docs)] + use tonic::codegen::*; + #[doc = " Registration is the service advertised by the Akri Agent."] + #[doc = " Any `DiscoveryHandler` can register with the Akri Agent."] + pub struct RegistrationClient { + inner: tonic::client::Grpc, + } + impl RegistrationClient { + #[doc = r" Attempt to create a new client by connecting to a given endpoint."] + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl RegistrationClient + where + T: tonic::client::GrpcService, + T::ResponseBody: Body + HttpBody + Send + 'static, + T::Error: Into, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { + let inner = tonic::client::Grpc::with_interceptor(inner, interceptor); + Self { inner } + } + pub async fn register_discovery_handler( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result, tonic::Status> { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = + http::uri::PathAndQuery::from_static("/v0.Registration/RegisterDiscoveryHandler"); + self.inner.unary(request.into_request(), path, codec).await + } + } + impl Clone for RegistrationClient { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } + } +} +#[doc = r" Generated client implementations."] +pub mod discovery_handler_client { + #![allow(unused_variables, dead_code, missing_docs)] + use tonic::codegen::*; + pub struct DiscoveryHandlerClient { + inner: tonic::client::Grpc, + } + impl DiscoveryHandlerClient { + #[doc = r" Attempt to create a new client by connecting to a given endpoint."] + pub async fn connect(dst: D) -> Result + where + D: std::convert::TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl DiscoveryHandlerClient + where + T: tonic::client::GrpcService, + T::ResponseBody: Body + HttpBody + Send + 'static, + T::Error: Into, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_interceptor(inner: T, interceptor: impl Into) -> Self { + let inner = tonic::client::Grpc::with_interceptor(inner, interceptor); + Self { inner } + } + pub async fn discover( + &mut self, + request: impl tonic::IntoRequest, + ) -> Result>, tonic::Status> + { + self.inner.ready().await.map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/v0.DiscoveryHandler/Discover"); + self.inner + .server_streaming(request.into_request(), path, codec) + .await + } + } + impl Clone for DiscoveryHandlerClient { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } + } +} +#[doc = r" Generated server implementations."] +pub mod registration_server { + #![allow(unused_variables, dead_code, missing_docs)] + use tonic::codegen::*; + #[doc = "Generated 
trait containing gRPC methods that should be implemented for use with RegistrationServer."]
+    #[async_trait]
+    pub trait Registration: Send + Sync + 'static {
+        async fn register_discovery_handler(
+            &self,
+            request: tonic::Request<super::RegisterDiscoveryHandlerRequest>,
+        ) -> Result<tonic::Response<super::Empty>, tonic::Status>;
+    }
+    #[doc = " Registration is the service advertised by the Akri Agent."]
+    #[doc = " Any `DiscoveryHandler` can register with the Akri Agent."]
+    #[derive(Debug)]
+    #[doc(hidden)]
+    pub struct RegistrationServer<T: Registration> {
+        inner: _Inner<T>,
+    }
+    struct _Inner<T>(Arc<T>, Option<tonic::Interceptor>);
+    impl<T: Registration> RegistrationServer<T> {
+        pub fn new(inner: T) -> Self {
+            let inner = Arc::new(inner);
+            let inner = _Inner(inner, None);
+            Self { inner }
+        }
+        pub fn with_interceptor(inner: T, interceptor: impl Into<tonic::Interceptor>) -> Self {
+            let inner = Arc::new(inner);
+            let inner = _Inner(inner, Some(interceptor.into()));
+            Self { inner }
+        }
+    }
+    impl<T, B> Service<http::Request<B>> for RegistrationServer<T>
+    where
+        T: Registration,
+        B: HttpBody + Send + Sync + 'static,
+        B::Error: Into<StdError> + Send + 'static,
+    {
+        type Response = http::Response<tonic::body::BoxBody>;
+        type Error = Never;
+        type Future = BoxFuture<Self::Response, Self::Error>;
+        fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+            Poll::Ready(Ok(()))
+        }
+        fn call(&mut self, req: http::Request<B>) -> Self::Future {
+            let inner = self.inner.clone();
+            match req.uri().path() {
+                "/v0.Registration/RegisterDiscoveryHandler" => {
+                    struct RegisterDiscoveryHandlerSvc<T: Registration>(pub Arc<T>);
+                    impl<T: Registration>
+                        tonic::server::UnaryService<super::RegisterDiscoveryHandlerRequest>
+                        for RegisterDiscoveryHandlerSvc<T>
+                    {
+                        type Response = super::Empty;
+                        type Future = BoxFuture<tonic::Response<Self::Response>, tonic::Status>;
+                        fn call(
+                            &mut self,
+                            request: tonic::Request<super::RegisterDiscoveryHandlerRequest>,
+                        ) -> Self::Future {
+                            let inner = self.0.clone();
+                            let fut =
+                                async move { inner.register_discovery_handler(request).await };
+                            Box::pin(fut)
+                        }
+                    }
+                    let inner = self.inner.clone();
+                    let fut = async move {
+                        let interceptor = inner.1.clone();
+                        let inner = inner.0;
+                        let method = RegisterDiscoveryHandlerSvc(inner);
+                        let codec = tonic::codec::ProstCodec::default();
+                        let mut grpc = if let Some(interceptor) = interceptor {
+                            tonic::server::Grpc::with_interceptor(codec, interceptor)
+                        } else {
+                            tonic::server::Grpc::new(codec)
+                        };
+                        let res = grpc.unary(method, req).await;
+                        Ok(res)
+                    };
+                    Box::pin(fut)
+                }
+                _ => Box::pin(async move {
+                    Ok(http::Response::builder()
+                        .status(200)
+                        .header("grpc-status", "12")
+                        .body(tonic::body::BoxBody::empty())
+                        .unwrap())
+                }),
+            }
+        }
+    }
+    impl<T: Registration> Clone for RegistrationServer<T> {
+        fn clone(&self) -> Self {
+            let inner = self.inner.clone();
+            Self { inner }
+        }
+    }
+    impl<T: Registration> Clone for _Inner<T> {
+        fn clone(&self) -> Self {
+            Self(self.0.clone(), self.1.clone())
+        }
+    }
+    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
+        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+            write!(f, "{:?}", self.0)
+        }
+    }
+    impl<T: Registration> tonic::transport::NamedService for RegistrationServer<T> {
+        const NAME: &'static str = "v0.Registration";
+    }
+}
+#[doc = r" Generated server implementations."]
+pub mod discovery_handler_server {
+    #![allow(unused_variables, dead_code, missing_docs)]
+    use tonic::codegen::*;
+    #[doc = "Generated trait containing gRPC methods that should be implemented for use with DiscoveryHandlerServer."]
+    #[async_trait]
+    pub trait DiscoveryHandler: Send + Sync + 'static {
+        #[doc = "Server streaming response type for the Discover method."]
+        type DiscoverStream: Stream<Item = Result<super::DiscoverResponse, tonic::Status>>
+            + Send
+            + Sync
+            + 'static;
+        async fn discover(
+            &self,
+            request: tonic::Request<super::DiscoverRequest>,
+        ) -> Result<tonic::Response<Self::DiscoverStream>, tonic::Status>;
+    }
+    #[derive(Debug)]
+    #[doc(hidden)]
+    pub struct DiscoveryHandlerServer<T: DiscoveryHandler> {
+        inner: _Inner<T>,
+    }
+    struct _Inner<T>(Arc<T>, Option<tonic::Interceptor>);
+    impl<T: DiscoveryHandler> DiscoveryHandlerServer<T> {
+        pub fn new(inner: T) -> Self {
+            let inner = Arc::new(inner);
+            let inner = _Inner(inner, None);
+            Self { inner }
+        }
+        pub fn with_interceptor(inner: T, interceptor: impl Into<tonic::Interceptor>) -> Self {
+            let inner = Arc::new(inner);
+            let inner = _Inner(inner, Some(interceptor.into()));
+            Self { inner }
+        }
+    }
+    impl<T, B> Service<http::Request<B>> for DiscoveryHandlerServer<T>
+    where
+        T: DiscoveryHandler,
+        B: HttpBody + Send + Sync + 'static,
+        B::Error: Into<StdError> + Send + 'static,
+    {
+        type Response = http::Response<tonic::body::BoxBody>;
+        type Error = Never;
+        type Future = BoxFuture<Self::Response, Self::Error>;
+        fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+            Poll::Ready(Ok(()))
+        }
+        fn call(&mut self, req: http::Request<B>) -> Self::Future {
+            let inner = self.inner.clone();
+            match req.uri().path() {
+                "/v0.DiscoveryHandler/Discover" => {
+                    struct DiscoverSvc<T: DiscoveryHandler>(pub Arc<T>);
+                    impl<T: DiscoveryHandler>
+                        tonic::server::ServerStreamingService<super::DiscoverRequest>
+                        for DiscoverSvc<T>
+                    {
+                        type Response = super::DiscoverResponse;
+                        type ResponseStream = T::DiscoverStream;
+                        type Future =
+                            BoxFuture<tonic::Response<Self::ResponseStream>, tonic::Status>;
+                        fn call(
+                            &mut self,
+                            request: tonic::Request<super::DiscoverRequest>,
+                        ) -> Self::Future {
+                            let inner = self.0.clone();
+                            let fut = async move { inner.discover(request).await };
+                            Box::pin(fut)
+                        }
+                    }
+                    let inner = self.inner.clone();
+                    let fut = async move {
+                        let interceptor = inner.1;
+                        let inner = inner.0;
+                        let method = DiscoverSvc(inner);
+                        let codec = tonic::codec::ProstCodec::default();
+                        let mut grpc = if let Some(interceptor) = interceptor {
+                            tonic::server::Grpc::with_interceptor(codec, interceptor)
+                        } else {
+                            tonic::server::Grpc::new(codec)
+                        };
+                        let res = grpc.server_streaming(method, req).await;
+                        Ok(res)
+                    };
+                    Box::pin(fut)
+                }
+                _ => Box::pin(async move {
+                    Ok(http::Response::builder()
+                        .status(200)
+                        .header("grpc-status", "12")
+                        .body(tonic::body::BoxBody::empty())
+                        .unwrap())
+                }),
+            }
+        }
+    }
+    impl<T: DiscoveryHandler> Clone for DiscoveryHandlerServer<T> {
+        fn clone(&self) -> Self {
+            let inner = self.inner.clone();
+            Self { inner }
+        }
+    }
+    impl<T: DiscoveryHandler> Clone for _Inner<T> {
+        fn clone(&self) -> Self {
+            Self(self.0.clone(), self.1.clone())
+        }
+    }
+    impl<T: std::fmt::Debug> std::fmt::Debug for _Inner<T> {
+        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+            write!(f, "{:?}", self.0)
+        }
+    }
+    impl<T: DiscoveryHandler> tonic::transport::NamedService for DiscoveryHandlerServer<T> {
+        const NAME: &'static str = "v0.DiscoveryHandler";
+    }
+}
diff --git a/discovery-utils/src/filtering.rs b/discovery-utils/src/filtering.rs
new file mode 100644
index 000000000..7c98f3de1
--- /dev/null
+++ b/discovery-utils/src/filtering.rs
@@ -0,0 +1,99 @@
+/// This defines the types of supported filters
+#[derive(Serialize, Deserialize, Clone, Debug, PartialEq)]
+pub enum FilterType {
+    /// If the filter type is Exclude, any items NOT found in the
+    /// list are accepted
+    Exclude,
+    /// If the filter type is Include, only items found in the
+    /// list are accepted
+    Include,
+}
+
+/// The default filter type is `Include`
+fn default_action() -> FilterType {
+    FilterType::Include
+}
+
+/// This defines a filter list.
+///
+/// The items list can either define the only acceptable
+/// items (Include) or can define the only unacceptable items
+/// (Exclude)
+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct FilterList {
+    /// This defines a list of items that will be evaluated as part
+    /// of the filtering process
+    #[serde(skip_serializing_if = "Vec::is_empty")]
+    pub items: Vec<String>,
+    /// This defines what the evaluation of items will be. The default
+    /// is `Include`
+    #[serde(default = "default_action")]
+    pub action: FilterType,
+}
+
+/// This tests whether an item should be included according to the `FilterList`
+pub fn should_include(filter_list: Option<&FilterList>, item: &str) -> bool {
+    if filter_list.is_none() {
+        return true;
+    }
+    let item_contained = filter_list.unwrap().items.contains(&item.to_string());
+    if filter_list.as_ref().unwrap().action == FilterType::Include {
+        item_contained
+    } else {
+        !item_contained
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn test_should_include() {
+        // Test when FilterType::Exclude
+        let exclude_items = vec!["beep".to_string(), "bop".to_string()];
+        let exclude_filter_list = Some(FilterList {
+            items: exclude_items,
+            action: FilterType::Exclude,
+        });
+        assert_eq!(should_include(exclude_filter_list.as_ref(), "beep"), false);
+        assert_eq!(should_include(exclude_filter_list.as_ref(), "bop"), false);
+        assert_eq!(should_include(exclude_filter_list.as_ref(), "boop"), true);
+
+        // Test when FilterType::Exclude and FilterList.items is empty
+        let empty_exclude_items = Vec::new();
+        let empty_exclude_filter_list = Some(FilterList {
+            items: empty_exclude_items,
+            action: FilterType::Exclude,
+        });
+        assert_eq!(
+            should_include(empty_exclude_filter_list.as_ref(), "beep"),
+            true
+        );
+
+        // Test when FilterType::Include
+        let include_items = vec!["beep".to_string(), "bop".to_string()];
+        let include_filter_list = Some(FilterList {
+            items: include_items,
+            action: FilterType::Include,
+        });
+        assert_eq!(should_include(include_filter_list.as_ref(), "beep"), true);
+        assert_eq!(should_include(include_filter_list.as_ref(), "bop"), true);
+        assert_eq!(should_include(include_filter_list.as_ref(), "boop"), false);
+
+        // Test when FilterType::Include and FilterList.items is empty
+        let empty_include_items = Vec::new();
+        let empty_include_filter_list = Some(FilterList {
+            items: empty_include_items,
+            action: FilterType::Include,
+        });
+        assert_eq!(
+            should_include(empty_include_filter_list.as_ref(), "beep"),
+            false
+        );
+
+        // Test when None
+        assert_eq!(should_include(None, "beep"), true);
+    }
+}
diff --git a/discovery-utils/src/lib.rs b/discovery-utils/src/lib.rs
new file mode 100644
index 000000000..e0067f9c7
--- /dev/null
+++ b/discovery-utils/src/lib.rs
@@ -0,0 +1,22 @@
+pub mod discovery;
+pub mod filtering;
+pub mod registration_client;
+
+#[macro_use]
+extern crate serde_derive;
+
+/// Path of the Agent registration socket
+pub const AGENT_REGISTRATION_SOCKET_NAME: &str = "agent-registration.sock";
+
+/// Name of the environment variable that holds the directory containing the Agent registration
+/// and Discovery Handler sockets
+pub const DISCOVERY_HANDLERS_DIRECTORY_LABEL: &str = "DISCOVERY_HANDLERS_DIRECTORY";
+
+/// Returns the socket address for the Agent registration service
+pub fn get_registration_socket() -> String {
+    std::path::Path::new(&std::env::var(DISCOVERY_HANDLERS_DIRECTORY_LABEL).unwrap())
+        .join(AGENT_REGISTRATION_SOCKET_NAME)
+        .to_str()
+        .unwrap()
+        .to_string()
+}
diff --git a/discovery-utils/src/registration_client.rs b/discovery-utils/src/registration_client.rs
new file mode 100644
index 000000000..7c7fc1231
--- /dev/null
+++ b/discovery-utils/src/registration_client.rs
@@ -0,0 +1,50 @@
+use super::discovery::v0::{
+    registration_client::RegistrationClient, RegisterDiscoveryHandlerRequest,
+};
+use log::{info, trace};
+use std::convert::TryFrom;
+use tonic::{
+    transport::{Endpoint, Uri},
+    Request,
+};
+
+pub async fn register_discovery_handler(
+    register_request: &RegisterDiscoveryHandlerRequest,
+) -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+    info!("register_discovery_handler - entered");
+    loop {
+        // We will ignore this dummy uri because UDS does not use it.
+        if let Ok(channel) = Endpoint::try_from("dummy://[::]:50051")?
+            .connect_with_connector(tower::service_fn(move |_: Uri| {
+                tokio::net::UnixStream::connect(super::get_registration_socket())
+            }))
+            .await
+        {
+            let mut client = RegistrationClient::new(channel);
+            let request = Request::new(register_request.clone());
+            client.register_discovery_handler(request).await?;
+            break;
+        }
+        trace!("register_discovery_handler - sleeping for 10 seconds and trying again");
+        tokio::time::delay_for(std::time::Duration::from_secs(10)).await;
+    }
+    Ok(())
+}
+
+/// Continually waits for message to re-register with an Agent
+pub async fn register_discovery_handler_again(
+    mut register_receiver: tokio::sync::mpsc::Receiver<()>,
+    register_request: &RegisterDiscoveryHandlerRequest,
+) {
+    loop {
+        match register_receiver.recv().await {
+            Some(_) => {
+                info!("register_again - received signal ... registering with Agent again");
+                register_discovery_handler(register_request).await.unwrap();
+            }
+            None => {
+                info!("register_again - connection to register_again_sender closed ... error")
+            }
+        }
+    }
+}
diff --git a/docs/agent-in-depth.md b/docs/agent-in-depth.md
index 77f5de7b2..4da7db325 100644
--- a/docs/agent-in-depth.md
+++ b/docs/agent-in-depth.md
@@ -9,7 +9,7 @@ These two tasks enable Akri to find configured resources (leaf devices), expose
 ## Handling resource availability changes
 The first step in handling resource availability is determining what resources (leaf devices) to look for. This is accomplished by finding existing Configurations and watching for changes to them.
-Once the Akri Agent understands what resources to look for (via `Configuration.protocol`), it will find any resources that are visible.
+Once the Akri Agent understands what resources to look for (via `Configuration.discovery_handler`), it will [find any resources that are visible](#resource-discovery).
 
 For each resource that is found:
@@ -24,3 +24,29 @@ This process allows Akri to dynamically represent resources that appear and disa
 To enable resource sharing, the Akri Agent creates and updates the `Instance.deviceUsage` map and communicates with kubelet. The `Instance.deviceUsage` map is used to coordinate between Nodes. The kubelet communication allows Akri Agent to communicate any resource availability changes to the Kubernetes scheduler.
 
 For more detailed information, see the [in-depth resource sharing doc](./resource-sharing-in-depth.md).
+
+## Resource discovery
+The Agent discovers resources via Discovery Handlers (DHs). A Discovery Handler is anything that implements the
+`DiscoveryHandler` service defined in [`discovery.proto`](../discovery-utils/proto/discovery.proto). In order to be
+utilized, a DH must register with the Agent, which hosts the `Registration` service defined in
+[`discovery.proto`](../discovery-utils/proto/discovery.proto). The Agent maintains a list of registered DHs and their
+connectivity statuses, each of which is either `Waiting`, `Active`, or `Offline(Instant)`. When registered, a DH's status is
+`Waiting`. Once a Configuration requesting resources discovered by a DH is applied to the Akri-enabled cluster, the
+Agent will create a connection with the DH requested in the Configuration and set the status of the DH to `Active`. If
+the Agent is unable to connect or loses a connection with a DH, its status is set to `Offline(Instant)`. The `Instant`
+marks the time at which the DH became unresponsive. If the DH has been offline for more than 5 minutes, it is removed
+from the Agent's list of registered Discovery Handlers. If a Configuration is deleted, the Agent drops the connection it
+made with all DHs for that Configuration and marks the DHs' statuses as `Waiting`. Note that, while probably not
+commonplace, the Agent allows multiple DHs to be registered for the same protocol, i.e., you could have two udev DHs
+running on a node on different sockets.
+
+The Agent's registration service defaults to running on the socket `/var/lib/akri/agent-registration.sock` but can be
+configured with Helm. While Discovery Handlers must register with this service over UDS, the Discovery Handler's service
+can run over UDS or an IP-based endpoint.
+
+Supported Rust DHs each have a [library](../discovery-handlers) and a [binary
+implementation](../discovery-handler-modules). This allows them to be run either within the Agent binary or in their own
+Pod.
+
+Reference the [Discovery Handler development document](./discovery-handler-development.md) to learn how to implement a
+Discovery Handler.
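+
+As a minimal sketch of registration (assuming the crate is imported as `akri_discovery_utils` and that
+`RegisterDiscoveryHandlerRequest` carries the handler's name, endpoint, endpoint type, and sharability as defined in
+`discovery.proto`), a Discovery Handler could announce itself to the Agent like so:
+```rust
+use akri_discovery_utils::discovery::v0::RegisterDiscoveryHandlerRequest;
+use akri_discovery_utils::registration_client::register_discovery_handler;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
+    // Hypothetical "http" Discovery Handler serving its DiscoveryHandler service over UDS.
+    // Assumes DISCOVERY_HANDLERS_DIRECTORY is set, since the registration helper uses it
+    // to locate the Agent's registration socket.
+    let request = RegisterDiscoveryHandlerRequest {
+        name: "http".to_string(),
+        endpoint: "/var/lib/akri/http.sock".to_string(),
+        endpoint_type: 0, // assumed: the UDS variant of the generated EndpointType enum
+        shared: true,
+    };
+    // register_discovery_handler retries every 10 seconds until the Agent's
+    // registration socket accepts the connection.
+    register_discovery_handler(&request).await?;
+    Ok(())
+}
+```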
diff --git a/docs/architecture.md b/docs/architecture.md
index aa81cc606..237eb6950 100644
--- a/docs/architecture.md
+++ b/docs/architecture.md
@@ -15,7 +15,7 @@ The configuration of Akri is enabled by the Configuration CRD. Akri users will
 * a ServiceSpec (spec.instanceServiceSpec) that defines the service that provides a single stable endpoint to access each individual resource's set of broker pods.
 * a ServiceSpec (spec.configurationServiceSpec) that defines the service that provides a single stable endpoint to access the set of all brokers for all resources associated with the Configuration.
 
-Akri has already provided two Configurations, one for discovering IP cameras using the ONVIF protocol and the other for discovering USB cameras via udev. Let's look at an [example ONVIF Configuration yaml](../test/yaml/akri-onvif-video.yaml). You can see it specifies the protocol ONVIF, an image for the broker pod, a capacity of 5, and two Kubernetes services. In this case, the broker pod is a sample frame server we have provided. To get only the frames from a specific camera, a user could point an application at the Instance service, while the Configuration service provides the frames from all the cameras.The ONVIF Configuration can be customized using Helm.
+Akri has already provided two Configurations, one for discovering IP cameras using the ONVIF protocol and the other for discovering USB cameras via udev. Let's look at an [example ONVIF Configuration yaml](../test/yaml/akri-onvif-video-configuration.yaml). You can see it specifies the protocol ONVIF, an image for the broker pod, a capacity of 5, and two Kubernetes services. In this case, the broker pod is a sample frame server we have provided. To get only the frames from a specific camera, a user could point an application at the Instance service, while the Configuration service provides the frames from all the cameras. The ONVIF Configuration can be customized using Helm. When installing the ONVIF Configuration to your Akri-enabled cluster, you can specify [the values](../deployment/helm/values.yaml) you want to be inserted into the [ONVIF Configuration template](../deployment/helm/templates/onvif-configuration.yaml). Learn more about [deploying the ONVIF sample here](./onvif-configuration.md).
 
 ### Akri Instance CRD
 Each Instance represents an individual resource that is visible to the cluster. So, if there are 5 IP cameras visible to the cluster, there will be 5 Instances. Akri coordination and resource sharing is enabled by the Instance CRD. These instances store internal Akri state and are not intended to be edited by users. For a more in-depth understanding on how resource sharing is accomplished, see [Resource Sharing In-depth](./resource-sharing-in-depth.md).
@@ -54,8 +54,9 @@ For a more in-depth understanding, see [Controller In-depth](./controller-in-dep
 metadata:
   name: akri-<protocol>
 spec:
-  protocol:
-    <protocol name>:
+  discoveryHandler:
+    name: protocolA
+    discoveryDetails: {}
   brokerPodSpec:
     containers:
     - name: custom-broker
@@ -63,7 +64,7 @@ For a more in-depth understanding, see [Controller In-depth](./controller-in-dep
       # ...
   capacity: 3
 ```
-1. The Akri Agent sees the Configuration and discovers a leaf device using the protocol specified in the Configuration. It creates a device plugin for that leaf device and registers it with the kubelet. The Agent then creates an Instance for the discovered leaf device, listing itself as a node that can access it under `nodes`. The Akri Agent puts all the information that the broker pods will need in order to connect to the specific device under the `metadata` section of the Instance. Later, the controller will mount these as environment variables in the broker pods. Note how Instance has 3 available `deviceUsage` slots, since capacity was set to 3 and no brokers have been scheduled to the leaf device yet.
+1. The Akri Agent sees the Configuration and discovers a leaf device using the protocol specified in the Configuration. It creates a device plugin for that leaf device and registers it with the kubelet. When creating the device plugin, it tells the kubelet to set connection information for that specific device and additional metadata from a Configuration's `brokerProperties` as environment variables in all Pods that request this device's resource. This information is also set in the `brokerProperties` section of the Instance the Agent creates to represent the discovered leaf device. In the Instance, the Agent also lists itself as a node that can access the device under `nodes`. Note how Instance has 3 available `deviceUsage` slots, since capacity was set to 3 and no brokers have been scheduled to the leaf device yet.
 ```yaml
 kind: Instance
 metadata:
@@ -75,7 +76,7 @@ For a more in-depth understanding, see [Controller In-depth](./controller-in-dep
     akri-<protocol>-<hash>-0: ""
     akri-<protocol>-<hash>-1: ""
     akri-<protocol>-<hash>-2: ""
-  metadata:
+  brokerProperties:
     BROKER_ENV_VAR_1: <value>
     BROKER_ENV_VAR_N: <value>
   nodes:
@@ -114,7 +115,7 @@ For a more in-depth understanding, see [Controller In-depth](./controller-in-dep
       # ...
   phase: Pending
 ```
-1. The kubelet on the selected node sees the scheduled pod and resource limit. It checks to see if the resource is available by calling `allocate` on the device plugin running in the Agent for the requested leaf device. When calling `allocate`, the kubelet requests a specific `deviceUsage` slot. Let's say the kubelet requested `akri-<protocol>-<hash>-1`. The leaf device's device plugin checks to see that the requested `deviceUsage` slot has not been taken by another node. If it is available, it reserves that `deviceUsage` slot for this node (as shown below) and returns true.
+1. The kubelet on the selected node sees the scheduled pod and resource limit. It checks to see if the resource is available by calling `allocate` on the device plugin running in the Agent for the requested leaf device. When calling `allocate`, the kubelet requests a specific `deviceUsage` slot. Let's say the kubelet requested `akri-<protocol>-<hash>-1`. The leaf device's device plugin checks to see that the requested `deviceUsage` slot has not been taken by another node. If it is available, it reserves that `deviceUsage` slot for this node (as shown below) and returns true. In the `allocate` response, the Agent also tells kubelet to mount the `Instance.brokerProperties` as environment variables in the broker Pod.
 ```yaml
 kind: Instance
 metadata:
@@ -126,7 +127,7 @@ For a more in-depth understanding, see [Controller In-depth](./controller-in-dep
     akri-<protocol>-<hash>-0: ""
     akri-<protocol>-<hash>-1: "<node name>"
     akri-<protocol>-<hash>-2: ""
-  metadata:
+  brokerProperties:
     BROKER_ENV_VAR_1: <value>
     BROKER_ENV_VAR_N: <value>
   nodes:
diff --git a/docs/broker-development.md b/docs/broker-development.md
new file mode 100644
index 000000000..3c61d3794
--- /dev/null
+++ b/docs/broker-development.md
@@ -0,0 +1,106 @@
+# Creating a Broker to Utilize Discovered Devices
+Akri's Agent discovers devices described by an Akri Configuration, and for each discovered device, it creates Kubernetes
+resources using the Device Plugin Framework, which can later be requested by Pods. Akri's Controller can automate the
+usage of discovered devices by deploying Pods that request the newly created resources. **Akri calls these Pods brokers.**
+
+> Background: Akri chose the term "broker" because one use case Akri initially envisioned was deploying Pods that acted
+> as protocol translation gateways. For example, Akri could discover USB cameras and automatically deploy a broker to
+> each camera that advertises the camera as an IP camera that could be accessed outside the Node.
+
+Akri takes a micro-service approach to deploying brokers. A broker is deployed to each Node that can see a discovered
+device (limited by a `capacity` that can be set in a Configuration to limit the number of Nodes that can utilize a
+device at once). Each broker is provisioned with device connection information and other metadata as environment
+variables. These environment variables come from two sources: a Configuration's `brokerProperties` and the `properties`
+of a `Device` discovered by a Discovery Handler. The former is where an operator can specify environment variables that
+will be set in brokers that utilize any device discovered via the Configuration. The latter is specific to one device
+and usually contains connection information such as an RTSP URL for an ONVIF camera or a devnode for a USB device. Also,
+while `brokerProperties` can be unique to a scenario, the `properties` environment variable keys are consistent for a
+given Discovery Handler, with values changing per device. All the environment variables from these two sources are
+displayed in an Instance that represents a discovered device, making it a good reference for what environment variables
+the broker should expect. The image below expresses how a broker Pod's environment variables come from the two
+aforementioned sources.
+
+![Diagram depicting source of broker Pod environment variables](./media/setting-broker-environment-variables.svg "Source of broker Pod environment variables")
+
+## Discovery Handler specified environment variables
+The first step to developing a broker is understanding what information will be made available to the Pod via the
+Discovery Handler (aka the `Device.properties`). The following table contains the environment variables specified by
+each of Akri's currently supported Discovery Handlers, and the expected content of the environment variables.
+
+| Discovery Handler | Env Var Name | Value Type | Examples | Always Present? (Y/N) |
+|---|---|---|---|---|
+| debugEcho (for testing) | `DEBUG_ECHO_DESCRIPTION` | some random string | `foo`, `bar` | Y |
+| ONVIF | `ONVIF_DEVICE_SERVICE_URL` | ONVIF camera source URL | `http://10.123.456.789:1000/onvif/device_service` | Y |
+| ONVIF | `ONVIF_DEVICE_IP_ADDRESS` | IP address of the camera | `10.123.456.789` | Y |
+| ONVIF | `ONVIF_DEVICE_MAC_ADDRESS` | MAC address of the camera | `48:0f:cf:4e:1b:3d`, `480fcf4e1b3d` | Y |
+| OPC UA | `OPCUA_DISCOVERY_URL` | [DiscoveryURL](https://reference.opcfoundation.org/GDS/docs/4.3.3/) of specific OPC UA Server/Application | `10.123.456.789:1000/Some/Path/` | Y |
+| udev | `UDEV_DEVNODE` | device node for specific device | `/dev/video1`, `/dev/snd/pcmC1D0p`, `/dev/dri/card0` | Y |
+
+A broker should look up the variables set by the appropriate Discovery Handler and use the contents to connect to a
+specific device.
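+
+For example, a minimal Rust broker for udev-discovered devices (a sketch, not one of Akri's samples) could read its
+device node from the environment like so:
+```rust
+use std::env;
+
+fn main() {
+    // UDEV_DEVNODE is set by the udev Discovery Handler (see the table above).
+    let devnode = env::var("UDEV_DEVNODE").expect("UDEV_DEVNODE not set");
+    println!("Connecting to device at {}", devnode);
+    // ... open the device node and serve its data over gRPC, REST, etc. ...
+}
+```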
+
+## Exposing device information over a service
+Oftentimes, it is useful for a broker to expose some information from its device over a service. Akri, by default,
+assumes this behavior, creating a Kubernetes service for each broker (called an Instance level service) and for all
+brokers of a Configuration (called a Configuration level service). This allows an application to target a specific
+device/broker or all devices/brokers, the latter of which allows the application to be oblivious to the coming and going
+of devices (and thereby brokers).
+
+> Note: This default creation of Instance and Configuration services can be disabled by setting
+> `<configuration name>.configuration.createInstanceServices=false` and
+> `<configuration name>.configuration.createConfigurationService=false` when installing Akri's Helm chart.
+
+A broker can expose information via REST, gRPC, etc. Akri's [sample brokers](../samples/brokers) all use gRPC. For
+example, the udev video and ONVIF brokers both use the same [camera proto
+file](../samples/brokers/udev-video-broker/proto/camera.proto) for their gRPC interfaces, which contains a service that
+serves camera frames. This means that one end application can be deployed that implements the client side of the
+interface and grabs frames from all cameras, whether IP- or USB-based. This is exactly what our [sample streaming
+application](../samples/apps/video-streaming-app) does.
+
+## Deploying your custom broker
+Once you have created a broker, you can ask Akri to automatically deploy it to all devices discovered by a
+Configuration by specifying the image in `<discovery handler name>.configuration.brokerPod.image.repository` and
+`<discovery handler name>.configuration.brokerPod.image.tag`. For example, say you created a broker that connects to a
+USB camera and advertises it as an IP camera. You want to deploy it to all USB cameras on your cluster's nodes using
+Akri, so you deploy Akri with a Configuration that uses the udev Discovery Handler and set the image of your broker (say
+`ghcr.io/brokers/camera-broker:v0.0.1`), like so:
+```sh
+helm repo add akri-helm-charts https://deislabs.github.io/akri/
+helm install akri akri-helm-charts/akri-dev \
+   --set udev.discovery.enabled=true \
+   --set udev.configuration.enabled=true \
+   --set udev.configuration.name=akri-udev-video \
+   --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' \
+   --set udev.configuration.brokerPod.image.repository="ghcr.io/brokers/camera-broker" \
+   --set udev.configuration.brokerPod.image.tag="v0.0.1"
+```
+### Setting compute resource requests and limits for your broker
+The default broker Pod memory and CPU resource requests and limits in Akri's Helm chart are based on the requirements of Akri's sample brokers. The following brokers were created for demo purposes:
+| Discovery Handler | Akri Sample Broker Pod image | Description |
+|---|---|---|
+| debugEcho | `nginx:stable-alpine` | standard nginx image for testing |
+| ONVIF | `ghcr.io/deislabs/akri/onvif-video-broker:latest` | .NET camera frame server |
+| OPC UA | `ghcr.io/deislabs/akri/opcua-monitoring-broker:latest` | .NET App subscribes to specific NodeID and serves latest value |
+| udev | `ghcr.io/deislabs/akri/udev-video-broker:latest` | Rust camera frame server |
+
+The limit and request bounds were obtained using Kubernetes' [Vertical Pod Autoscaler (VPA)](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler). You should choose bounds appropriate to your broker Pod. [This blog](https://pretired.dazwilkin.com/posts/210305/#vertical-pod-autoscaler-vpa) is a good starting point for learning how to use the VPA to choose bounds.
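+
+For example, bounds chosen with the VPA could be applied at install time roughly like so (the `resources` value keys
+below are assumptions; confirm the exact keys with `helm inspect values akri-helm-charts/akri`):
+```sh
+helm install akri akri-helm-charts/akri \
+   --set udev.discovery.enabled=true \
+   --set udev.configuration.enabled=true \
+   --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' \
+   --set udev.configuration.brokerPod.resources.memoryRequest=30Mi \
+   --set udev.configuration.brokerPod.resources.cpuRequest=100m \
+   --set udev.configuration.brokerPod.resources.memoryLimit=60Mi \
+   --set udev.configuration.brokerPod.resources.cpuLimit=200m
+```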
You want to deploy it to all USB cameras on your cluster's nodes using +Akri, so you deploy Akri with a Configuration that uses the udev Discovery Handler and set the image of your broker (say +`ghcr.io/brokers/camera-broker:v0.0.1`), like so: +```sh +helm repo add akri-helm-charts https://deislabs.github.io/akri/ +helm install akri akri-helm-charts/akri-dev \ + --set udev.discovery.enabled=true \ + --set udev.configuration.enabled=true \ + --set udev.configuration.name=akri-udev-video \ + --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' \ + --set udev.configuration.brokerPod.image.repository="ghcr.io/brokers/camera-broker" \ + --set udev.configuration.brokerPod.image.tag="v0.0.1" +``` +### Setting compute resource requests and limits for your broker +The default broker Pod memory and CPU resource request and limits in Akri's Helm chart are based off the requirements of Akri's sample brokers. The following brokers were created for demo purposes: +| Discovery Handler | Akri Sample Broker Pod image | Description | +|---|---|---| +| debugEcho | `nginx:stable-alpine` | standard nginx image for testing | +| ONVIF | `ghcr.io/deislabs/akri/onvif-video-broker:latest` | .NET camera frame server | +| OPC UA | `ghcr.io/deislabs/akri/opcua-monitoring-broker:latest` | .Net App subscribes to specific NodeID and serves latest value | +| udev | `ghcr.io/deislabs/akri/udev-video-broker:latest` | Rust camera frame server | + +The limit and request bounds were obtained using Kubernetes' [Vertical Pod Autoscaler (VPA)](https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler). You should choose bounds appropriate to your broker Pod. [This blog](https://pretired.dazwilkin.com/posts/210305/#vertical-pod-autoscaler-vpa) is a good starting point for learning how to use the VPA to choose bounds. + +## Specifying additional broker environment variables in a Configuration +You can request that additional environment variables are set in Pods that request devices discovered via an Akri +Configuration. These are set as key/value pairs in a Configuration's `brokerProperties`. For example, take the scenario +of brokers being deployed to USB cameras discovered by Akri. You may wish to give the brokers extra information about the +image format and resolution the cameras support. The brokers then can look up these variables to know how to properly +utilize their camera. These `brokerProperties` could be set in a Configuration during a Helm installation as follows: +```sh + helm repo add akri-helm-charts https://deislabs.github.io/akri/ + helm install akri akri-helm-charts/akri-dev \ + --set udev.discovery.enabled=true \ + --set udev.configuration.enabled=true \ + --set udev.configuration.name=akri-udev-video \ + --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' \ + --set udev.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker" \ + --set udev.configuration.brokerProperties.FORMAT='JPEG' \ + --set udev.configuration.brokerProperties.RESOLUTION_WIDTH='1000' \ + --set udev.configuration.brokerProperties.RESOLUTION_HEIGHT='800' +``` \ No newline at end of file diff --git a/docs/building.md b/docs/building.md new file mode 100644 index 000000000..7db223797 --- /dev/null +++ b/docs/building.md @@ -0,0 +1,157 @@ +# Akri Build +Building Akri, whether locally or in the automated CI builds, leverages the same set of `make` commands. + +In essence, Akri components can be thought of as: +1. Runtime components + 1. 
+   1. Rust code: containers based on Rust code are built using `Cargo cross` and subsequent `docker build` commands include the cross-built binaries.
+      > Note: For Rust code, build/Dockerfile.* does NOT run `cargo build`; instead, they simply copy cross-built binaries into the container
+   2. Other code: these containers can be .NET or python or whatever else ... the build/Dockerfile.* must do whatever building is required.
+2. Intermediate components: these containers are used as part of the build process and are not used in production explicitly
+
+## Runtime components
+The Akri runtime components are the containers that provide Akri's functionality. They include the agent, the controller, the webhook, the brokers, and the applications. The majority of Akri runtime components are written in Rust, but there are several components that are written in .NET or python.
+
+All of the runtime components are built with a `make` command. These are the supporting makefiles:
+* `Makefile`: this provides a single point of entry to build any Akri component
+* `build/akri-containers.mk`: this provides the build and push functionality for Akri containers
+* `build/akri-rust-containers.mk`: this provides a simple definition to build and push Akri components written in Rust
+* `build/akri-dotnet-containers.mk`: this provides a simple definition to build and push Akri components written in .NET
+* `build/akri-python-containers.mk`: this provides a simple definition to build and push Akri components written in Python
+
+### Configurability
+The makefiles allow for several configurations:
+* BUILD_AMD64: if not set to 1, the make commands will ignore AMD64
+* BUILD_ARM32: if not set to 1, the make commands will ignore ARM32
+* BUILD_ARM64: if not set to 1, the make commands will ignore ARM64
+* REGISTRY: allows configuration of the container registry (defaults to imaginary: devcaptest.azurecr.io)
+* UNIQUE_ID: allows configuration of container registry account (defaults to $USER)
+* PREFIX: allows configuration of container registry path for containers
+* LABEL_PREFIX: allows configuration of container labels
+* CACHE_OPTION: when `CACHE_OPTION=--no-cache`, the `docker build` commands will not use local caches
+
+### Local development usage
+For a local build, some typical patterns are:
+* `make akri-build`: run Rust cross-build for all platforms
+* `BUILD_AMD64=0 BUILD_ARM32=0 BUILD_ARM64=1 make akri-build`: run Rust cross-build for ARM64
+* `PREFIX=ghcr.io/myaccount make akri`: builds all of the Akri containers and stores them in a container registry, `ghcr.io/myaccount`.
+* `PREFIX=ghcr.io/myaccount LABEL_PREFIX=local make akri`: builds all of the Akri containers and stores them in a container registry, `ghcr.io/myaccount` with labels prefixed with `local`.
+* `PREFIX=ghcr.io/myaccount BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=0 make akri`: builds all of the Akri containers for AMD64 and stores them in a container registry, `ghcr.io/myaccount`.
+* `PREFIX=ghcr.io/myaccount make akri-controller`: builds the Akri controller container for all platforms and stores them in a container registry, `ghcr.io/myaccount`.
+
+### make targets
+For each component, there will be a common set of targets:
+* `akri-<component>`: this target will cross-build Akri and build+push this component's container for all platforms
+* `akri-docker-<component>`: this target will build+push this component's container for all platforms
+* `<component>-build`: this target will build this component's container for all platforms
+* `<component>-build-amd64`: this target will build this component's container for amd64
+* `<component>-build-arm32`: this target will build this component's container for arm32
+* `<component>-build-arm64`: this target will build this component's container for arm64
+* `<component>-docker-per-arch`: this target will push this component's container for all platforms
+* `<component>-docker-per-arch-amd64`: this target will push this component's container for amd64
+* `<component>-docker-per-arch-arm32`: this target will push this component's container for arm32
+* `<component>-docker-per-arch-arm64`: this target will push this component's container for arm64
+* `<component>-docker-multi-arch-create`: this target will create a multi-arch manifest for this component and include all platforms
+* `<component>-docker-multi-arch-push`: this target will push a multi-arch manifest for this component
+
+### Adding a new component
+To add a new Rust-based component, follow these steps, substituting the new component name for `<component>` (a hypothetical example follows the list):
+1. Add `$(eval $(call add_rust_targets,<component>,<component>))` to `build/akri-containers.mk`
+1. Create `build/Dockerfile.<component>`
+   > A simple way to do this is to copy `build/Dockerfile.agent` and replace `agent` with whatever `<component>` is.
+1. Create `.github/workflows/build-<component>-container.yml`
+   > A simple way to do this is to copy `.github/workflows/build-agent-container.yml` and replace `agent` with whatever `<component>` is.
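+
+For instance, for a hypothetical component named `my-handler` (assuming its source lives in a directory of the same
+name), step 1 would add this line to `build/akri-containers.mk`:
+```makefile
+# Generates my-handler-build, my-handler-docker-per-arch, and related targets
+$(eval $(call add_rust_targets,my-handler,my-handler))
+```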
+
+## Intermediate components
+These are the intermediate components:
+* [rust-crossbuild](https://github.com/orgs/deislabs/packages/container/package/akri%2Frust-crossbuild)
+* [opencvsharp-build](https://github.com/orgs/deislabs/packages/container/package/akri%2Fopencvsharp-build)
+
+### rust-crossbuild
+This container is used by the Akri cross-build process. The main purpose of these containers is to provide `Cargo cross` with a Rust build environment that has all the required dependencies installed. This container can be built locally for all platforms using this command:
+```bash
+BUILD_AMD64=1 BUILD_ARM32=1 BUILD_ARM64=1 make rust-crossbuild
+```
+
+If a change needs to be made to this container, 2 pull requests are needed.
+1. Create PR with desired `rust-crossbuild` changes (new dependencies, etc) AND update `BUILD_RUST_CROSSBUILD_VERSION` in `build/intermediate-containers.mk`. This PR is intended to create the new version of `rust-crossbuild` (not to use it).
+1. After 1st PR is merged and the new version of `rust-crossbuild` is pushed to ghcr.io/akri, create PR with any changes that will leverage the new version of `rust-crossbuild` AND update `Cross.toml` (the `BUILD_RUST_CROSSBUILD_VERSION` value specified in step 1 should be each label's suffix). This PR is intended to **use** the new version of `rust-crossbuild`.
+
+### opencvsharp-build
+This container is used by the [onvif-video-broker](https://github.com/orgs/deislabs/packages/container/package/akri%2Fonvif-video-broker) as part of its build process. The main purpose of this container is to prevent each build from needing to build the OpenCV C# platform. This container can be built locally for all platforms using this command:
+```bash
+BUILD_AMD64=1 BUILD_ARM32=1 BUILD_ARM64=1 make opencv-base
+```
+
+If a change needs to be made to this container, 2 pull requests are needed.
+1. Create PR with desired `opencvsharp-build` changes (new dependencies, etc) AND update `BUILD_OPENCV_BASE_VERSION` in `build/intermediate-containers.mk`. This PR is intended to create the new version of `opencvsharp-build` (not to use it).
+1. After 1st PR is merged and the new version of `opencvsharp-build` is pushed to ghcr.io/akri, create PR with any changes that will leverage the new version of `opencvsharp-build` AND update `USE_OPENCV_BASE_VERSION` in `build/akri-containers.mk`. This PR is intended to **use** the new version of `opencvsharp-build`.
+
+
+## Automated builds usage
+The automated CI builds essentially run these commands, where `<component>` is one of (`controller`|`agent`|`udev`|`webhook-configuration`|`onvif`|`opcua-monitoring`|`anomaly-detection`|`streaming`) and `<platform>` is one of (`amd64`|`arm32`|`arm64`):
+```bash
+# Install the Rust cross building tools
+make install-cross
+# Cross-builds Rust code for specified platform
+make akri-cross-build-<platform>
+# Cross-builds Rust code for specified component and platform
+make <component>-build-<platform>
+# Create container for specified component and platform using versioned label
+LABEL_PREFIX="v$(cat version.txt)-dev" make <component>-build-<platform>
+# Create container for specified component and platform using latest label
+LABEL_PREFIX="latest-dev" make <component>-build-<platform>
+
+PREFIX="ghcr.io/deislabs"
+# Push container for specified component and platform with versioned label to container registry
+LABEL_PREFIX="v$(cat version.txt)-dev" make <component>-docker-per-arch-<platform>
+# Push container for specified component and platform with latest label to container registry
+LABEL_PREFIX="latest-dev" make <component>-docker-per-arch-<platform>
+
+DOCKER_CLI_EXPERIMENTAL="enabled"
+PREFIX="ghcr.io/deislabs"
+# Create manifest for multi-arch versioned container
+LABEL_PREFIX="v$(cat version.txt)-dev" make <component>-docker-multi-arch-create
+# Push manifest for multi-arch versioned container
+LABEL_PREFIX="v$(cat version.txt)-dev" make <component>-docker-multi-arch-push
+# Create manifest for multi-arch latest container
+LABEL_PREFIX="latest-dev" make <component>-docker-multi-arch-create
+# Push manifest for multi-arch latest container
+LABEL_PREFIX="latest-dev" make <component>-docker-multi-arch-push
+
+```
+
+## Build and run Akri without a Container Registry
+For development and/or testing, it can be convenient to run Akri without a Container Registry. For example, the Akri CI tests that validate pull requests build Akri components locally, store the containers only in local docker, and configure Helm to only use the local docker containers.
+
+There are two steps to this. For the sake of this demonstration, only the amd64 version of the agent and controller will be built, but this method can be extended to any and all components:
+1. Build:
+   ```bash
+   # Only build AMD64
+   BUILD_AMD64=1
+   # PREFIX can be anything, as long as it matches what is specified in the Helm command
+   PREFIX=no-container-registry
+   # LABEL_PREFIX can be anything, as long as it matches what is specified in the Helm command
+   LABEL_PREFIX=dev
+   # Build the Rust code
+   make akri-build
+   # Build the controller container locally for amd64
+   make controller-build-amd64
+   # Build the agent container locally for amd64
+   make agent-build-amd64
+   ```
+2. Runtime
+   ```bash
+   # Specify pullPolicy as Never
+   # Specify repository as $PREFIX/<component>
+   # Specify tag as $LABEL_PREFIX-amd64
+   helm install akri ./deployment/helm \
+      --set agent.image.pullPolicy=Never \
+      --set agent.image.repository="$PREFIX/agent" \
+      --set agent.image.tag="$LABEL_PREFIX-amd64" \
+      --set controller.image.pullPolicy=Never \
+      --set controller.image.repository="$PREFIX/controller" \
+      --set controller.image.tag="$LABEL_PREFIX-amd64"
+   ```
diff --git a/docs/contributing.md b/docs/contributing.md
index 2601023fc..5cde9ce8b 100644
--- a/docs/contributing.md
+++ b/docs/contributing.md
@@ -7,6 +7,7 @@ Akri utilizes a variety of technologies, and different background knowledge is m
 - The Akri Controller and Agent are written in the [Rust programming language](https://www.rust-lang.org/learn).
 - All of Akri's components run on Linux, so you will need to set up an Ubuntu VM if you do not already have a Linux environment.
 - Sample brokers and end applications can be written in any language and are individually containerized.
+- [Discovery handlers](./agent-in-depth.md#resource-discovery) can be written in any language and can be deployed in their own Pods. However, if you would like your discovery handler to be embedded in the Akri Agent Pods, it must be written in Rust.
 - We use Docker to build our [containers](https://www.docker.com/resources/what-container).
 
 ## How do I get started developing?
@@ -21,7 +22,7 @@ Contributions can be made by forking the repository and creating a pull request.
 See the [Development](./development.md) documentation for more information on how to set up your environment and build Akri components locally.
 
 ## Versioning
-We follow the [SymVer](https://semver.org/) versioning strategy: [MAJOR].[MINOR].[PATCH]. Our current version can be found in `./version.txt`.
+We follow the [SemVer](https://semver.org/) versioning strategy: [MAJOR].[MINOR].[PATCH]. Our current version can be found in `./version.txt`.
 
 * For non-breaking bug fixes and small code changes, [PATCH] should be incremented. This can be accomplished by running `version.sh -u -p`
 * For non-breaking feature changes, [MINOR] should be incremented. This can be accomplished by running `version.sh -u -n`
@@ -29,6 +30,18 @@ We follow the [SymVer](https://semver.org/) versioning strategy: [MAJOR].[MINOR]
 To ensure that all product versioning is consistent, our CI builds will execute `version.sh -c` to check all known instances of version in our YAML, TOML, and code. This will also check to make sure that version.txt has been changed. If a pull request is needed where the version should not be changed, include `[SAME VERSION]` in the pull request title.
 
+> Note for macOS users: `version.sh` uses the GNU `sed` command under the hood, but macOS has its own version built in. We recommend installing the GNU version via `brew install gnu-sed`. Then follow the brew instructions on how to use the installed GNU `sed` instead of the macOS one.
+
+## Logging
+Akri follows logging conventions similar to those defined by the [Tracing crate](https://docs.rs/tracing/0.1.22/tracing/struct.Level.html). When adding logging to new code, follow the verbosity guidelines.
+| verbosity | when to use? |
+|---|---|
+| error | Unrecoverable fatal errors |
+| warn | Unexpected errors that may/may not lead to serious problems |
+| info | Useful information that provides an overview of the current state of things (ex: config values, state change) |
+| debug | Verbose information for high-level debugging and diagnoses of issues |
+| trace | Extremely verbose information for developers of Akri |
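+
+For example, the verbosity levels map onto the `log` crate macros (used by Akri's Rust components) as in this sketch:
+```rust
+use log::{debug, error, info, trace, warn};
+
+fn connect_to_device(attempt: u32) {
+    trace!("connect_to_device - entered, attempt {}", attempt); // developer-level detail
+    debug!("connect_to_device - diagnosing connection state");  // high-level debugging
+    info!("connect_to_device - connection established");        // overview of state change
+    if attempt > 3 {
+        warn!("connect_to_device - repeated failures, retrying"); // unexpected but tolerable
+    }
+    if attempt > 10 {
+        error!("connect_to_device - unrecoverable failure");      // fatal
+    }
+}
+```
+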
 ## PR title flags
 Akri's workflows check for three flags in the titles of PRs in order to decide whether to execute certain checks.
@@ -45,4 +58,4 @@ Most contributions require you to agree to a Contributor License Agreement (CLA)
 When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
 
 ## Code of Conduct
-Participation in the Akri community is governed by the [Code of Conduct](../CODE_OF_CONDUCT.md).
\ No newline at end of file
+Participation in the Akri community is governed by the [Code of Conduct](../CODE_OF_CONDUCT.md).
diff --git a/docs/customizing-akri-installation.md b/docs/customizing-akri-installation.md
index 6b215d093..24739deeb 100644
--- a/docs/customizing-akri-installation.md
+++ b/docs/customizing-akri-installation.md
@@ -1,34 +1,33 @@
 # Customizing an Akri Installation
-The [ONVIF](./onvif-configuration.md), [udev](./udev-configuration.md), and [OPC UA](./opcua-configuration.md) documentation explains how to deploy Akri for a specific
-protocol Configuration using Helm (more information about the Akri Helm charts can be found in the [user guide](./user-guide.md#understanding-akri-helm-charts)). This documentation elaborates upon them, covering the following:
+The [ONVIF](./onvif-configuration.md), [udev](./udev-configuration.md), and [OPC UA](./opcua-configuration.md) Configurations documentation explains how to deploy Akri and utilize a specific
+Discovery Handler using Helm (more information about the Akri Helm charts can be found in the [user guide](./user-guide.md#understanding-akri-helm-charts)). This documentation elaborates upon them, covering the following:
 1. Starting Akri without any Configurations
-1. Generating, modifying and applying a custom Configuration
+1. Generating, modifying and applying a Configuration
 1. Deploying multiple Configurations
 1. Modifying a deployed Configuration
 1. Adding another Configuration to a cluster
 1. Deleting a Configuration from a cluster
+1. Applying Discovery Handlers
 
 ## Starting Akri without any Configurations
 To install Akri without any protocol Configurations, run this:
 ```bash
 helm repo add akri-helm-charts https://deislabs.github.io/akri/
-helm install akri akri-helm-charts/akri \
-  --set useLatestContainers=true
+helm install akri akri-helm-charts/akri
 ```
-This will start the Akri controller and deploy Akri Agents.
+This will deploy the Akri Controller and the Akri Agents.
 
-## Generating, modifying and applying a custom Configuration
+## Generating, modifying and applying a Configuration
-Helm allows us to parametrize the commonly modified fields in our configuration files and we have provided many (to see
-them, run `helm inspect values akri-helm-charts/akri`). For more advanced configuration changes that are not aided by
+Helm allows us to parametrize the commonly modified fields in our Configuration templates and we have provided many (to see
+them, run `helm inspect values akri-helm-charts/akri`). For more advanced Configuration changes that are not aided by
For more advanced Configuration changes that are not aided by our Helm chart, we suggest creating a Configuration file using Helm and then manually modifying it. For example, to create an ONVIF Configuration file, run the following. (To instead create a udev Configuration, -substitute `onvif.enabled` with `udev.enabled` and add a udev rule. For OPC UA, substitute with `opcua.enabled`.) +substitute `onvif.configuration.enabled` with `udev.configuration.enabled` and add a udev rule. For OPC UA, substitute with `opcua.configuration.enabled`.) ```bash helm template akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set onvif.enabled=true \ - --set onvif.brokerPod.image.repository=nginx \ + --set onvif.configuration.enabled=true \ + --set onvif.configuration.brokerPod.image.repository=nginx \ --set rbac.enabled=false \ --set controller.enabled=false \ --set agent.enabled=false > configuration.yaml @@ -46,10 +45,9 @@ installed from the start with both the ONVIF and udev Configurations like so: ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set onvif.enabled=true \ - --set udev.enabled=true \ - --set udev.udevRules[0]='KERNEL=="video[0-9]*"' + --set onvif.configuration.enabled=true \ + --set udev.configuration.enabled=true \ + --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' ``` Note: You must specify a udev rule to successfully build the udev Configuration. @@ -57,6 +55,21 @@ You can confirm that both a `akri-onvif` and `akri-udev` Configuration have been ``` bash kubectl get akric ``` +Each Configuration could also have been deployed via separate Helm installations: +```bash +helm install udev-config akri-helm-charts/akri \ + --set controller.enabled=false \ + --set agent.enabled=false \ + --set rbac.enabled=false \ + --set udev.configuration.enabled=true \ + --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' + +helm install onvif-config akri-helm-charts/akri \ + --set controller.enabled=false \ + --set agent.enabled=false \ + --set rbac.enabled=false \ + --set onvif.configuration.enabled=true +``` ## Modifying a deployed Configuration An already deployed Configuration can be modified in one of two ways: @@ -71,13 +84,13 @@ say an IP camera with IP address 10.0.0.1 is malfunctioning and should be filter command could be run: ```bash helm upgrade akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set onvif.enabled=true \ - --set onvif.brokerPod.image.repository= \ - --set onvif.ipAddresses.action=Exclude \ - --set onvif.ipAddresses.items[0]=10.0.0.1 + --set onvif.configuration.enabled=true \ + --set onvif.configuration.brokerPod.image.repository= \ + --set onvif.configuration.brokerPod.image.tag= \ + --set onvif.configuration.discoveryDetails.ipAddresses.action=Exclude \ + --set onvif.configuration.discoveryDetails.ipAddresses.items[0]=10.0.0.1 ``` -Note that the command is not simply `helm upgrade --set onvif.ipAddresses.items[0]=10.0.0.1`; rather, it includes +Note that the command is not simply `helm upgrade --set onvif.configuration.discoveryDetails.ipAddresses.items[0]=10.0.0.1`; rather, it includes all the old settings along with the new one. Also, note that we assumed you specified a broker pod image in your original installation command, so that brokers were deployed to utilize discovered cameras. Helm will create a new ONVIF Configuration and apply it to the cluster. 
@@ -97,7 +110,6 @@ spec: containers: - name: akri-onvif-video-broker image: "ghcr.io/deislabs/akri/onvif-video-broker:latest-dev" - imagePullPolicy: Always resources: limits: "{{PLACEHOLDER}}" : "1" @@ -114,7 +126,6 @@ spec: containers: - name: akri-onvif-video-broker image: "ghcr.io/deislabs/akri/onvif-video-broker:latest-dev" - imagePullPolicy: Always resources: requests: memory: 30Mi @@ -149,35 +160,44 @@ Note: the simple properties of `instanceServiceSpec` and `configurationServiceSp set using Helm's `--set` command (`--set onvif.instanceService.targetPort=90`). ## Adding another Configuration to a cluster -Another Configuration can be added to an existing Akri installation using `helm upgrade` or manually using `helm -template` and kubectl. +Another Configuration can be added to an existing Akri installation using `helm upgrade` or via a new Helm installation. ### Adding additional Configurations using `helm upgrade` Another Configuration can be added to the cluster by using `helm upgrade`. For example, if you originally installed just the ONVIF Configuration and now also want to discover local cameras via udev, as well, simply run the following: ```bash helm upgrade akri akri-helm-charts/akri \ - --set useLatestContainers=true \ --set onvif.enabled=true \ --set udev.enabled=true \ --set udev.udevRules[0]='KERNEL=="video[0-9]*"' ``` -### Adding additional Configurations manually -An additional Configuration can also be added to an existing Akri installation using the same process of using `helm -template` to generate a Configuration and then using kubectl to apply it as in the ["Generating, modifying, and applying -a Configuration yaml"](#generating-modifying-and-applying-a-custom-configuration) section above. +### Adding additional Configurations via new Helm installations +The udev Configuration could also have been applied via a new Helm installation like so: +```bash +helm install udev-config akri-helm-charts/akri \ + --set controller.enabled=false \ + --set agent.enabled=false \ + --set rbac.enabled=false \ + --set udev.configuration.enabled=true \ + --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' +``` ## Deleting a Configuration from a cluster If an operator no longer wants Akri to discover devices defined by a Configuration, they can delete the Configuration -and all associated broker pods will automatically be brought down. This can be done with `helm upgrade` or kubectl. +and all associated broker pods will automatically be brought down. This can be done with `helm upgrade`, `helm delete`, or kubectl. ### Deleting a Configuration using `helm upgrade` A Configuration can be deleted from a cluster using `helm upgrade`. For example, if both ONVIF and udev Configurations have been installed in a cluster, the udev Configuration can be deleted by only specifying the ONVIF Configuration in a `helm upgrade` command like the following: ```bash helm upgrade akri akri-helm-charts/akri \ - --set useLatestContainers=true \ --set onvif.enabled=true ``` +### Deleting a Configuration using `helm delete` +If the Configuration was applied in its own Helm installation (named `udev-config` in this example), the Configuration can be +deleted by deleting the installation. +```bash +helm delete udev-config +``` ### Deleting a Configuration using kubectl A configuration can also be deleted using kubectl. To list all applied Configurations, run `kubectl get akric`. If both udev and ONVIF Configurations have been applied with capacities of 5. 
The output should look like the following:
@@ -189,4 +209,20 @@ akri-udev            5                  16m
 ```
 To delete the ONVIF Configuration and bring down all ONVIF broker pods, run:
 ```bash
 kubectl delete akric akri-onvif
-```
\ No newline at end of file
+```
+## Installing Discovery Handlers
+The Agent discovers devices via Discovery Handlers. Akri supports an Agent image that includes all supported Discovery Handlers.
+This Agent will be used if `agent.full=true`, like so:
+```bash
+helm install akri akri-helm-charts/akri \
+    --set agent.full=true
+```
+
+By default, a slim Agent without any embedded Discovery Handlers is deployed and the required Discovery Handlers can be deployed as DaemonSets by specifying
+`<discovery handler name>.discovery.enabled=true` when installing Akri. For example, Akri is installed with the OPC UA and ONVIF Discovery Handlers like so:
+```bash
+helm install akri akri-helm-charts/akri \
+    --set opcua.discovery.enabled=true \
+    --set onvif.discovery.enabled=true
+```
+
diff --git a/docs/debug-echo-configuration.md b/docs/debug-echo-configuration.md
new file mode 100644
index 000000000..86467a1d3
--- /dev/null
+++ b/docs/debug-echo-configuration.md
@@ -0,0 +1,157 @@
+# Debugging Akri using the Debug Echo Discovery Handler and Configuration
+## Background
+In order to kick start using and debugging Akri, a "debug echo" Discovery Handler has been created. The Discovery
+Handler "discovers" all devices listed in the `descriptions` array in the `discoveryDetails` of a Debug Echo
+Configuration. Devices are visible to the Discovery Handler so long as the word "OFFLINE" does not exist in the file
+`/tmp/debug-echo-availability.txt` in the Pod in which the Discovery Handler is running.
+
+## Deploying the Debug Echo Discovery Handler
+In order for the Agent to know how to discover Debug Echo devices, the Debug Echo Discovery Handler must exist. Akri
+supports an Agent image that includes all supported Discovery Handlers. This Agent will be used if `agent.full=true`. By
+default, a slim Agent without any embedded Discovery Handlers is deployed and the required Discovery Handlers can be
+deployed as DaemonSets. This documentation will use that strategy, deploying Debug Echo Discovery Handlers by specifying
+`debugEcho.discovery.enabled=true` when installing Akri. Notes are provided for how the steps change if using embedded
+Discovery Handlers.
+
+Since the Debug Echo Discovery Handler is for debugging, its use must be explicitly enabled by setting
+`agent.allowDebugEcho=true`.
+
+## Quickstart
+### Installation
+To install Akri with **external** Debug Echo Discovery Handlers and a Configuration to discover unshared Debug Echo
+devices, run:
+```bash
+helm repo add akri-helm-charts https://deislabs.github.io/akri/
+helm install akri akri-helm-charts/akri \
+   --set agent.allowDebugEcho=true \
+   --set debugEcho.discovery.enabled=true \
+   --set debugEcho.configuration.enabled=true \
+   --set debugEcho.configuration.shared=false
+```
+
+> Note: To instead install Akri with Debug Echo Discovery Handlers **embedded** in the Agent, set `agent.full=true` and
+> remove `debugEcho.discovery.enabled=true` like in the following installation:
+>```bash
+>helm repo add akri-helm-charts https://deislabs.github.io/akri/
+>helm install akri akri-helm-charts/akri \
+> --set agent.allowDebugEcho=true \
+> --set agent.full=true \
+> --set debugEcho.configuration.enabled=true \
+> --set debugEcho.configuration.shared=false
+>```
+
+By default, the Debug Echo Configuration discovers two devices, `foo1` and `foo2`, and automatically deploys an empty
+nginx broker Pod to each discovered device, so you should see two Instances and two brokers created as a result of your
+installation. By default, it also creates an Instance service for each device and a Configuration service for all
+discovered devices. The Akri Agents, Controller, and (if using external Discovery Handlers) Debug Echo Discovery
+Handlers should also be created.
+
+```sh
+watch kubectl get pods,akric,akrii,services -o wide
+```
+
+Set `debugEcho.configuration.shared=true` to discover Debug Echo devices that are shared by all nodes. For example, when
+Akri is installed as above with `debugEcho.configuration.shared=false` onto a 3-node cluster, 6 Debug Echo devices
+will be discovered and 6 Instances will be created, 2 for each Node. However, if `debugEcho.configuration.shared=true`
+is set, only 2 will be discovered as it is mocking all 3 nodes "utilizing" the same two devices. Set
+`debugEcho.configuration.capacity=3` to allow all 3 nodes to receive brokers to utilize each of the shared devices. It
+defaults to `1`.
+
+### Marking Devices "OFFLINE"
+Debug Echo devices are "unplugged"/"disconnected" by writing `"OFFLINE"` into the `/tmp/debug-echo-availability.txt`
+file inside the pod in which the Discovery Handler is running.
+
+By default, Debug Echo Discovery Handlers run in their own Pods, so exec into each to mark the devices offline. For
+a single node cluster:
+```sh
+DEBUG_ECHO_DH_POD_NAME=$(kubectl get pods --selector=name=akri-debug-echo-discovery | grep akri | awk '{print $1}')
+kubectl exec -i $DEBUG_ECHO_DH_POD_NAME -- /bin/sh -c "echo "OFFLINE" > /tmp/debug-echo-availability.txt"
+```
+>Note: `shared` devices have a 5 minute grace period before their instances are deleted, as they are more often network
+>devices prone to intermittent connectivity.
+
+>Note: For multi-node clusters, each Agent or Debug Echo Discovery Handler must be `exec`ed into.
+
+> Note: If `agent.full=true` was specified when installing Akri, the Debug Echo Discovery Handlers run inside the Agent,
+> so exec into each Agent to mark the devices offline. For a single node cluster:
+> ```sh
+> AGENT_POD_NAME=$(kubectl get pods --selector=name=akri-agent | grep akri | awk '{print $1}')
+> kubectl exec -i $AGENT_POD_NAME -- /bin/sh -c "echo "OFFLINE" > /tmp/debug-echo-availability.txt"
+> ```
+
+Caveat: **Debug Echo devices likely should not be marked as shared for multi-node clusters**. This is because the
+contents of `/tmp/debug-echo-availability.txt` could be different for each node. If one node marks a device as "OFFLINE"
+but another does not, there is inconsistency around the existence of the device. However, this may be a scenario you
+want to consider or test.
+
+### Marking Devices "ONLINE"
+Debug Echo devices are "plugged in"/"reconnected" by removing `"OFFLINE"` from the `/tmp/debug-echo-availability.txt`
+file inside the pod in which the Discovery Handler is running. The commands below replace the file contents with
+`"ONLINE"`.
+
+By default, Debug Echo Discovery Handlers run in their own Pods, so exec into each to mark the devices online. For
+a single node cluster:
+```sh
+DEBUG_ECHO_DH_POD_NAME=$(kubectl get pods --selector=name=akri-debug-echo-discovery | grep akri | awk '{print $1}')
+kubectl exec -i $DEBUG_ECHO_DH_POD_NAME -- /bin/sh -c "echo "ONLINE" > /tmp/debug-echo-availability.txt"
+```
+
+>Note: For multi-node clusters, each Agent or Debug Echo Discovery Handler must be `exec`ed into.
+
+> Note: If `agent.full=true` was specified when installing Akri, the Debug Echo Discovery Handlers run inside the Agent,
+> so exec into each Agent to mark the devices online. For a single node cluster:
+> ```sh
+> AGENT_POD_NAME=$(kubectl get pods --selector=name=akri-agent | grep akri | awk '{print $1}')
+> kubectl exec -i $AGENT_POD_NAME -- /bin/sh -c "echo "ONLINE" > /tmp/debug-echo-availability.txt"
+> ```
+
+## In the Weeds: Debug Echo Configuration Settings
+
+### Discovery Handler Discovery Details Settings
+Discovery Handlers are passed discovery details that are set in a Configuration to determine what to discover, filter
+out of discovery, and so on. The Debug Echo Discovery Handler simply "discovers" a device for each string in
+`discoveryDetails.descriptions` in a Configuration.
+
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| debugEcho.configuration.discoveryDetails.descriptions | array of arbitrary Strings | ["foo1", "foo2"] | Names for fake devices that will be discovered |
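+
+As a sketch (the exact serialization should be checked against the generated Configuration, since `discoveryDetails`
+is passed as a string), the default Debug Echo Configuration hands these discovery details to the Discovery Handler:
+```yaml
+discoveryHandler:
+  name: debugEcho
+  discoveryDetails: |+
+    descriptions:
+    - foo1
+    - foo2
+```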
This is because the +contents of `/tmp/debug-echo-availability.txt` could be different for each node. If one node marks a device as "OFFLINE" +but another does not, there is inconsistency around the existence of the device. However, this may be a scenario you +want to consider or test. + +### Marking Devices "ONLINE" +Debug Echo devices are "plugged in"/"reconnected" by removing `"OFFLINE"` from the `/tmp/debug-echo-availability.txt` +file inside the pod in which the Discovery Handler is running. The commands below replace the file contents with +`"ONLINE"`. + +By default, Debug Echo Discovery Handlers run in their own Pods, so exec into each to mark the devices online. For +a single node cluster: +```sh +DEBUG_ECHO_DH_POD_NAME=$(kubectl get pods --selector=name=akri-debug-echo-discovery | grep akri | awk '{print $1}') +kubectl exec -i $DEBUG_ECHO_DH_POD_NAME -- /bin/sh -c "echo 'ONLINE' > /tmp/debug-echo-availability.txt" +``` + +>Note: For multi-node clusters, each Agent or Debug Echo Discovery Handler must be `exec`ed into. + +> Note: If `agent.full=true` was specified when installing Akri, the Debug Echo Discovery Handlers run inside the Agent, +> so exec into each Agent to mark the devices online. For a single node cluster: +> ```sh +> AGENT_POD_NAME=$(kubectl get pods --selector=name=akri-agent | grep akri | awk '{print $1}') +> kubectl exec -i $AGENT_POD_NAME -- /bin/sh -c "echo 'ONLINE' > /tmp/debug-echo-availability.txt" +> ``` + +## In the Weeds: Debug Echo Configuration Settings + +### Discovery Handler Discovery Details Settings +Discovery Handlers are passed discovery details that are set in a Configuration to determine what to discover, filter +out of discovery, and so on. The Debug Echo Discovery Handler simply "discovers" a device for each string in +`discoveryDetails.descriptions` in a Configuration. + +| Helm Key | Value | Default | Description | +|---|---|---|---| +| debugEcho.configuration.discoveryDetails.descriptions | array of arbitrary Strings | ["foo1", "foo2"] | Names for fake devices that will be discovered | + +### Broker Pod Settings +By default, brokers are deployed to discovered Debug Echo devices. Set +`debugEcho.configuration.brokerPod.image.repository=""` to not deploy broker Pods: + +| Helm Key | Value | Default | Description | +|---|---|---|---| +| debugEcho.configuration.brokerPod.image.repository | image string | nginx | image of broker Pod that should be deployed to discovered devices | +| debugEcho.configuration.brokerPod.image.tag | tag string | "latest" | image tag of broker Pod that should be deployed to discovered devices | + +### Disabling Automatic Service Creation +By default, if a broker Pod is specified, the Debug Echo Configuration will create services for all the brokers of a +specific Akri Instance and all the brokers of an Akri Configuration. The creation of these services can be disabled: + +| Helm Key | Value | Default | Description | +|---|---|---|---| +| debugEcho.configuration.createInstanceServices | true, false | true | a service should be automatically created for each broker Pod | +| debugEcho.configuration.createConfigurationService | true, false | true | a single service should be created for all brokers of a Configuration | + +### Capacity Setting +By default, if a broker Pod is specified, a single broker Pod is deployed to each device. To modify the Configuration so +that a Debug Echo device is utilized by more or fewer nodes via broker Pods, update the `debugEcho.configuration.capacity` +setting to reflect the correct number.
For example, if your high availability needs are met by having 1 redundant pod, +you can update the Configuration by setting `debugEcho.configuration.capacity=2`: + +| Helm Key | Value | Default | Description | +|---|---|---|---| +| debugEcho.configuration.capacity | number | 1 | maximum number of brokers that can be deployed to utilize a device (up to 1 per Node) | + +## Modifying a Configuration +Akri provides further documentation on [modifying the broker +PodSpec](./customizing-akri-installation.md#modifying-the-brokerpodspec), [instanceServiceSpec, or +configurationServiceSpec](./customizing-akri-installation.md#modifying-instanceservicespec-or-configurationservicespec). +More information about how to modify an installed Configuration, add additional Configurations to a cluster, or delete a +Configuration can be found in the [Customizing an Akri Installation document](./customizing-akri-installation.md). + +## Implementation details +The Debug Echo implementation can be understood by looking at its [Discovery +Handler](../discovery-handlers/debug-echo/src/discovery_handler.rs), which contains the `DebugEchoDiscoveryDetails` +struct describing the expected format of a Configuration's `DiscoveryDetails`. diff --git a/docs/development.md b/docs/development.md index 416dd9d61..e0dbeb814 100644 --- a/docs/development.md +++ b/docs/development.md @@ -1,12 +1,14 @@ # Development -This document will walk you through how to set up a local development environment, build Akri component containers, and test Akri using your newly built containers. +This document will walk you through how to set up a local development environment, build Akri component containers, and test Akri using your newly built containers. + +The document includes [naming guidelines](#naming-guidelines) to help as you extend Akri. ## Required Tools To develop, you'll need: - A Linux environment whether on amd64 or arm64v8 -- Rust - version 1.41.0 which the build system uses can be installed using: +- Rust - version 1.51.0 which the build system uses can be installed using: ```sh - sudo curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=1.41.0 + sudo curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain=1.51.0 cargo version ``` - .NET - the ONVIF broker is written in .NET, which can be installed according to [.NET instructions](https://docs.microsoft.com/dotnet/core/install/linux-ubuntu) @@ -48,9 +50,46 @@ There are unit tests for all of the Rust code. To run all unit tests, simply na To locally run the controller as part of a k8s cluster, follow these steps: -1. Create or provide access to a valid cluster configuration by setting KUBECONFIG (can be done in the commandline) ... for the sake of this, the config is assumed to be in ~/test.cluster.config -1. Build the repo by running `cargo build` -1. Run the desired component (in this case, looking at info-level logging and running the controller locally): `RUST_LOG=info KUBECONFIG=~/test.cluster.config ./target/debug/controller` +1. Create or provide access to a valid cluster configuration by setting KUBECONFIG (can be done in the command line) ... + for the sake of this, the config is assumed to be in ~/test.cluster.config +1. Build the repo with all default features by running `cargo build` + > Note: By default, the Agent does not have embedded Discovery Handlers.
To allow embedded Discovery Handlers in the + > Agent, turn on the `agent-full` feature and the feature for each Discovery Handler you wish to embed -- Debug echo + > is always included if `agent-full` is turned on. For example, to build an Agent with OPC UA, ONVIF, udev, and + > debug echo Discovery Handlers: `cargo build --manifest-path agent/Cargo.toml --features "agent-full udev-feat + > opcua-feat onvif-feat"`. +1. Run the desired component + + Run the **Controller** locally with info-level logging: `RUST_LOG=info KUBECONFIG=~/test.cluster.config + ./target/debug/controller` + + Run the **Agent** locally with info-level logging: + ```sh + sudo DEBUG_ECHO_INSTANCES_SHARED=true ENABLE_DEBUG_ECHO=1 RUST_LOG=info KUBECONFIG=~/test.cluster.config DISCOVERY_HANDLERS_DIRECTORY=~/tmp/akri AGENT_NODE_NAME=myNode HOST_CRICTL_PATH=/usr/bin/crictl HOST_RUNTIME_ENDPOINT=/var/run/dockershim.sock HOST_IMAGE_ENDPOINT=/var/run/dockershim.sock ./target/debug/agent + ``` + > Note: The environment variables `HOST_CRICTL_PATH`, `HOST_RUNTIME_ENDPOINT`, and `HOST_IMAGE_ENDPOINT` are for + > slot-reconciliation (making sure Pods that no longer exist are not still claiming Akri resources). The values of + > these vary based on Kubernetes distribution. The above is for vanilla Kubernetes. For MicroK8s, use + > `HOST_CRICTL_PATH=/usr/local/bin/crictl HOST_RUNTIME_ENDPOINT=/var/snap/microk8s/common/run/containerd.sock + > HOST_IMAGE_ENDPOINT=/var/snap/microk8s/common/run/containerd.sock` and for K3s, use + > `HOST_CRICTL_PATH=/usr/local/bin/crictl HOST_RUNTIME_ENDPOINT=/run/k3s/containerd/containerd.sock + > HOST_IMAGE_ENDPOINT=/run/k3s/containerd/containerd.sock`. + + To run **Discovery Handlers** locally, simply navigate to the Discovery Handler under + `akri/discovery-handler-modules/` and run it as a privileged user with `cargo run`, using the + `DISCOVERY_HANDLERS_DIRECTORY` environment variable to set where the Discovery Handler socket should be created. For example, to run the ONVIF Discovery Handler locally: + ```sh + cd akri/discovery-handler-modules/onvif-discovery-handler/ + sudo -s + RUST_LOG=info DISCOVERY_HANDLERS_DIRECTORY=~/tmp/akri AGENT_NODE_NAME=myNode cargo run + ``` + To run the [debug echo Discovery Handler](#testing-with-debug-echo-discovery-handler), an environment variable, + `DEBUG_ECHO_INSTANCES_SHARED`, must be set to specify whether it should register with the Agent as discovering + shared or unshared devices. Run the debug echo Discovery Handler to discover mock unshared devices like so: + ```sh + cd akri/discovery-handler-modules/debug-echo-discovery-handler/ + RUST_LOG=info DEBUG_ECHO_INSTANCES_SHARED=false DISCOVERY_HANDLERS_DIRECTORY=~/tmp/akri AGENT_NODE_NAME=myNode cargo run + ``` ### To build containers `Makefile` has been created to help with the more complicated task of building the Akri components and containers for the various supported platforms.
@@ -111,12 +150,15 @@ By default, `Makefile` will try to create containers with tag following this for PREFIX=$CONTAINER_REPOSITORY make akri # To make a specific component: PREFIX=$CONTAINER_REPOSITORY make akri-controller -PREFIX=$CONTAINER_REPOSITORY make akri-agent PREFIX=$CONTAINER_REPOSITORY make akri-udev PREFIX=$CONTAINER_REPOSITORY make akri-onvif PREFIX=$CONTAINER_REPOSITORY make akri-opcua-monitoring PREFIX=$CONTAINER_REPOSITORY make akri-anomaly-detection PREFIX=$CONTAINER_REPOSITORY make akri-streaming +PREFIX=$CONTAINER_REPOSITORY make akri-agent +# To make an Agent with embedded Discovery Handlers, turn on the `agent-full` feature along with the +# feature for any Discovery Handlers that should be embedded. +PREFIX=$CONTAINER_REPOSITORY BUILD_SLIM_AGENT=0 AGENT_FEATURES="agent-full onvif-feat opcua-feat udev-feat" make akri-agent-full # To make a specific component on specific platform(s): PREFIX=$CONTAINER_REPOSITORY BUILD_AMD64=1 BUILD_ARM32=1 BUILD_ARM64=1 make akri-streaming @@ -135,6 +177,9 @@ source /home/$SUDO_USER/.cargo/env exit ``` +#### More information about Akri build +For more detailed information about the Akri build infrastructure, review the [Akri build infrastructure document](./building.md). + ## Install Akri with newly built containers When installing Akri using helm, you can set the `imagePullSecrets`, `image.repository` and `image.tag` [Helm values](../deployment/helm/values.yaml) to point to your newly created containers. For example, to install Akri with custom Controller and Agent containers, run the following, specifying the `image.tag` version to reflect [version.txt](../version.txt): ```bash @@ -175,3 +220,57 @@ helm get manifest akri | less ### Helm Upgrade To modify an Akri installation to reflect a new state, you can use [`helm upgrade`](https://helm.sh/docs/helm/helm_upgrade/). See the [Customizing an Akri Installation document](./customizing-akri-installation.md) for further explanation. + +## Testing with Debug Echo Discovery Handler +In order to kick-start using and debugging Akri, a debug echo Discovery Handler has been created. See its +[documentation](./debug-echo-configuration.md) to start using it. + +## Naming Guidelines + +One of the [two hard things](https://martinfowler.com/bliki/TwoHardThings.html) in Computer Science is naming things. It is proposed that Akri adopt naming guidelines to make developers' lives easier by providing consistency and reducing naming complexity. + +Akri existed before naming guidelines were documented and may not employ the guidelines summarized here. However, it is hoped that developers will, at least, consider these guidelines when extending Akri.
+ +### General Principles + ++ Akri uses English ++ Akri is written principally in Rust, and Rust [naming](https://rust-lang.github.io/api-guidelines/naming.html) conventions are used ++ Types need not be included in names unless ambiguity would result ++ Shorter, simpler names are preferred + +### Akri Discovery Handlers + +Various Discovery Handlers have been developed: `debug_echo`, `onvif`, `opcua`, `udev` + +Guidance: + ++ `snake_case` names ++ (widely understood) initialisms|acronyms are preferred + +### Akri Brokers + +Various Brokers have been developed: `onvif-video-broker`, `opcua-monitoring-broker`, `udev-video-broker` + +Guidance: + ++ Broker names should reflect Discovery Handler (Protocol) names and be suffixed `-broker` ++ Use programming language-specific naming conventions when developing Brokers in non-Rust languages + +> **NOTE** Even though the initialism [ONVIF](https://en.wikipedia.org/wiki/ONVIF) includes "Video", the specification is broader than video, and the broker name adds specificity by including the word "video" (`onvif-video-broker`) in order to effectively describe its functionality. + +### Kubernetes Resources + +Various Kubernetes Resources have been developed: + ++ CRDs: `Configurations`, `Instances` ++ Instances: `akri-agent-daemonset`, `akri-controller-deployment`, `akri-onvif`, `akri-opcua`, `akri-udev` + +Guidance: + ++ Kubernetes Convention is that resources (e.g. `DaemonSet`) and CRDs use (upper) CamelCase ++ Akri Convention is that Akri Kubernetes resources be prefixed `akri-`, e.g. `akri-agent-daemonset` ++ Names combining words should use hyphens (`-`) to separate the words, e.g. `akri-debug-echo` + +> **NOTE** `akri-agent-daemonset` contradicts the general principle of not including types; if it had been named after these guidelines were drafted, it would be named `akri-agent`. +> +> Kubernetes' resources are strongly typed and the typing is evident through the CLI e.g. `kubectl get daemonsets/akri-agent-daemonset` and through a resource's `Kind` (e.g. `DaemonSet`). Including such types in the name is redundant. diff --git a/docs/discovery-handler-development.md b/docs/discovery-handler-development.md new file mode 100644 index 000000000..e88b940c0 --- /dev/null +++ b/docs/discovery-handler-development.md @@ -0,0 +1,317 @@ +# Implementing a new Discovery Handler +Akri has [implemented discovery via several protocols](./roadmap.md#currently-supported-discovery-handlers) with sample +brokers and applications to demonstrate usage. However, there may be protocols you would like to use to discover +resources that have not been implemented as Discovery Handlers yet. To enable the discovery of resources via a new +protocol, you will implement a Discovery Handler (DH), which does discovery on behalf of the Agent. A Discovery Handler +is anything that implements the `DiscoveryHandler` service and `Registration` client defined in [Akri's discovery gRPC +proto file](../discovery-utils/proto/discovery.proto). These DHs run as their own Pods and are expected to register with +the Agent, which hosts the `Registration` service defined in the gRPC interface. + +This document will walk you through the development steps to implement a Discovery Handler. If you would rather walk +through an example, see Akri's [extensibility demo](./extensibility.md), which walks through creating a Discovery +Handler that discovers HTTP-based devices.
This document will also cover the steps to get your Discovery Handler added +to Akri, should you wish to [contribute it back](./contributing.md). + +Before continuing, you may wish to reference the [Akri architecture](./architecture.md) and [Akri +agent](./agent-in-depth.md) documentation. They will provide a good understanding of Akri, how it works, and what +components it is composed of. + +A Discovery Handler can be written in any language using protobuf; however, Akri has provided a template for +accelerating the development of Rust Discovery Handlers. This document will walk through both of those options. If using the +Rust template, still read through the non-Rust section to gain context on the Discovery Handler interface. + +## Creating a Discovery Handler using Akri's Discovery Handler proto file +This section covers how to use [Akri's discovery gRPC proto file](../discovery-utils/proto/discovery.proto) to create a Discovery Handler in the +language of your choosing. It consists of three steps: +1. Registering your Discovery Handler with the Akri Agent +1. Specifying device filtering in a Configuration +1. Implementing the `DiscoveryHandler` service + +### Registering with the Akri Agent +Discovery Handlers and Agents run on each worker Node in a cluster. A Discovery Handler should register with the Agent +running on its Node at the Agent's registration socket, which defaults to `/var/lib/akri/agent-registration.sock`. The +directory can be changed when installing Akri by setting `agent.host.discoveryHandlers`. For example, to request that +the Agent's `Registration` service live at `~/akri/sockets/agent-registration.sock`, set +`agent.host.discoveryHandlers=~/akri/sockets` when installing Akri. The Agent hosts the `Registration` service defined +in [Akri's discovery interface](../discovery-utils/proto/discovery.proto) on this socket. + +When registering with the Agent, a Discovery Handler specifies its name (the one that will later be specified in +Configurations), the endpoint of its Discovery Handler service, and whether the devices it discovers are shared +(visible to multiple nodes). + +```proto +message RegisterDiscoveryHandlerRequest { + // Name of the `DiscoveryHandler`. This name is specified in an + // Akri Configuration, to request devices discovered by this `DiscoveryHandler`. + string name = 1; + // Endpoint for the registering `DiscoveryHandler` + string endpoint = 2; + // Specifies the type of endpoint. + enum EndpointType { + UDS = 0; + NETWORK = 1; + } + EndpointType endpoint_type = 3; + // Specifies whether this device could be used by multiple nodes (e.g. an IP camera) + // or can only ever be discovered by a single node (e.g. a local USB device) + bool shared = 4; +} +``` + +Note also that a Discovery Handler must specify an `EndpointType` of either `UDS` or `NETWORK` in the +`RegisterDiscoveryHandlerRequest`. While Discovery Handlers must register with the Agent's `Registration` service over +UDS, a `DiscoveryHandler` service can run over UDS or an IP-based endpoint. However, the current convention is to use +UDS for both registration and discovery. + + +### Specifying device filtering in a Configuration +Discovery Handlers are passed information about what subset of devices to discover from a Configuration's +`discoveryDetails`.
Akri's Configuration CRD takes in [`DiscoveryHandlerInfo`](../shared/src/akri/configuration.rs), +which is defined structurally in Rust as follows: +```rust +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(rename_all = "camelCase")] +pub struct DiscoveryHandlerInfo { + pub name: String, + #[serde(default)] + pub discovery_details: String, +} +``` +When creating a Discovery Handler, you must decide what name to give it and add any details you would like your +Discovery Handler to receive in the `discovery_details` string. The Agent passes this string to Discovery Handlers as +part of a `DiscoverRequest`. A Discovery Handler must then parse this string -- Akri's built-in Discovery Handlers store +an expected structure in it as serialized YAML -- to determine what to discover, filter out of discovery, and so on. + +For example, a Configuration that uses the ONVIF Discovery Handler, which allows filtering IP cameras by IP address, MAC +address, and scopes, looks like the following:
+```yaml
+apiVersion: akri.sh/v0
+kind: Configuration
+metadata:
+  name: http
+spec:
+  discoveryHandler:
+    name: onvif
+    discoveryDetails: |+
+      ipAddresses:
+        action: Exclude
+        items:
+        - 10.0.0.1
+        - 10.0.0.2
+      macAddresses:
+        action: Exclude
+        items: []
+      scopes:
+        action: Include
+        items:
+        - onvif://www.onvif.org/name/GreatONVIFCamera
+        - onvif://www.onvif.org/name/AwesomeONVIFCamera
+      discoveryTimeoutSeconds: 2
+```
+The `discoveryHandler.name` must match `RegisterDiscoveryHandlerRequest.name` the Discovery Handler uses when +registering with the Agent. Once you know what will be passed to your Discovery Handler, it's time to implement the +discovery functionality. + +### Implementing the `DiscoveryHandler` service +The service should have all the functionality desired for discovering devices via your protocol and filtering for only +the desired set. Each device a Discovery Handler discovers is represented by the `Device` type, as shown in a subset of +the [discovery proto file](../discovery-utils/proto/discovery.proto) below. A Discovery Handler sets a unique `id` for +the device, device connection information that needs to be set as environment variables in Pods that request the device +in `properties`, and any mounts or devices that should be available to requesting Pods. + +```proto +message DiscoverResponse { + // List of discovered devices + repeated Device devices = 1; +} + +message Device { + // Identifier for this device + string id = 1; + // Properties that identify the device. These are stored in the device's instance + // and set as environment variables in the device's broker Pods. May be information + // about where to find the device such as an RTSP URL or a device node (e.g. `/dev/video1`) + map<string, string> properties = 2; + // Optionally specify mounts for Pods that request this device as a resource + repeated Mount mounts = 3; + // Optionally specify device information to be mounted for Pods that request this device as a resource + repeated DeviceSpec device_specs = 4; +} +``` + +Note that `discover` creates a streamed connection with the Agent, where the Agent gets the receiving end of the channel and +the Discovery Handler sends device updates via the sending end of the channel. If the Agent drops its end, the Discovery +Handler should stop discovery and attempt to re-register with the Agent. The Agent may drop its end due to an error or a +deleted Configuration.
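+When debugging registration, it can help to confirm the sockets involved. The following is a hedged sketch (the default directory `/var/lib/akri` is configurable via `agent.host.discoveryHandlers`, and the Discovery Handler's socket name is assumed to match its registered name):
+```bash
+# Sketch: from the node, verify the Agent's Registration socket exists.
+ls -l /var/lib/akri/agent-registration.sock
+# After your Discovery Handler registers, its own service socket should
+# appear in the same directory (named after the Discovery Handler).
+ls -l /var/lib/akri/
+```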
+ +## Creating a Discovery Handler in Rust using a template +Rust Discovery Handler development can be kick-started using Akri's [Discovery Handler template](https://github.com/kate-goldenring/akri-discovery-handler-template) and +[`cargo-generate`](https://github.com/cargo-generate/cargo-generate). Specify the name of your project. +```sh +cargo install cargo-generate +cargo generate --git https://github.com/kate-goldenring/akri-discovery-handler-template.git --name akri-discovery-handler +``` +This template abstracts away the work of registering with the Agent and creating the Discovery Handler service. All you +need to do is specify the Discovery Handler name, whether discovered devices are sharable, implement discovery, and +build the Discovery Handler. + +1. Specifying the Discovery Handler name and whether devices are sharable + + Inside the newly created `akri-discovery-handler` project, navigate to `main.rs`. It contains all the logic to register our + `DiscoveryHandler` with the Akri Agent. We only need to specify the `DiscoveryHandler` name and whether the devices + discovered by our `DiscoveryHandler` can be shared. This is the name the Discovery Handler uses when registering + with the Agent. It is later specified in a Configuration to tell the Agent which Discovery Handler to use. For + example, in Akri's [udev Discovery Handler](../discovery-handler-modules/udev-discovery-handler/src/main.rs), `name` + is set to `udev` and `shared` to `false` as all devices are locally attached to nodes. The Discovery Handler name + also resolves to the name of the socket the template serves the Discovery Handler on. +1. Implementing discovery + + A `DiscoveryHandlerImpl` Struct has been created (in `discovery_handler.rs`) that minimally + implements the `DiscoveryHandler` service. Fill in the `discover` function, which returns the list of discovered `devices`. +1. Build the Discovery Handler container + + Build your Discovery Handler and push it to your container registry. To do so, + we simply need to run this step from the base folder of the Akri repo: + ```bash + HOST="ghcr.io" + USER=[[GITHUB-USER]] + DH="discovery-handler" + TAGS="v1" + + DH_IMAGE="${HOST}/${USER}/${DH}" + DH_IMAGE_TAGGED="${DH_IMAGE}:${TAGS}" + + docker build \ + --tag=${DH_IMAGE_TAGGED} \ + --file=./Dockerfile.discovery-handler \ + . && \ + docker push ${DH_IMAGE_TAGGED} + ``` + + Save the name of your image. We will pass it into our Akri installation command when we are ready to deploy our + Discovery Handler. + +## Deploy Akri with your custom Discovery Handler +Now that you have created a Discovery Handler, deploy Akri and see how it discovers the devices and creates Akri +Instances for each Device. + +> Optional: If you've previously installed Akri and wish to reset, you may: +> +> ```bash +> # Delete Akri Helm +> sudo helm delete akri +> ``` + +Akri has provided Helm templates for custom Discovery Handlers and their Configurations. These templates are provided as +a starting point. They may need to be modified to meet the needs of a Discovery Handler. When installing Akri, specify +that you want to deploy a custom Discovery Handler as a DaemonSet by setting `custom.discovery.enabled=true`. Specify +the container for that DaemonSet as the Discovery Handler that you built +[above](#build-the-discovery-handler-container) by setting `custom.discovery.image.repository=$DH_IMAGE` and +`custom.discovery.image.tag=$TAGS`.
To automatically deploy a custom Configuration, set +`custom.configuration.enabled=true`. Customize the Configuration's `discovery_details` string to contain any filtering +information: `custom.configuration.discoveryDetails=<filtering info>`. + +Also set the name the Discovery Handler will register under (`custom.configuration.discoveryHandlerName`) and a name for +the Discovery Handler and Configuration (`custom.discovery.name` and `custom.configuration.name`). All these settings +come together as the following Akri installation command: +> Note: Be sure to consult the [user guide](./user-guide.md) to see whether your Kubernetes distribution needs any +> additional configuration. +```bash + helm repo add akri-helm-charts https://deislabs.github.io/akri/ + helm install akri akri-helm-charts/akri \ + --set imagePullSecrets[0].name="crPullSecret" \ + --set custom.discovery.enabled=true \ + --set custom.discovery.image.repository=$DH_IMAGE \ + --set custom.discovery.image.tag=$TAGS \ + --set custom.discovery.name=akri-<name>-discovery \ + --set custom.configuration.enabled=true \ + --set custom.configuration.name=akri-<name> \ + --set custom.configuration.discoveryHandlerName=<name> \ + --set custom.configuration.discoveryDetails=<filtering info> + ``` + +> Note: if your Discovery Handler's `discoveryDetails` cannot be easily set using Helm, generate a Configuration file +> and modify it as needed before applying it to the cluster. (The following renders only the Configuration by disabling +> the other components while keeping `custom.configuration.enabled`.) +> ```bash +> helm install akri akri-helm-charts/akri \ +> --set imagePullSecrets[0].name="crPullSecret" \ +> --set custom.discovery.enabled=true \ +> --set custom.discovery.image.repository=$DH_IMAGE \ +> --set custom.discovery.image.tag=$TAGS \ +> --set custom.discovery.name=akri-<name>-discovery \ +> --set custom.configuration.enabled=true \ +> --set custom.configuration.name=akri-<name> \ +> --set custom.configuration.discoveryHandlerName=<name> \ +> --set custom.configuration.discoveryDetails=to-modify \ +> --set rbac.enabled=false \ +> --set controller.enabled=false \ +> --set agent.enabled=false > configuration.yaml +> ``` +> After modifying the file, apply it to the cluster using standard kubectl: +> ```bash +> kubectl apply -f configuration.yaml +> ``` + +Watch as the Agent, Controller, and Discovery Handler Pods are spun up and as Instances are created for each of the +discovered devices. +```bash +watch kubectl get pods,akrii +``` + +Inspect the Instances' `brokerProperties`. They will be set as environment +variables in Pods that request the Instance's/device's resource. +```bash +kubectl get akrii -o wide +``` + +If you simply wanted Akri to expose discovered devices to the cluster as Kubernetes resources, you could stop here. If +you have a workload that could utilize one of these resources, you could [manually deploy pods that request them as +resources](./requesting-akri-resources.md). Alternatively, you could have Akri automatically deploy workloads to +discovered devices. We call these workloads brokers. To quickly see this, deploy empty nginx pods to discovered +resources by updating our Configuration to include a broker PodSpec.
+```bash + helm upgrade akri akri-helm-charts/akri \ + --set imagePullSecrets[0].name="crPullSecret" \ + --set custom.discovery.enabled=true \ + --set custom.discovery.image.repository=$DH_IMAGE \ + --set custom.discovery.image.tag=$TAGS \ + --set custom.discovery.name=akri-<name>-discovery \ + --set custom.configuration.enabled=true \ + --set custom.configuration.name=akri-<name> \ + --set custom.configuration.discoveryHandlerName=<name> \ + --set custom.configuration.discoveryDetails=<filtering info> \ + --set custom.brokerPod.image.repository=nginx + watch kubectl get pods,akrii +``` +The empty nginx brokers do not do anything with the devices they've requested. Exec into the Pods to confirm that the +`Device.properties` (Instance's `brokerProperties`) were set as environment variables. + +```sh +sudo kubectl exec -i <broker pod name> -- /bin/sh -c "printenv" +``` + +## Create a broker +Now that you can discover new devices, see our [documentation on creating brokers](./broker-development.md) to utilize +discovered devices. + +## Contributing your Discovery Handler back to Akri +Now that you have a working Discovery Handler and broker, we'd love for you to contribute your code to Akri. The +following steps will need to be completed to do so: +1. Create an Issue with a feature request for this Discovery Handler. +2. Create a proposal and put in a PR for it to be added to the [proposals folder](./proposals). +3. Implement your Discovery Handler and a document named `/akri/docs/<name>-configuration.md` on how to create a + Configuration that uses your Discovery Handler. +4. Create a pull request that includes the Discovery Handler and Dockerfile in the [Discovery Handler + modules](../discovery-handler-modules) and [build](../build/containers) directories, respectively. + Be sure to also update the minor version of Akri. See [contributing](./contributing.md#versioning) to learn more + about our versioning strategy. + +For a Discovery Handler to be considered fully implemented, the following must be included in the PR: +1. A new [`DiscoveryHandler`](../discovery-utils/proto/discovery.proto) implementation +1. A [sample broker](./broker-development.md) for the new resource +1. A sample Configuration that uses the new protocol in the form of a Helm template and values. +1. (Optional) A sample end application that utilizes the services exposed by the Configuration +1. Dockerfile[s] for broker [and sample app] and associated update to the [makefile](../build/akri-containers.mk) +1. GitHub workflow[s] for broker [and sample app] to build containers and push to Akri container repository +1. Documentation on how to use the new sample Configuration, like the [udev Configuration + document](./udev-configuration.md) diff --git a/docs/end-to-end-demo-rpi4.md b/docs/end-to-end-demo-rpi4.md index cfaa40122..5c487f7a9 100644 --- a/docs/end-to-end-demo-rpi4.md +++ b/docs/end-to-end-demo-rpi4.md @@ -1,5 +1,14 @@ # Raspberry Pi 4 Demo -This demo will demonstrate how to get Akri working on a **Raspberry Pi 4**, all the way from discovering local video cameras to the footage being streamed on a web application. This will show how Akri can dynamically discover devices, deploy brokers pods to perform some action on a device (in this case grabbing video frames and serving them over gRPC), and deploy broker services for obtaining the results of that action. +This demo will demonstrate how to get Akri working on a **Raspberry Pi 4** and walk through using Akri to discover mock USB cameras attached to nodes in a Kubernetes cluster.
You'll see how Akri automatically deploys workloads to pull frames from the cameras. We will then deploy a streaming application that will point to services automatically created by Akri to access the video frames from the workloads. + +The following will be covered in this demo: +1. Setting up a single-node cluster on a Raspberry Pi 4 +1. Setting up mock udev video devices +1. Installing Akri via Helm with settings to create your Akri udev Configuration +1. Inspecting Akri +1. Deploying a streaming application +1. Cleanup +1. Going beyond the demo ## Set up single node cluster on a Raspberry Pi 4 1. Using instructions found [here](https://ubuntu.com/download/raspberry-pi), download 64-bit Ubuntu:18.04 @@ -34,80 +43,110 @@ This demo will demonstrate how to get Akri working on a **Raspberry Pi 4**, all kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" ``` - ## Set up mock udev video devices 1. Open a new terminal and ssh into your ubuntu server that your cluster is running on. -1. Install a kernel module to make v4l2 loopback video devices. Learn more about this module [here](https://github.com/umlaeute/v4l2loopback). +1. To set up fake USB video devices, install the v4l2loopback kernel module and its prerequisites. Learn more about v4l2 loopback [here](https://github.com/umlaeute/v4l2loopback). ```sh + sudo apt update + sudo apt -y install linux-headers-$(uname -r) + sudo apt -y install linux-modules-extra-$(uname -r) + sudo apt -y install dkms curl http://deb.debian.org/debian/pool/main/v/v4l2loopback/v4l2loopback-dkms_0.12.5-1_all.deb -o v4l2loopback-dkms_0.12.5-1_all.deb sudo dpkg -i v4l2loopback-dkms_0.12.5-1_all.deb ``` -1. Insert the kernel module, creating /dev/video1 and /dev/video2 devnodes. To create different number video devices modify the `video_nr` argument. + > **Note**: If you are not able to install the Debian package of v4l2loopback due to using a different + > Linux kernel, you can clone the repo, build the module, and set up the module dependencies + > like so: + > ```sh + > git clone https://github.com/umlaeute/v4l2loopback.git + > cd v4l2loopback + > make && sudo make install + > sudo make install-utils + > sudo depmod -a + > ``` +1. "Plug in" two cameras by inserting the kernel module. To create a different number of video devices, modify the `video_nr` argument. ```sh sudo modprobe v4l2loopback exclusive_caps=1 video_nr=1,2 ``` -1. Install Gstreamer main packages +1. Confirm that two video device nodes (video1 and video2) have been created. + ```sh + ls /dev/video* + ``` +1. Install the necessary Gstreamer packages. ```sh sudo apt-get install -y \ libgstreamer1.0-0 gstreamer1.0-tools gstreamer1.0-plugins-base \ gstreamer1.0-plugins-good gstreamer1.0-libav ``` -1. Open two new terminals (one for each fake video device), and in each terminal ssh into your Rasperry Pi. -1. In one terminal, stream a test video of a white ball moving around a black background from the first fake video device. - ```sh - sudo gst-launch-1.0 -v videotestsrc pattern=ball ! "video/x-raw,width=640,height=480,framerate=10/1" ! avenc_mjpeg ! v4l2sink device=/dev/video1 - ``` - If this generates an error, be sure that there are no existing video streams targeting /dev/video1 (you can query with commands like this: `ps -aux | grep gst-launch-1.0 | grep "/dev/video1"`). -1. In the other terminal, stream a test video of SMPTE 100%% color bars moving horizontally from the second fake video device.
- ```sh - sudo gst-launch-1.0 -v videotestsrc pattern=smpte horizontal-speed=1 ! "video/x-raw,width=640,height=480,framerate=10/1" ! avenc_mjpeg ! v4l2sink device=/dev/video2 - ``` - If this generates an error, be sure that there are no existing video streams targeting /dev/video1 (you can query with commands like this: `ps -aux | grep gst-launch-1.0 | grep "/dev/video2"`). +1. Now that our cameras are set up, let's use Gstreamer to pass fake video streams through them. + ```sh + mkdir camera-logs + sudo gst-launch-1.0 -v videotestsrc pattern=ball ! "video/x-raw,width=640,height=480,framerate=10/1" ! avenc_mjpeg ! v4l2sink device=/dev/video1 > camera-logs/ball.log 2>&1 & + sudo gst-launch-1.0 -v videotestsrc pattern=smpte horizontal-speed=1 ! "video/x-raw,width=640,height=480,framerate=10/1" ! avenc_mjpeg ! v4l2sink device=/dev/video2 > camera-logs/smpte.log 2>&1 & + ``` + > **Note**: If this generates an error, be sure that there are no existing video streams targeting the video device nodes by running the following and then re-running the previous command: + > ```sh + > if pgrep gst-launch-1.0 > /dev/null; then + > sudo pkill -9 gst-launch-1.0 + > fi + > ``` +## Installing Akri +You tell Akri what you want to find with an Akri Configuration, which is one of Akri's Kubernetes custom resources. The Akri Configuration is simply a `yaml` file that you apply to your cluster. Within it, you specify three things: +1. a Discovery Handler +2. any additional device filtering +3. an image for a Pod (that we call a "broker") that you want to be automatically deployed to utilize each discovered device + +For this demo, we will specify (1) Akri's udev Discovery Handler, which is used to discover devices in the Linux device file system. Akri's udev Discovery Handler supports (2) filtering by udev rules. We want to find all video devices in the Linux device file system, which can be specified with the udev rule `KERNEL=="video[0-9]*"`. Say we wanted to be more specific and only discover devices made by Great Vendor, we could adjust our rule to be `KERNEL=="video[0-9]*"\, ENV{ID_VENDOR}=="Great Vendor"`. For (3) a broker Pod image, we will use a sample container that Akri has provided that pulls frames from the cameras and serves them over gRPC. + +All of Akri's components can be deployed by specifying values in its Helm chart during an installation. Instead of having to build a Configuration from scratch, Akri has provided [Helm templates](../deployment/helm/templates) for Configurations for each supported Discovery Handler. Let's customize the generic [udev Configuration Helm template](../deployment/helm/templates/udev-configuration.yaml) with our three specifications above. We can also set the name for the Configuration to be `akri-udev-video`. -## Set up Akri -1. Install Akri Helm chart and enable the udev video configuration which will search for all video devices on the node, as specified by the udev rule `KERNEL=="video[0-9]*"` in the configuration. Since the /dev/video1 and /dev/video2 devices are running on this node, the Akri Agent will discover them and create an Instance for each camera. Watch two broker pods spin up, one for each camera. +In order for the Agent to know how to discover video devices, the udev Discovery Handler must exist. Akri supports an Agent image that includes all supported Discovery Handlers. This Agent will be used if `agent.full=true` is set.
By default, a slim Agent without any embedded Discovery Handlers is deployed and the required Discovery Handlers can be deployed as DaemonSets. This demo will use that strategy, deploying the udev Discovery Handlers by specifying `udev.discovery.enabled=true` when installing Akri. + +1. Add the Akri Helm chart and run the install command, setting Helm values as described above. ```sh helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set udev.enabled=true \ - --set udev.name=akri-udev-video \ - --set udev.udevRules[0]='KERNEL=="video[0-9]*"' \ - --set udev.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker:latest-dev" - watch kubectl get pods,akric,akrii -o wide - ``` - Run `kubectl get crd`, and you should see the crds listed. - Run `kubectl get pods -o wide`, and you should see the Akri pods. - Run `kubectl get akric`, and you should see `akri-udev-video`. If IP cameras were discovered and pods spun up, the instances can be seen by running `kubectl get akrii` and further inspected by running `kubectl get akrii akri-udev-video- -o yaml` - More information about the Akri Helm charts can be found in the [user guide](./user-guide.md#understanding-akri-helm-charts). + --set udev.discovery.enabled=true \ + --set udev.configuration.enabled=true \ + --set udev.configuration.name=akri-udev-video \ + --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' \ + --set udev.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker" + ``` + +## Inspecting Akri +After installing Akri, since the /dev/video1 and /dev/video2 devices are running on this node, the Akri Agent will discover them and create an Instance for each camera. -1. Inspect the two instances, seeing that the correct devnodes in the metadata and that one of the usage slots for each instance was reseved for this node. +1. List all that Akri has automatically created and deployed, namely the Akri Configuration we created when installing Akri, two Instances (the Akri custom resources that represent the devices), two broker Pods (one for each camera), a service for each broker Pod, a service for all brokers, the Controller Pod, the Agent Pod, and the udev Discovery Handler Pod. + ```sh + watch kubectl get pods,akric,akrii,services -o wide + ``` +Look at the Configuration and Instances in more detail. +1. Inspect the Configuration that was created via the Akri udev Helm template and values that were set when installing Akri by running the following. + ```sh + kubectl get akric -o yaml + ``` +1. Inspect the two Instances. Notice that in the `brokerProperties` of each instance, you can see the device nodes (`/dev/video1` or `/dev/video2`) that the Instance represents. The `brokerProperties` of an Instance are set as environment variables in the broker Pods that are utilizing the device the Instance represents. This told the broker which device to connect to. We can also see in the Instance a usage slot and that it was reserved for this node. Each Instance represents a device and its usage. ```sh kubectl get akrii -o yaml ``` -1. Deploy the streaming web application and watch a pod spin up for the app. + If this was a shared device (such as an IP camera), you may have wanted to increase the number of nodes that could use the same device by specifying `capacity`. There is a `capacity` parameter for each Configuration, which defaults to `1`.
Its value could have been increased when installing Akri (via `--set udev.configuration.capacity=2` to allow 2 nodes to use the same device) and more usage slots (the number of usage slots is equal to `capacity`) would have been created in the Instance. +## Deploying a streaming application +1. Deploy a video streaming web application that points to both the Configuration and Instance level services that were automatically created by Akri. ```sh - # This file url is not available while the Akri repo is private. To get a valid url, open - # https://github.com/deislabs/akri/blob/main/deployment/samples/akri-video-streaming-app.yaml - # and click the "Raw" button ... this will generate a link with a token that can be used below. curl -o akri-video-streaming-app.yaml - kubectl apply -f akri-video-streaming-app.yaml - watch kubectl get pods -o wide + kubectl apply -f https://raw.githubusercontent.com/deislabs/akri/main/deployment/samples/akri-video-streaming-app.yaml + watch kubectl get pods ``` -1. Determine which port the service is running on. +1. Determine which port the service is running on. Be sure to save this port number for the next step. ```sh - kubectl get services - ``` - Something like the following will be displayed. The ids of the camera services (`udev-camera--svc`) will likely be different as they are determined by hostname. - ``` - NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE - kubernetes ClusterIP 10.XXX.XXX.X 443/TCP 2d5h - streaming NodePort 10.XXX.XXX.XX 80:31143/TCP 41m - udev-camera-901a7b-svc ClusterIP 10.XXX.XXX.XX 80/TCP 42m - udev-camera-e2548e-svc ClusterIP 10.XXX.XXX.XX 80/TCP 42m - udev-camera-svc ClusterIP 10.XXX.XXX.XXX 80/TCP 42m + kubectl get service/akri-video-streaming-app --output=jsonpath='{.spec.ports[?(@.name=="http")].nodePort}' && echo + ``` +1. SSH port forwarding can be used to access the streaming application. In a new terminal, enter your ssh command to access your VM, followed by the port forwarding request. The following command will use port 50000 on the host. Feel free to change it if it is not available. Be sure to replace `<NodePort>` with the port number output in the previous step. + ```sh + ssh someuser@<VM IP address> -L 50000:localhost:<NodePort> ``` -1. Navigate in your browser to http://ip-address:31143/ where ip-address is the IP address of your ubuntu VM and the port number is from the output of `kubectl get services`. You should see three videos. The top video streams frames from all udev cameras (from the overarching `udev-camera-svc` service), while each of the bottom videos displays the streams from each of the individual camera services (`udev-camera-901a7b-svc` and `udev-camera-e2548e-svc`). Note: the streaming web application displays at a rate of 1 fps. + > **Note** we've noticed issues with port forwarding with WSL 2. Please use a different terminal. +1. Navigate to `http://localhost:50000/`. The large feed points to the Configuration-level service (`udev-camera-svc`), while the bottom feed points to the service for each Instance or camera (`udev-camera-svc-<id>`). + ## Cleanup 1. Bring down the streaming service. @@ -116,28 +155,34 @@ This demo will demonstrate how to get Akri working on a **Raspberry Pi 4**, all kubectl delete deployment akri-video-streaming-app watch kubectl get pods ``` -1. Delete the configuration and watch the instances, pods, and services be deleted.
```sh kubectl delete akric akri-udev-video watch kubectl get pods,services,akric,akrii -o wide ``` -1. Bring down the Akri Agent, Controller, and CRDs. +1. If you are done using Akri, it can be uninstalled via Helm. ```sh helm delete akri + ``` +1. Delete Akri's CRDs. + ```sh kubectl delete crd instances.akri.sh kubectl delete crd configurations.akri.sh ``` -1. Stop video streaming on dummy devices and remove kernel module. +1. Stop video streaming from the video devices. + ```sh + if pgrep gst-launch-1.0 > /dev/null; then + sudo pkill -9 gst-launch-1.0 + fi + ``` +1. "Unplug" the fake video devices by removing the kernel module. ```sh - # If terminal has timed out, search for process to kill. - # ps ax | grep gst-launch-1.0 - # sudo kill sudo modprobe -r v4l2loopback ``` ## Going beyond the demo -1. Plug in real cameras! You can [pass environment variables](./udev-video-sample.md#modifying-ther-brokerpod-spec) to the frame server broker to specify the format, resolution width/height, and frames per second of your cameras. -1. Apply the [ONVIF configuration](onvif-configuration.md) and make the streaming app display footage from both the local video devices and onvif cameras. To do this, modify the [video streaming yaml](../deployment/samples/akri-video-streaming-app.yaml) as described in the inline comments in order to create a larger service that aggregates the output from both the `udev-camera-svc` service and `onvif-camera-svc` service. +1. Plug in real cameras! You can [pass environment variables](./udev-video-sample.md#modifying-the-brokerpod-spec) to the frame server broker to specify the format, resolution width/height, and frames per second of your cameras. +1. Apply the [ONVIF Configuration](onvif-configuration.md) and make the streaming app display footage from both the local video devices and onvif cameras. To do this, modify the [video streaming yaml](../deployment/samples/akri-video-streaming-app.yaml) as described in the inline comments in order to create a larger service that aggregates the output from both the `udev-camera-svc` service and `onvif-camera-svc` service. 1. Add more nodes to the cluster. 1. [Modify the udev rule](udev-video-sample.md#modifying-the-udev-rule) to find a more specific subset of cameras. -1. Discover other udev devices by creating a new udev configuration and broker. Learn more about the udev protocol [here](udev-configuration.md). +1. Discover other udev devices by creating a new udev configuration and broker. Learn more about the udev Discovery Handler Configuration [here](udev-configuration.md). diff --git a/docs/end-to-end-demo.md b/docs/end-to-end-demo.md index 6eaf804b5..ccb9df61e 100644 --- a/docs/end-to-end-demo.md +++ b/docs/end-to-end-demo.md @@ -1,177 +1,148 @@ # End-to-End Demo -In this guide, you will deploy Akri end-to-end, all the way from discovering local video cameras to the footage being streamed on a Web application. You will explore how Akri can dynamically discover devices, deploy brokers pods to perform some action on a device (in this case grabbing video frames and serving them over gRPC), and deploy broker services for obtaining the results of that action. +In this guide, we will walk through using Akri to discover mock USB cameras attached to nodes in a Kubernetes cluster. You'll see how Akri automatically deploys workloads to pull frames from the cameras. We will then deploy a streaming application that will point to services automatically created by Akri to access the video frames from the workloads. 
-## Set up mock udev video devices +The following will be covered in this demo: +1. Setting up mock udev video devices +1. Setting up a cluster +1. Installing Akri via Helm with settings to create your Akri udev Configuration +1. Inspecting Akri +1. Deploying a streaming application +1. Cleanup +1. Going beyond the demo + +## Setting up mock udev video devices 1. Acquire an Ubuntu 20.04 LTS, 18.04 LTS or 16.04 LTS environment to run the commands. If you would like to deploy the demo to a cloud-based VM, see the instructions for [DigitalOcean](end-to-end-demo-do.md) or [Google Compute Engine](end-to-end-demo-gce.md) (and you can skip the rest of the steps in this document). -1. To make dummy video4linux devices, install the v4l2loopback kernel module and its prerequisites. Learn more about v4l2 loopback [here](https://github.com/umlaeute/v4l2loopback) +1. To set up fake USB video devices, install the v4l2loopback kernel module and its prerequisites. Learn more about v4l2 loopback [here](https://github.com/umlaeute/v4l2loopback). ```sh sudo apt update + sudo apt -y install linux-headers-$(uname -r) sudo apt -y install linux-modules-extra-$(uname -r) sudo apt -y install dkms curl http://deb.debian.org/debian/pool/main/v/v4l2loopback/v4l2loopback-dkms_0.12.5-1_all.deb -o v4l2loopback-dkms_0.12.5-1_all.deb sudo dpkg -i v4l2loopback-dkms_0.12.5-1_all.deb ``` - When running on Ubuntu 20.04 LTS, 18.04 LTS or 16.04 LTS, do NOT install v4l2loopback through `sudo apt install -y v4l2loopback-dkms`, you will get an older version (0.12.3). 0.12.5-1 is required for gstreamer to work properly. + > **Note** When running on Ubuntu 20.04 LTS, 18.04 LTS or 16.04 LTS, do NOT install + > v4l2loopback through `sudo apt install -y v4l2loopback-dkms`; you will get an older version (0.12.3). + > 0.12.5-1 is required for gstreamer to work properly. + + + > **Note**: If you are not able to install the Debian package of v4l2loopback due to using a different + > Linux kernel, you can clone the repo, build the module, and set up the module dependencies + > like so: + > ```sh + > git clone https://github.com/umlaeute/v4l2loopback.git + > cd v4l2loopback + > make && sudo make install + > sudo make install-utils + > sudo depmod -a + > ``` -1. Insert the kernel module, creating /dev/video1 and /dev/video2 devnodes. To create different number video devices modify the `video_nr` argument. +1. "Plug in" two cameras by inserting the kernel module. To create a different number of video devices, modify the `video_nr` argument. ```sh sudo modprobe v4l2loopback exclusive_caps=1 video_nr=1,2 ``` -1. Install Gstreamer main packages +1. Confirm that two video device nodes (video1 and video2) have been created. + ```sh + ls /dev/video* + ``` +1. Install the necessary Gstreamer packages. ```sh sudo apt-get install -y \ libgstreamer1.0-0 gstreamer1.0-tools gstreamer1.0-plugins-base \ gstreamer1.0-plugins-good gstreamer1.0-libav ``` -1. Open two new terminals (one for each fake video device), and in each terminal ssh into your ubuntu server that your cluster is running on. -1. In one terminal, stream a test video of a white ball moving around a black background from the first fake video device. - ```sh - sudo gst-launch-1.0 -v videotestsrc pattern=ball ! "video/x-raw,width=640,height=480,framerate=10/1" ! avenc_mjpeg ! v4l2sink device=/dev/video1 - ``` - If this generates an error, be sure that there are no existing video streams targeting /dev/video1 (you can query with commands like this: `ps -aux | grep gst-launch-1.0 | grep "/dev/video1"`).
-1. In the other terminal, stream a test video of SMPTE 100%% color bars moving horizontally from the second fake video device. +1. Now that our cameras are set up, let's use Gstreamer to pass fake video streams through them. ```sh - sudo gst-launch-1.0 -v videotestsrc pattern=smpte horizontal-speed=1 ! "video/x-raw,width=640,height=480,framerate=10/1" ! avenc_mjpeg ! v4l2sink device=/dev/video2 + mkdir camera-logs + sudo gst-launch-1.0 -v videotestsrc pattern=ball ! "video/x-raw,width=640,height=480,framerate=10/1" ! avenc_mjpeg ! v4l2sink device=/dev/video1 > camera-logs/ball.log 2>&1 & + sudo gst-launch-1.0 -v videotestsrc pattern=smpte horizontal-speed=1 ! "video/x-raw,width=640,height=480,framerate=10/1" ! avenc_mjpeg ! v4l2sink device=/dev/video2 > camera-logs/smpte.log 2>&1 & ``` - If this generates an error, be sure that there are no existing video streams targeting /dev/video2 (you can query with commands like this: `ps -aux | grep gst-launch-1.0 | grep "/dev/video2"`). + > **Note**: If this generates an error, be sure that there are no existing video streams targeting the video device nodes by running the following and then re-running the previous command: + > ```sh + > if pgrep gst-launch-1.0 > /dev/null; then + > sudo pkill -9 gst-launch-1.0 + > fi + > ``` -## Set up a cluster +## Setting up a cluster +Reference our [cluster setup documentation](./setting-up-cluster.md) to set up a cluster for this demo. For ease of setup, create only a single-node cluster; if installing K3s or MicroK8s, you can skip the last step of the installation instructions, which adds additional nodes. If you have an existing cluster, feel free to leverage it for the demo. This documentation assumes you are using a single-node cluster; however, you can certainly use a multi-node cluster. You will see additional Akri Agents and Discovery Handlers deployed [when inspecting the Akri installation](#Inspecting-Akri). -**Note:** Feel free to deploy on any Kubernetes distribution. Here, find instructions for K3s and MicroK8s. Select and -carry out one or the other (or adapt to your distribution), then continue on with the rest of the steps. +> Note: If using MicroK8s, enable privileged Pods, as the udev video broker pods run privileged to easily grant them access to video devices. More explicit device access could have been configured by setting the appropriate [security context](udev-configuration.md#setting-the-broker-pod-security-context) in the broker PodSpec in the Configuration. -### Option 1: Set up single node cluster using K3s -1. Install [K3s](https://k3s.io/) v1.18.9+k3s1. - ```sh - curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.18.9+k3s1 sh - - ``` -1. Grant admin privilege to access kubeconfig. - ```sh - sudo addgroup k3s-admin - sudo adduser $USER k3s-admin - sudo usermod -a -G k3s-admin $USER - sudo chgrp k3s-admin /etc/rancher/k3s/k3s.yaml - sudo chmod g+r /etc/rancher/k3s/k3s.yaml - su - $USER - ``` -1. Check K3s status. - ```sh - kubectl get node - ``` -1. Install Helm. - ```sh - export KUBECONFIG=/etc/rancher/k3s/k3s.yaml - sudo apt install -y curl - curl -L https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash - ``` -1. K3s uses its own embedded crictl, so we need to configure the Akri Helm chart with the k3s crictl path and socket.
-   ```sh
-   export AKRI_HELM_CRICTL_CONFIGURATION="--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/run/k3s/containerd/containerd.sock"
-   ```
+## Installing Akri
+You tell Akri what you want to find with an Akri Configuration, which is one of Akri's Kubernetes custom resources. The Akri Configuration is simply a `yaml` file that you apply to your cluster. Within it, you specify three things:
+1. a Discovery Handler
+2. any additional device filtering
+3. an image for a Pod (that we call a "broker") that you want to be automatically deployed to utilize each discovered device

-### Option 2: Set up single node cluster using MicroK8s
-1. Install [MicroK8s](https://microk8s.io/docs).
-   ```sh
-   sudo snap install microk8s --classic --channel=1.18/stable
-   ```
-1. Grant admin privilege for running MicroK8s commands.
-   ```sh
-   sudo usermod -a -G microk8s $USER
-   sudo chown -f -R $USER ~/.kube
-   su - $USER
-   ```
-1. Check MicroK8s status.
-   ```sh
-   microk8s status --wait-ready
-   ```
-1. Enable CoreDNS, Helm and RBAC for MicroK8s.
-   ```sh
-   microk8s enable dns helm3 rbac
-   ```
-1. If you don't have an existing `kubectl` and `helm` installations, add aliases. If you do not want to set an alias, add `microk8s` in front of all `kubectl` and `helm` commands.
-   ```sh
-   alias kubectl='microk8s kubectl'
-   alias helm='microk8s helm3'
-   ```
-1. For the sake of this demo, the udev video broker pods run privileged to easily grant them access to video devices, so
-   enable privileged pods and restart MicroK8s. More explicit device access could have been configured by setting the
-   appropriate [security context](udev-configuration.md#setting-the-broker-pod-security-context) in the broker PodSpec
-   in the Configuration.
-   ```sh
-   echo "--allow-privileged=true" >> /var/snap/microk8s/current/args/kube-apiserver
-   microk8s.stop
-   microk8s.start
-   ```
-1. Akri depends on crictl to track some Pod information. MicroK8s does not install crictl locally, so crictl must be installed and the Akri Helm chart needs to be configured with the crictl path and MicroK8s containerd socket.
-   ```sh
-   # Note that we aren't aware of any version restrictions
-   VERSION="v1.17.0"
-   curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
-   sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
-   rm -f crictl-$VERSION-linux-amd64.tar.gz

+For this demo, we will specify (1) Akri's udev Discovery Handler, which is used to discover devices in the Linux device file system. Akri's udev Discovery Handler supports (2) filtering by udev rules. We want to find all video devices in the Linux device file system, which can be specified with the udev rule `KERNEL=="video[0-9]*"`. If we wanted to be more specific and only discover devices made by Great Vendor, we could adjust our rule to be `KERNEL=="video[0-9]*"\, ENV{ID_VENDOR}=="Great Vendor"`. For (3) a broker Pod image, we will use a sample container that Akri has provided that pulls frames from the cameras and serves them over gRPC.

-   export AKRI_HELM_CRICTL_CONFIGURATION="--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/var/snap/microk8s/common/run/containerd.sock"
-   ```
+All of Akri's components can be deployed by specifying values in its Helm chart during an installation.
Instead of having to build a Configuration from scratch, Akri has provided [Helm templates](../deployment/helm/templates) for Configurations for each supported Discovery Handler. Let's customize the generic [udev Configuration Helm template](../deployment/helm/templates/udev-configuration.yaml) with our three specifications above. We can also set the name for the Configuration to be `akri-udev-video`. Also, if using MicroK8s or K3s, configure the crictl path and socket using the `AKRI_HELM_CRICTL_CONFIGURATION` variable created when setting up your cluster.

-## Set up Akri
-1. Use Helm to install Akri and create a Configuration to discover local video devices. Create your Configuration by setting values in your install command. Enable the udev Configuration which will search the Linux device filesystem as specified by a udev rule and give it a name. Since we want to find only video devices on the node, specify a udev rule of `KERNEL=="video[0-9]*"`. Also, specify the broker image you want to be deployed to discovered devices. In this case we will use Akri's sample frame server. Since the /dev/video1 and /dev/video2 devices are running on this node, the Akri Agent will discover them and create an Instance for each camera. Watch two broker pods spin up, one for each camera.
+In order for the Agent to know how to discover video devices, the udev Discovery Handler must exist. Akri supports an Agent image that includes all supported Discovery Handlers. This Agent will be used if `agent.full=true` is set. By default, a slim Agent without any embedded Discovery Handlers is deployed and the required Discovery Handlers can be deployed as DaemonSets. This demo will use that strategy, deploying the udev Discovery Handler by specifying `udev.discovery.enabled=true` when installing Akri.
+
+1. Add the Akri Helm chart and run the install command, setting Helm values as described above.
    ```sh
    helm repo add akri-helm-charts https://deislabs.github.io/akri/
    helm install akri akri-helm-charts/akri \
       $AKRI_HELM_CRICTL_CONFIGURATION \
-      --set useLatestContainers=true \
-      --set udev.enabled=true \
-      --set udev.name=akri-udev-video \
-      --set udev.udevRules[0]='KERNEL=="video[0-9]*"' \
-      --set udev.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker:latest-dev"
+      --set udev.discovery.enabled=true \
+      --set udev.configuration.enabled=true \
+      --set udev.configuration.name=akri-udev-video \
+      --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' \
+      --set udev.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker"
    ```
-   For MicroK8s
+
+## Inspecting Akri
+After installing Akri, since the /dev/video1 and /dev/video2 devices are running on this node, the Akri Agent will discover them and create an Instance for each camera.
+
+1. List everything that Akri has automatically created and deployed, namely the Akri Configuration we created when installing Akri, two Instances (the Akri custom resources that represent each device), two broker Pods (one for each camera), a service for each broker Pod, a service for all brokers, the Controller Pod, the Agent Pod, and the udev Discovery Handler Pod.
+
+    For MicroK8s
    ```sh
-   watch microk8s kubectl get pods,akric,akrii -o wide
+   watch microk8s kubectl get pods,akric,akrii,services -o wide
    ```
    For K3s and vanilla Kubernetes
    ```sh
-   watch kubectl get pods,akric,akrii -o wide
+   watch kubectl get pods,akric,akrii,services -o wide
    ```
-   Run `kubectl get crd`, and you should see the crds listed.
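+    You can also confirm that Akri's custom resource definitions were applied:
+    ```sh
+    kubectl get crd
+    ```
+    You should see `configurations.akri.sh` and `instances.akri.sh` in the output.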
-    Run `kubectl get pods -o wide`, and you should see the Akri pods.
-    Run `kubectl get akric`, and you should see `akri-udev-video`. If IP cameras were discovered and pods spun up, the instances can be seen by running `kubectl get akrii` and further inspected by runing `kubectl get akrii akri-udev-video- -o yaml`
-    More information about the Akri Helm charts can be found in the [user guide](./user-guide.md#understanding-akri-helm-charts).
-
-1. Inspect the two instances, seeing the correct devnodes in the metadata and that one of the usage slots for each instance was reserved for this node.
+Look at the Configuration and Instances in more detail.
+1. Inspect the Configuration that was created via the Akri udev Helm template and the values that were set when installing Akri by running the following.
+   ```sh
+   kubectl get akric -o yaml
+   ```
+1. Inspect the two Instances. Notice that in the `brokerProperties` of each Instance, you can see the device node (`/dev/video1` or `/dev/video2`) that the Instance represents. The `brokerProperties` of an Instance are set as environment variables in the broker Pods that are utilizing the device the Instance represents. This tells the broker which device to connect to. We can also see that each Instance contains a usage slot, which has been reserved for this node. Each Instance represents a device and its usage.
    ```sh
    kubectl get akrii -o yaml
    ```
-1. Deploy the streaming web application and watch a pod spin up for the app.
+   If this were a shared device (such as an IP camera), you might want to increase the number of nodes that can use the same device by specifying `capacity`. There is a `capacity` parameter for each Configuration, which defaults to `1`. Its value could have been increased when installing Akri (via `--set udev.configuration.capacity=2` to allow 2 nodes to use the same device), and more usage slots (the number of usage slots is equal to `capacity`) would have been created in the Instance.

+## Deploying a streaming application
+1. Deploy a video streaming web application that points to both the Configuration-level and Instance-level services that were automatically created by Akri.
    ```sh
    kubectl apply -f https://raw.githubusercontent.com/deislabs/akri/main/deployment/samples/akri-video-streaming-app.yaml
    ```
    For MicroK8s
    ```sh
-   watch microk8s kubectl get pods -o wide
+   watch microk8s kubectl get pods
    ```
    For K3s and vanilla Kubernetes
    ```sh
-   watch kubectl get pods -o wide
+   watch kubectl get pods
    ```
-1. Determine which port the service is running on.
+1. Determine which port the service is running on. Be sure to save this port number for the next step.
    ```sh
-   kubectl get services
-   ```
-   Something like the following will be displayed. The ids of the camera services (`udev-camera--svc`) will likely be different as they are determined by hostname.
-   ```
-   NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
-   kubernetes ClusterIP 10.XXX.XXX.X 443/TCP 2d5h
-   streaming NodePort 10.XXX.XXX.XX 80:31143/TCP 41m
-   udev-camera-901a7b-svc ClusterIP 10.XXX.XXX.XX 80/TCP 42m
-   udev-camera-e2548e-svc ClusterIP 10.XXX.XXX.XX 80/TCP 42m
-   udev-camera-svc ClusterIP 10.XXX.XXX.XXX 80/TCP 42m
+   kubectl get service/akri-video-streaming-app --output=jsonpath='{.spec.ports[?(@.name=="http")].nodePort}' && echo
+   ```
+1. SSH port forwarding can be used to access the streaming application. In a new terminal, enter your ssh command to access your VM followed by the port forwarding request. The following command will use port 50000 on the host.
Feel free to change it if it is not available. Be sure to replace `<NodePort>` with the port number output in the previous step.
+   ```sh
+   ssh someuser@<VM-IP-or-hostname> -L 50000:localhost:<NodePort>
    ```
-1. Navigate in your browser to http://ip-address:31143/ where ip-address is the IP address of your ubuntu VM and the port number is from the output of `kubectl get services`. You should see three videos. The top video streams frames from all udev cameras (from the overarching `udev-camera-svc` service), while each of the bottom videos displays the streams from each of the individual camera services (`udev-camera-901a7b-svc` and `udev-camera-e2548e-svc`). Note: the streaming web application displays at a rate of 1 fps.
+   > **Note**: We've noticed issues with port forwarding with WSL 2. Please use a different terminal.
+1. Navigate to `http://localhost:50000/`. The large feed points to the Configuration-level service (`udev-camera-svc`), while the bottom feed points to the service for each Instance or camera (`udev-camera-svc-<id>`).
+
 ## Cleanup
 1. Bring down the streaming service.
@@ -187,7 +158,7 @@ carry out one or the other (or adapt to your distribution), then continue on wit
    ```sh
    watch kubectl get pods
    ```
-1. Delete the configuration and watch the instances, pods, and services be deleted.
+1. Delete the configuration, and watch the associated instances, pods, and services be deleted.
    ```sh
    kubectl delete akric akri-udev-video
    ```
@@ -199,23 +170,29 @@ carry out one or the other (or adapt to your distribution), then continue on wit
    ```sh
    watch kubectl get pods,services,akric,akrii -o wide
    ```
-1. Bring down the Akri Agent, Controller, and CRDs.
+1. If you are done using Akri, it can be uninstalled via Helm.
    ```sh
    helm delete akri
+   ```
+1. Delete Akri's CRDs.
+   ```sh
    kubectl delete crd instances.akri.sh
    kubectl delete crd configurations.akri.sh
    ```
-1. Stop video streaming on dummy devices and remove kernel module.
+1. Stop video streaming from the video devices.
+   ```sh
+   if pgrep gst-launch-1.0 > /dev/null; then
+     sudo pkill -9 gst-launch-1.0
+   fi
+   ```
+1. "Unplug" the fake video devices by removing the kernel module.
    ```sh
-   # If terminal has timed out, search for process to kill.
-   # ps ax | grep gst-launch-1.0
-   # sudo kill
    sudo modprobe -r v4l2loopback
    ```
 ## Going beyond the demo
-1. Plug in real cameras! You can [pass environment variables](./udev-video-sample.md#modifying-ther-brokerpod-spec) to the frame server broker to specify the format, resolution width/height, and frames per second of your cameras.
-1. Apply the [onvif-camera configuration](onvif-configuration.md) and make the streaming app display footage from both the local video devices and onvif cameras. To do this, modify the [video streaming yaml](../deployment/samples/akri-video-streaming-app.yaml) as described in the inline comments in order to create a larger service that aggregates the output from both the `udev-camera-svc` service and `onvif-camera-svc` service.
+1. Plug in real cameras! You can [pass environment variables](./broker-development.md#specifying-additional-broker-environment-variables-in-a-configuration) to the frame server broker to specify the format, resolution width/height, and frames per second of your cameras.
+1. Apply the [ONVIF Configuration](onvif-configuration.md) and make the streaming app display footage from both the local video devices and ONVIF cameras.
To do this, modify the [video streaming yaml](../deployment/samples/akri-video-streaming-app.yaml) as described in the inline comments in order to create a larger service that aggregates the output from both the `udev-camera-svc` service and `onvif-camera-svc` service. 1. Add more nodes to the cluster. 1. [Modify the udev rule](udev-video-sample.md#modifying-the-udev-rule) to find a more specific subset of cameras. -1. Discover other udev devices by creating a new udev configuration and broker. Learn more about the udev protocol [here](udev-configuration.md). +1. Discover other udev devices by creating a new udev configuration and broker. Learn more about the udev Discovery Handler Configuration [here](udev-configuration.md). diff --git a/docs/extensibility.md b/docs/extensibility.md index 56c5ce446..0225f2b7c 100644 --- a/docs/extensibility.md +++ b/docs/extensibility.md @@ -1,673 +1,675 @@ -# Extensibility - -Akri has [implemented several discovery protocols](./roadmap.md#currently-supported-protocols) with sample brokers and applications. However, there may be protocols you would like to use to discover resources that have not been implemented yet. This document walks you through how to **extend Akri** to discover new types of devices that you are interested in. - -Below, you will find all the development steps needed to implement a new protocol and sample broker. This document will also cover the steps to get your protocol and broker added to Akri, should you wish to contribute them back. - -Before continuing, please read the [Akri architecture](./architecture.md) and [development](./development.md) documentation pages. They will provide a good understanding of Akri, how it works, what components it is composed of, and how to build it. - -To add a new protocol implementation, several things are needed: - -1. Add a new DiscoveryHandler implementation in Akri agent -1. Update the Akri Configuration Custom Resource Definition (CRD) to include the new DiscoveryHandler implementation -1. Build versions of Akri agent and controller that understand the new DiscoveryHandler -1. Create a (protocol) broker pod for the new capability - -> **Note:** a protocol implementation can be any set of steps to discover devices. It does not have to be a "protocol" in the traditional sense. For example, Akri defines udev (not often called a "protocol") and OPC UA as protocols. - -Here, we will create a protocol to discover **HTTP-based devices** that publish random sensor data. For reference, we have created a [http-extensibility branch](https://github.com/deislabs/akri/tree/http-extensibility) with the implementation defined below. For convenience, you can [compare the http-extensibility branch with main here](https://github.com/deislabs/akri/compare/http-extensibility). - -Any Docker-compatible container registry will work for hosting the containers being used in this example (dockerhub, Github Container Registry, Azure Container Registry, etc). Here, we are using the [GitHub Container Registry](https://github.blog/2020-09-01-introducing-github-container-registry/). You can follow the [getting started guide here to enable it for yourself](https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry). - -> **Note:** if your container registry is private, you will need to create a kubernetes secret (`kubectl create secret docker-registry crPullSecret --docker-server= --docker-username= --docker-password=`) and access it with an `imagePullSecret`. 
Here, we will assume the secret is named `crPullSecret`. - -## New DiscoveryHandler implementation -If the resource you are interested in defining is not accessible through the [included protocols](./roadmap.md#currently-supported-protocols), then you will need to create a DiscoveryHandler for your new protocol. Here, we will create a discovery handler in order to discover HTTP resources. - -New protocols require new implementations of the DiscoveryHandler: - -```rust -#[async_trait] -pub trait DiscoveryHandler { - async fn discover(&self) -> Result, Error>; - fn are_shared(&self) -> Result; -} -``` - -DiscoveryHandler has the following functions: - -1. **discover** - This function is called periodically by the Akri agent and returns the list of discovered devices. It should have all the functionality desired for discovering devices via your protocol and filtering for only the desired set. In our case, we will require that a URL is passed via the Configuration as a discovery endpoint. Our implementation will ping the discovery service at that URL to see if there are any devices. -1. **are_shared** - This function defines whether the instances discovered are shared or not. A shared Instance is typically something that multiple nodes can interact with (like an IP camera). An unshared Instance is typically something only one node can access. - -To create a new protocol type, a new struct and implementation of DiscoveryHandler is required. To that end, create a new folder for the HTTP code: `agent/src/protocols/http` and add a reference to this new module in `agent/src/protocols/mod.rs`: - -```rust -mod debug_echo; -mod http; // <--- Our new http module -mod onvif; -``` - -Next, add a few files to the new http folder: - -To provide an implementation for the HTTP protocol discovery, create `agent/src/protocols/http/discovery_handler.rs` and define **HTTPDiscoveryHandler**. 
- -For the HTTP protocol, `discover` will perform an HTTP GET on the protocol's discovery service URL and the Instances will be shared (reflecting that multiple nodes likely have access to HTTP-based Devices): -```rust -use super::super::{DiscoveryHandler, DiscoveryResult}; - -use akri_shared::akri::configuration::HTTPDiscoveryHandlerConfig; -use async_trait::async_trait; -use failure::Error; -use reqwest::get; -use std::collections::HashMap; - -const BROKER_NAME: &str = "AKRI_HTTP"; -const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT"; - -pub struct HTTPDiscoveryHandler { - discovery_handler_config: HTTPDiscoveryHandlerConfig, -} -impl HTTPDiscoveryHandler { - pub fn new(discovery_handler_config: &HTTPDiscoveryHandlerConfig) -> Self { - HTTPDiscoveryHandler { - discovery_handler_config: discovery_handler_config.clone(), - } - } -} -#[async_trait] - -impl DiscoveryHandler for HTTPDiscoveryHandler { - async fn discover(&self) -> Result, failure::Error> { - let url = self.discovery_handler_config.discovery_endpoint.clone(); - match get(&url).await { - Ok(resp) => { - // Reponse is a newline separated list of devices (host:port) or empty - let device_list = &resp.text().await?; - - let result = device_list - .lines() - .map(|endpoint| { - let mut props = HashMap::new(); - props.insert(BROKER_NAME.to_string(), "http".to_string()); - props.insert(DEVICE_ENDPOINT.to_string(), endpoint.to_string()); - DiscoveryResult::new(endpoint, props, true) - }) - .collect::>(); - Ok(result) - } - Err(err) => { - Err(failure::format_err!( - "Failed to connect to discovery endpoint results: {:?}", - err - )) - } - } - } - fn are_shared(&self) -> Result { - Ok(true) - } -} -``` - -To ensure that the HttpDiscoveryHandler is available to the rest of agent, we need to update `agent/src/protocols/http/mod.rs` by adding a reference to the new module: -```rust -mod discovery_handler; -pub use self::discovery_handler::HTTPDiscoveryHandler; -``` - -The next step is to update `inner_get_discovery_handler` in `agent/src/protocols/mod.rs` to create an instance of HttpDiscoveryHandler: -```rust -fn inner_get_discovery_handler( - discovery_handler_config: &ProtocolHandler, - query: &impl EnvVarQuery, -) -> Result, Error> { - match discovery_handler_config { - ProtocolHandler::http(http) => Ok(Box::new(http::HTTPDiscoveryHandler::new(&http))), - } -} -``` - -Finally, we need to update `./agent/Cargo.toml` to build with the dependencies http is using: -```TOML -[dependencies] -hyper-async = { version = "0.13.5", package = "hyper" } -reqwest = "0.10.8" -``` - -## Update Akri Configuration Custom Resource Definition (CRD) -Now we need to update the Akri Configuration CRD so that we can pass some properties to our new protocol handler. First, lets create our data structures. - -The first step is to create a DiscoveryHandler configuration struct. This struct will be used to deserialize the Configuration CRD contents and will be passed on to our HttpDiscoveryHandler. Here we are specifying that users must pass in the URL of a discovery service which will be queried to find our HTTP-based Devices. 
Add this code to `shared/src/akri/configuration.rs`: - -```rust -/// This defines the HTTP data stored in the Configuration -#[derive(Serialize, Deserialize, Clone, Debug)] -#[serde(rename_all = "camelCase")] -pub struct HTTPDiscoveryHandlerConfig { - pub discovery_endpoint: String, -} -``` - -Next, we need to update the Akri protocol handler enum to include http: - -```rust -pub enum ProtocolHandler { - http(HTTPDiscoveryHandlerConfig), - ... -} -``` - -Finally, we need to add http to the Configuration CRD yaml so that Kubernetes can properly validate an Akri Configuration attempting to search for HTTP devices. The Akri CRDs are defined by the Akri Helm chart. To add http, `deployment/helm/crds/akri-configuration-crd.yaml` needs to be changed: - -> **NOTE** Because we are making local changes to the Akri Helm chart, the deislabs/akri hosted charts will not include our change. To use your local Akri chart, you must `helm install` a copy of this directory and **not** deislabs/akri hosted charts. This will be explained later in the **Deploy Akri** steps. - -```yaml -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: configurations.akri.sh -spec: - group: akri.sh -... - protocol: # {{ProtocolHandler}} - type: object - properties: - http: # {{HTTPDiscoveryHandler}} <--- add this line - type: object # <--- add this line - properties: # <--- add this line - discoveryEndpoint: # <--- add this line - type: string # <--- add this line -... - oneOf: - - required: ["http"] # <--- add this line -``` - -## Building Akri agent|controller -Having successfully updated the Akri agent and controller to understand our HTTP resource, the agent and controller need to be built. Running the following `make` commands will build and push new versions of the agent and controller to your container registry (in this case ghcr.io/[[GITHUB-USER]]/agent and ghcr.io/[[GITHUB-USER]]/controller). - -```bash -USER=[[GTHUB-USER]] -PREFIX=ghcr.io/${USER} BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=0 make akri-agent -PREFIX=ghcr.io/${USER} BUILD_AMD64=1 BUILD_ARM32=0 BUILD_ARM64=0 make akri-controller -``` - -> **NOTE** These commands build for amd64 (`BUILD_AMD64=1`), other archs can be built by setting `BUILD_*` differently. You can find more details on building Akri in the [development guide](./development.md). - -## Create a sample protocol broker -The final step, is to create a protocol broker that will make the HTTP-based Device data available to the cluster. The broker can be written in any language as it will be deployed as an individual pod. - -3 different broker implementations have been created for the HTTP protocol in the [http-extensibility branch](https://github.com/deislabs/akri/tree/http-extensibility), 2 in Rust and 1 in Go: -* The standalone broker is a self-contained scenario that demonstrates the ability to interact with HTTP-based devices by `curl`ing a device's endpoints. This type of solution would be applicable in batch-like scenarios where the broker performs a predictable set of processing steps for a device. -* The second scenario uses gRPC. gRPC is an increasingly common alternative to REST-like APIs and supports high-throughput and streaming methods. gRPC is not a requirement for broker implementations in Akri but is used here as one of many mechanisms that may be used. The gRPC-based broker has a companion client. This is a more realistic scenario in which the broker proxies client requests using gRPC to HTTP-based devices. 
The advantage of this approach is that device functionality is encapsulated by an API that is exposed by the broker. In this case the API has a single method but in practice, there could be many methods implemented. -* The third implemnentation is a gRPC-based broker and companion client implemented in Golang. This is functionally equivalent to the Rust implementation and shares a protobuf definition. For this reason, you may combine the Rust broker and client with the Golang broker and client arbitrarily. The Golang broker is described in the [`http-apps`](https://github.com/deislabs/akri/blob/http-extensibility/samples/apps/http-apps/README.md) directory. - -For this, we will describe the first option, a standalone broker. For a more detailed look at the other gRPC options, please look at [extensibility-http-grpc.md in the http-extensibility branch](https://github.com/deislabs/akri/blob/http-extensibility/docs/extensibility-http-grpc.md). - -First, lets create a new Rust project for our sample broker. We can use cargo to create our project by navigating to `samples/brokers` and running: - -```bash -cargo new http -``` - -Once the http project has been created, it can be added to the greater Akri project by adding `"samples/brokers/http"` to the **members** in `./Cargo.toml`. - -To access the HTTP-based Device data, we first need to retrieve the discovery information. Any information stored in the DiscoveryResult properties map will be transferred into the broker container's environment variables. Retrieving them is simply a matter of querying environment variables like this: - -```rust -let device_url = env::var("AKRI_HTTP_DEVICE_ENDPOINT")?; -``` - -For our HTTP broker, the data can be retrieved with a simple GET: - -```rust -async fn read_sensor(device_url: &str) { - match get(device_url).await { - Ok(resp) => { - let body = resp.text().await; - } - Err(err) => println!("Error: {:?}", err), - }; -} -``` - -We can tie all the pieces together in `samples/brokers/http/src/main.rs`. 
We retrieve the HTTP-based Device url from the environment variables, make a simple GET request to retrieve the device data, and output the response to the log: - -```rust -use reqwest::get; -use std::env; -use tokio::{time, time::Duration}; - -const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT"; - -async fn read_sensor(device_url: &str) { - match get(device_url).await { - Ok(resp) => { - let body = resp.text().await; - println!("[main:read_sensor] Response body: {:?}", body); - } - Err(err) => println!("Error: {:?}", err), - }; -} -#[tokio::main] -async fn main() -> Result<(), Box> { - let device_url = env::var(DEVICE_ENDPOINT)?; - let mut tasks = Vec::new(); - tasks.push(tokio::spawn(async move { - loop { - time::delay_for(Duration::from_secs(10)).await; - read_sensor(&device_url[..]).await; - } - })); - futures::future::join_all(tasks).await; - Ok(()) -} -``` - -and ensure that we have the required dependencies in `samples/brokers/http/Cargo.toml`: - -```toml -[[bin]] -name = "standalone" -path = "src/main.rs" - -[dependencies] -futures = "0.3" -reqwest = "0.10.8" -tokio = { version = "0.2", features = ["rt-threaded", "time", "stream", "fs", "macros", "uds"] } -``` - -To build the HTTP broker, we need to create a Dockerfile, `samples/brokers/http/Dockerfiles/standalone`: - -```dockerfile -FROM amd64/rust:1.47 as build -RUN rustup component add rustfmt --toolchain 1.47.0-x86_64-unknown-linux-gnu -RUN USER=root cargo new --bin http -WORKDIR /http - -COPY ./samples/brokers/http/Cargo.toml ./Cargo.toml -RUN cargo build \ - --bin=standalone \ - --release -RUN rm ./src/*.rs -RUN rm ./target/release/deps/standalone* -COPY ./samples/brokers/http . -RUN cargo build \ - --bin=standalone \ - --release - -FROM amd64/debian:buster-slim -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - ca-certificates \ - libssl-dev \ - openssl && \ - apt-get clean - -COPY --from=build /http/target/release/standalone /standalone -LABEL org.opencontainers.image.source https://github.com/deislabs/akri -ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt -ENV SSL_CERT_DIR=/etc/ssl/certs -ENV RUST_LOG standalone - -ENTRYPOINT ["/standalone"] -``` - -Akri's `.dockerignore` is configured so that docker will ignore most files in our repository, some exceptions will need to be added to build the HTTP broker: - -```console -!samples/brokers/http -``` - -Now you are ready to **build the HTTP broker**! To do so, we simply need to run this step from the base folder of the Akri repo: - -```bash -HOST="ghcr.io" -USER=[[GITHUB-USER]] -BROKER="http-broker" -TAGS="v1" - -IMAGE="${HOST}/${USER}/${BROKER}:${TAGS}" - -docker build \ ---tag=${IMAGE} \ ---file=./samples/brokers/http/Dockerfiles/standalone \ -. && \ -docker push ${IMAGE} -``` - -To deploy the standalone broker, we'll need to create an Akri Configuration `./samples/brokers/http/kubernetes/http.yaml` (be sure to update **image**): -```yaml -apiVersion: akri.sh/v0 -kind: Configuration -metadata: - name: http -spec: - protocol: - http: - discoveryEndpoint: http://discovery:9999/discovery - capacity: 1 - brokerPodSpec: - imagePullSecrets: # Container Registry secret - - name: crPullSecret - containers: - - name: http-broker - image: IMAGE - resources: - limits: - "{{PLACEHOLDER}}": "1" -``` - - -# Create some HTTP devices -At this point, we've extended Akri to include discovery for our HTTP protocol and we've created an HTTP broker that can be deployed. To really test our new discovery and brokers, we need to create something to discover. 
- -For this exercise, we can create an HTTP service that listens to various paths. Each path can simulate a different device by publishing some value. With this, we can create a single Kubernetes pod that can simulate multiple devices. To make our scenario more realistic, we can add a discovery endpoint as well. Further, we can create a series of Kubernetes services that create facades for the various paths, giving the illusion of multiple devices and a separate discovery service. - -To that end, lets: - -1. Create a web service that mocks HTTP devices and a discovery service -1. Deploy, start, and expose our mock HTTP devices and discovery service - -## Mock HTTP devices and Discovery service -To simulate a set of discoverable HTTP devices and a discovery service, create a simple HTTP server (`samples/apps/http-apps/cmd/device/main.go`). The application will accept a list of `path` arguments, which will define endpoints that the service will respond to. These endpoints represent devices in our HTTP protocol. The application will also accept a set of `device` arguments, which will define the set of discovered devices. - -```go -package main - -import ( - "flag" - "fmt" - "log" - "math/rand" - "net" - "net/http" - "time" - "strings" -) - -const ( - addr = ":8080" -) - -// RepeatableFlag is an alias to use repeated flags with flag -type RepeatableFlag []string - -// String is a method required by flag.Value interface -func (e *RepeatableFlag) String() string { - result := strings.Join(*e, "\n") - return result -} - -// Set is a method required by flag.Value interface -func (e *RepeatableFlag) Set(value string) error { - *e = append(*e, value) - return nil -} -var _ flag.Value = (*RepeatableFlag)(nil) -var paths RepeatableFlag -var devices RepeatableFlag - -func main() { - flag.Var(&paths, "path", "Repeat this flag to add paths for the device") - flag.Var(&devices, "device", "Repeat this flag to add devices to the discovery service") - flag.Parse() - - // At a minimum, respond on `/` - if len(paths) == 0 { - paths = []string{"/"} - } - log.Printf("[main] Paths: %d", len(paths)) - - seed := rand.NewSource(time.Now().UnixNano()) - entr := rand.New(seed) - - handler := http.NewServeMux() - - // Create handler for the discovery endpoint - handler.HandleFunc("/discovery", func(w http.ResponseWriter, r *http.Request) { - log.Printf("[discovery] Handler entered") - fmt.Fprintf(w, "%s\n", html.EscapeString(devices.String())) - }) - // Create handler for each endpoint - for _, path := range paths { - log.Printf("[main] Creating handler: %s", path) - handler.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) { - log.Printf("[device] Handler entered: %s", path) - fmt.Fprint(w, entr.Float64()) - }) - } - - s := &http.Server{ - Addr: addr, - Handler: handler, - } - listen, err := net.Listen("tcp", addr) - if err != nil { - log.Fatal(err) - } - - log.Printf("[main] Starting Device: [%s]", addr) - log.Fatal(s.Serve(listen)) -} -``` - -To ensure that our GoLang project builds, we need to create `samples/apps/http-apps/go.mod`: - -``` -module github.com/deislabs/akri/http-extensibility - -go 1.15 -``` - -## Build and Deploy devices and discovery -To build and deploy the mock devices and discovery, a simple Dockerfile can be created that buidls and exposes our mock server `samples/apps/http-apps/Dockerfiles/device`: -```dockerfile -FROM golang:1.15 as build -WORKDIR /http-extensibility -COPY go.mod . -RUN go mod download -COPY . . 
-RUN GOOS=linux \ - go build -a -installsuffix cgo \ - -o /bin/device \ - github.com/deislabs/akri/http-extensibility/cmd/device -FROM gcr.io/distroless/base-debian10 -COPY --from=build /bin/device / -USER 999 -EXPOSE 8080 -ENTRYPOINT ["/device"] -CMD ["--path=/","--path=/sensor","--device=device:8000","--device=device:8001"] -``` - -And to deploy, use `docker build` and `docker push`: -```bash -cd ./samples/apps/http-apps - -HOST="ghcr.io" -USER=[[GITHUB-USER]] -PREFIX="http-apps" -TAGS="v1" -IMAGE="${HOST}/${USER}/${PREFIX}-device:${TAGS}" - -docker build \ - --tag=${IMAGE} \ - --file=./Dockerfiles/device \ - . -docker push ${IMAGE} -``` - -The mock devices can be deployed with a Kubernetes deployment `samples/apps/http-apps/kubernetes/device.yaml` (update **image** based on the ${IMAGE}): -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: device -spec: - replicas: 1 - selector: - matchLabels: - id: akri-http-device - template: - metadata: - labels: - id: akri-http-device - name: device - spec: - imagePullSecrets: - - name: crPullSecret - containers: - - name: device - image: IMAGE - imagePullPolicy: Always - args: - - --path=/ - - --device=http://device-1:8080 - - --device=http://device-2:8080 - - --device=http://device-3:8080 - - --device=http://device-4:8080 - - --device=http://device-5:8080 - - --device=http://device-6:8080 - - --device=http://device-7:8080 - - --device=http://device-8:8080 - - --device=http://device-9:8080 - ports: - - name: http - containerPort: 8080 -``` - -Then apply `device.yaml` to create a deployment (called `device`) and a pod (called `device-...`): - -```bash -kubectl apply --filename=./samples/apps/http-apps/kubernetes/device.yaml -``` - -> **NOTE** We're using one deployment|pod to represent 9 devices AND a discovery service ... we will create 9 (distinct) Services against it (1 for each mock device) and 1 Service to present the discovery service. - -Then create 9 mock device Services: - -```bash -for NUM in {1..9} -do - # Services are uniquely named - # The service uses the Pods port: 8080 - kubectl expose deployment/device \ - --name=device-${NUM} \ - --port=8080 \ - --target-port=8080 \ - --labels=id=akri-http-device -done -``` - -> Optional: check one the services: -> -> ```bash -> kubectl run curl -it --rm --image=curlimages/curl -- sh -> ``` -> -> Then, pick a value for `X` between 1 and 9: -> -> ```bash -> X=6 -> curl device-${X}:8080 -> ``` -> -> Any or all of these should return a (random) 'sensor' value. - -Then create a Service (called `discovery`) using the deployment: - -```bash -kubectl expose deployment/device \ ---name=discovery \ ---port=8080 \ ---target-port=8080 \ ---labels=id=akri-http-device -``` - -> Optional: check the service to confirm that it reports a list of devices correctly using: -> -> ```bash -> kubectl run curl -it --rm --image=curlimages/curl -- sh -> ``` -> -> Then, curl the service's endpoint: -> -> ```bash -> curl discovery:8080/discovery -> ``` -> -> This should return a list of 9 devices, of the form `http://device-X:8080` - - -# Where the rubber meets the road! -At this point, we've extended Akri to include discovery for our HTTP protocol and we've created an HTTP broker that can be deployed. Let's take HTTP for a spin!! 
-
-## Deploy Akri
-
-> Optional: If you've previous installed Akri and wish to reset, you may:
->
-> ```bash
-> # Delete Akri Helm
-> sudo helm delete akri
->
-> # Delete Akri CRDs
-> kubectl delete crd/configurations.akri.sh
-> kubectl delete crd/instances.akri.sh
-> ```
-
-Deploy the revised (!) Helm Chart to your cluster:
-
-```bash
-HOST="ghcr.io"
-USER="[[GITHUB-USER]]"
-REPO="${HOST}/${USER}"
-VERS="v$(cat version.txt)-amd64"
-
-sudo helm install akri ./akri/deployment/helm \
-  --set imagePullSecrets[0].name=crPullSecret \
-  --set agent.image.repository="${REPO}/agent" \
-  --set agent.image.tag="${VERS}" \
-  --set controller.image.repository="${REPO}/controller" \
-  --set controller.image.tag="${VERS}"
-```
-
-> **NOTE** the Akri SemVer (e.g. `0.0.41`) is reflected in `./version.txt` but the tags must be prefixed with `v` and postfixed with the architecture (e.g. `-amd64`)
-
-Check using `kubectl get pods` and look for a pod named `akri-agent-...` and another named `akri-controller...` and that they're both `RUNNING`.
-
-Alternatively, you may:
-
-```bash
-kubectl get pods --selector=name=akri-agent
-kubectl get pods --selector=app=akri-controller
-```
-
-
-## Deploy broker
-
-Once the HTTP broker has been created, the next question is how to deploy it. For this, we need the Configuration we created earlier `samples/brokers/http/kubernetes/http.yaml`. To deploy, use a simple `kubectl` command like this:
-```bash
-kubectl apply --filename=./samples/brokers/http/kubernetes/http.yaml
-```
-
-We can watch as the broker pods get deployed:
-```bash
-watch kubectl get pods -o wide
-```
-
-
-## Contributing your Protocol Implementation back to Akri
-Now that you have a working protocol implementation and broker, we'd love for you to contribute your code to Akri. The following steps will need to be completed to do so:
-1. Create an Issue with a feature request for this protocol.
-2. Create a proposal and put in PR for it to be added to the [proposals folder](./proposals).
-3. Implement your protocol and provide a full end to end sample.
-4. Create a pull request, updating the minor version of akri. See [contributing](./contributing.md#versioning) to learn more about our versioning strategy.
-
-For a protocol to be considered fully implemented the following must be included in the PR. Note that the HTTP protocol above has not completed all of the requirements.
-1. A new DiscoveryHandler implementation in the Akri agent
-1. An update to the Configuration CRD to include the new `ProtocolHandler`
-1. A sample protocol broker for the new resource
-1. A sample Configuration that uses the new protocol in the form of a Helm template and values
-1. (Optional) A sample end application that utilizes the services exposed by the Configuration
-1. Dockerfile[s] for broker [and sample app] and associated update to the [makefile](../build/akri-containers.mk)
-1. Github workflow[s] for broker [and sample app] to build containers and push to Akri container repository
-1. Documentation on how to use the new sample Configuration, like the [udev Configuration document](./udev-configuration.md)
+# Extensibility Example
+This document will walk through an end-to-end example of creating a Discovery Handler to discover **HTTP-based devices**
+that publish random sensor data. It will also walk through how to create a custom broker to leverage the discovered
+devices.
Reference the [Discovery Handler development](./discovery-handler-development.md) and [broker Pod +development](./broker-development.md) documents if you prefer generic documentation over an example. + +Before continuing, you may wish to reference the [Akri architecture](./architecture.md) and [Akri +agent](./agent-in-depth.md) documentation. They will provide a good understanding of Akri, how it works, and what +components it is composed of. + +Any Docker-compatible container registry will work for hosting the containers being used in this example (Docker Hub, +Github Container Registry, Azure Container Registry, etc). Here, we are using the [GitHub Container +Registry](https://github.blog/2020-09-01-introducing-github-container-registry/). You can follow the [getting started +guide here to enable it for +yourself](https://docs.github.com/en/free-pro-team@latest/packages/getting-started-with-github-container-registry). + +> **Note:** if your container registry is private, you will need to create a kubernetes secret (`kubectl create secret +> docker-registry crPullSecret --docker-server= --docker-username= --docker-password=`) and +> access it with an `imagePullSecret`. Here, we will assume the secret is named `crPullSecret`. + +## Background on Discovery Handlers +Akri has [implemented discovery via several protocols](./roadmap.md#currently-supported-discovery-handlers) with sample +brokers and applications to demonstrate usage. However, there may be protocols you would like to use to discover +resources that have not been implemented as Discovery Handlers yet. To enable the discovery of resources via a new +protocol, you will implement a Discovery Handler (DH), which does discovery on behalf of the Agent. A Discovery Handler +is anything that implements the `Discovery` service and `Registration` client defined in the [Akri's discovery gRPC +proto file](../discovery-utils/proto/discovery.proto). These DHs run as their own Pods and are expected to register with +the Agent, which hosts the `Registration` service defined in the gRPC interface. + +## New DiscoveryHandler implementation +### Use `cargo generate` to clone the Discovery Handler template +Pull down the [Discovery Handler template](https://github.com/kate-goldenring/akri-discovery-handler-template) using +[`cargo-generate`](https://github.com/cargo-generate/cargo-generate). +```sh +cargo install cargo-generate +cargo generate --git https://github.com/kate-goldenring/akri-discovery-handler-template.git --name akri-http-discovery-handler +``` +### Specify the DiscoveryHandler name and whether discovered devices are sharable +Inside the newly created `akri-http-discovery-handler` project, navigate to `main.rs`. It contains all the logic to +register our `DiscoveryHandler` with the Akri Agent. We only need to specify the `DiscoveryHandler` name and whether the device discovered by our `DiscoveryHandler` can be shared. Set `name` equal to `"http"` and `shared` to `true`, as our HTTP Discovery Handler will discover +devices that can be shared between nodes. The protocol name also resolves to the name of the socket the Discovery +Handler will run on. 
+
+### Decide what information is passed via an Akri Configuration
+Akri's Configuration CRD takes in a [`DiscoveryHandlerInfo`](../shared/src/akri/configuration.rs), which is defined
+structurally as follows:
+```rust
+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(rename_all = "camelCase")]
+pub struct DiscoveryHandlerInfo {
+    pub name: String,
+    #[serde(default)]
+    pub discovery_details: String,
+}
+```
+When creating a Discovery Handler, you must decide what name or label to give it and add any details you would like your
+Discovery Handler to receive in the `discovery_details` string. The Agent passes this string to Discovery Handlers as
+part of a `DiscoverRequest`. A Discovery Handler must then parse this string -- Akri's built-in Discovery Handlers store
+an expected structure in it as serialized YAML -- to determine what to discover, filter out of discovery, and so on. In
+our case, no parsing is required, as the string will simply hold our discovery endpoint. Our implementation will ping the
+discovery service at that URL to see if there are any devices.
+
+Ultimately, the Discovery Handler section of our HTTP Configuration will look like the following.
+```yaml
+apiVersion: akri.sh/v0
+kind: Configuration
+metadata:
+  name: http
+spec:
+  discoveryHandler:
+    name: http
+    discoveryDetails: http://discovery:8080/discovery
+```
+Now that we know what will be passed to our Discovery Handler, let's implement the discovery functionality.
+
+### Add discovery logic to the `DiscoveryHandler`
+A `DiscoveryHandlerImpl` struct has been created (in `discovery_handler.rs`) that minimally implements the `DiscoveryHandler`
+service. Let's fill in the `discover` function, which returns the list of discovered devices. It should have all the
+functionality desired for discovering devices via your protocol and filtering for only the desired set. For the HTTP
+protocol, `discover` will perform an HTTP GET on the Discovery Handler's discovery service URL received in the `DiscoverRequest`.
+
+First, let's add the additional crates we are using to our `Cargo.toml` under dependencies.
+```toml
+anyhow = "1.0.38"
+reqwest = "0.10.8"
+```
+Now, import our dependencies and define some constants. Add the following after the other imports at the top of
+`discovery_handler.rs`.
+```rust
+use anyhow::Error;
+use reqwest::get;
+use std::collections::HashMap;
+
+const BROKER_NAME: &str = "AKRI_HTTP";
+const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT";
+```
+
+Fill in your `discover` function so that it matches the following. Note that `discover` creates a streamed connection with the
+Agent, where the Agent gets the receiving end of the channel and the Discovery Handler sends device updates via the
+sending end of the channel. If the Agent drops its end, the Discovery Handler will stop discovery and attempt to
+re-register with the Agent. The Agent may drop its end due to an error or a deleted Configuration.
+
+```rust
+#[async_trait]
+impl DiscoveryHandler for DiscoveryHandlerImpl {
+    type DiscoverStream = DiscoverStream;
+    async fn discover(
+        &self,
+        request: tonic::Request<DiscoverRequest>,
+    ) -> Result<Response<Self::DiscoverStream>, Status> {
+        // Get the discovery url from the `DiscoverRequest`
+        let url = request.get_ref().discovery_details.clone();
+        // Create a channel for sending and receiving device updates
+        let (mut stream_sender, stream_receiver) = mpsc::channel(4);
+        let mut register_sender = self.register_sender.clone();
+        tokio::spawn(async move {
+            loop {
+                let resp = get(&url).await.unwrap();
+                // Response is a newline separated list of devices (host:port) or empty
+                let device_list = &resp.text().await.unwrap();
+                let devices = device_list
+                    .lines()
+                    .map(|endpoint| {
+                        let mut properties = HashMap::new();
+                        properties.insert(BROKER_NAME.to_string(), "http".to_string());
+                        properties.insert(DEVICE_ENDPOINT.to_string(), endpoint.to_string());
+                        Device {
+                            id: endpoint.to_string(),
+                            properties,
+                            mounts: Vec::default(),
+                            device_specs: Vec::default(),
+                        }
+                    })
+                    .collect::<Vec<Device>>();
+                // Send the Agent the list of devices.
+                if let Err(_) = stream_sender.send(Ok(DiscoverResponse { devices })).await {
+                    // Agent dropped its end of the stream. Stop discovering and signal to try to re-register.
+                    register_sender.send(()).await.unwrap();
+                    break;
+                }
+            }
+        });
+        // Send the agent one end of the channel to receive device updates
+        Ok(Response::new(stream_receiver))
+    }
+}
+```
+### Build the DiscoveryHandler container
+Now you are ready to build your HTTP Discovery Handler and push it to your container registry. To do so, we simply need
+to run this step from the base folder of the newly created `akri-http-discovery-handler` project:
+
+```bash
+HOST="ghcr.io"
+USER=[[GITHUB-USER]]
+DH="http-discovery-handler"
+TAGS="v1"
+
+DH_IMAGE="${HOST}/${USER}/${DH}"
+DH_IMAGE_TAGGED="${DH_IMAGE}:${TAGS}"
+
+docker build \
+--tag=${DH_IMAGE_TAGGED} \
+--file=./Dockerfile.discovery-handler \
+. && \
+docker push ${DH_IMAGE_TAGGED}
+```
+
+Save the name of your image. We will pass it into our Akri installation command when we are ready to deploy our
+discovery handler.
+
+## Create some HTTP devices
+At this point, we've extended Akri to discover devices with our HTTP Discovery Handler. To really test our new
+Discovery Handler, we need to create something to discover.
+
+For this exercise, we can create an HTTP service that listens to various paths. Each path can simulate a different
+device by publishing some value. With this, we can create a single Kubernetes pod that can simulate multiple devices.
+To make our scenario more realistic, we can add a discovery endpoint as well. Further, we can create a series of
+Kubernetes services that create facades for the various paths, giving the illusion of multiple devices and a separate
+discovery service.
+
+To that end, let's:
+
+1. Create a web service that mocks HTTP devices and a discovery service
+1. Deploy, start, and expose our mock HTTP devices and discovery service
+
+### Mock HTTP devices and Discovery service
+To simulate a set of discoverable HTTP devices and a discovery service, create a simple HTTP server
+(`samples/apps/http-apps/cmd/device/main.go`). The application will accept a list of `path` arguments, which will
+define endpoints that the service will respond to. These endpoints represent devices in our HTTP Discovery Handler.
+The application will also accept a set of `device` arguments, which will define the set of discovered devices.
+
+```go
+package main
+
+import (
+    "flag"
+    "fmt"
+    "html"
+    "log"
+    "math/rand"
+    "net"
+    "net/http"
+    "strings"
+    "time"
+)
+
+const (
+    addr = ":8080"
+)
+
+// RepeatableFlag is an alias to use repeated flags with flag
+type RepeatableFlag []string
+
+// String is a method required by flag.Value interface
+func (e *RepeatableFlag) String() string {
+    result := strings.Join(*e, "\n")
+    return result
+}
+
+// Set is a method required by flag.Value interface
+func (e *RepeatableFlag) Set(value string) error {
+    *e = append(*e, value)
+    return nil
+}
+var _ flag.Value = (*RepeatableFlag)(nil)
+var paths RepeatableFlag
+var devices RepeatableFlag
+
+func main() {
+    flag.Var(&paths, "path", "Repeat this flag to add paths for the device")
+    flag.Var(&devices, "device", "Repeat this flag to add devices to the discovery service")
+    flag.Parse()
+
+    // At a minimum, respond on `/`
+    if len(paths) == 0 {
+        paths = []string{"/"}
+    }
+    log.Printf("[main] Paths: %d", len(paths))
+
+    seed := rand.NewSource(time.Now().UnixNano())
+    entr := rand.New(seed)
+
+    handler := http.NewServeMux()
+
+    // Create handler for the discovery endpoint
+    handler.HandleFunc("/discovery", func(w http.ResponseWriter, r *http.Request) {
+        log.Printf("[discovery] Handler entered")
+        fmt.Fprintf(w, "%s\n", html.EscapeString(devices.String()))
+    })
+    // Create handler for each endpoint
+    for _, path := range paths {
+        path := path // shadow the loop variable so each closure captures its own path
+        log.Printf("[main] Creating handler: %s", path)
+        handler.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
+            log.Printf("[device] Handler entered: %s", path)
+            fmt.Fprint(w, entr.Float64())
+        })
+    }
+
+    s := &http.Server{
+        Addr:    addr,
+        Handler: handler,
+    }
+    listen, err := net.Listen("tcp", addr)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    log.Printf("[main] Starting Device: [%s]", addr)
+    log.Fatal(s.Serve(listen))
+}
+```
+
+To ensure that our Go project builds, we need to create `samples/apps/http-apps/go.mod`:
+
+```
+module github.com/deislabs/akri/http-extensibility
+
+go 1.15
+```
+
+### Build and Deploy devices and discovery
+To build and deploy the mock devices and discovery, a simple Dockerfile can be created that builds and exposes our mock
+server `samples/apps/http-apps/Dockerfiles/device`:
+```dockerfile
+FROM golang:1.15 as build
+WORKDIR /http-extensibility
+COPY go.mod .
+RUN go mod download
+COPY . .
+docker push ${IMAGE} +``` + +The mock devices can be deployed with a Kubernetes deployment `samples/apps/http-apps/kubernetes/device.yaml` (update +**image** based on the ${IMAGE}): +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: device +spec: + replicas: 1 + selector: + matchLabels: + id: akri-http-device + template: + metadata: + labels: + id: akri-http-device + name: device + spec: + imagePullSecrets: + - name: crPullSecret + containers: + - name: device + image: IMAGE + imagePullPolicy: Always + args: + - --path=/ + - --device=http://device-1:8080 + - --device=http://device-2:8080 + - --device=http://device-3:8080 + - --device=http://device-4:8080 + - --device=http://device-5:8080 + - --device=http://device-6:8080 + - --device=http://device-7:8080 + - --device=http://device-8:8080 + - --device=http://device-9:8080 + ports: + - name: http + containerPort: 8080 +``` + +Then apply `device.yaml` to create a deployment (called `device`) and a pod (called `device-...`): + +```bash +kubectl apply --filename=./samples/apps/http-apps/kubernetes/device.yaml +``` + +> **NOTE** We're using one deployment|pod to represent 9 devices AND a discovery service ... we will create 9 (distinct) +> Services against it (1 for each mock device) and 1 Service to present the discovery service. + +Then create 9 mock device Services: + +```bash +for NUM in {1..9} +do + # Services are uniquely named + # The service uses the Pods port: 8080 + kubectl expose deployment/device \ + --name=device-${NUM} \ + --port=8080 \ + --target-port=8080 \ + --labels=id=akri-http-device +done +``` + +> Optional: check one the services: +> +> ```bash +> kubectl run curl -it --rm --image=curlimages/curl -- sh +> ``` +> +> Then, pick a value for `X` between 1 and 9: +> +> ```bash +> X=6 +> curl device-${X}:8080 +> ``` +> +> Any or all of these should return a (random) 'sensor' value. + +Then create a Service (called `discovery`) using the deployment: + +```bash +kubectl expose deployment/device \ +--name=discovery \ +--port=8080 \ +--target-port=8080 \ +--labels=id=akri-http-device +``` + +> Optional: check the service to confirm that it reports a list of devices correctly using: +> +> ```bash +> kubectl run curl -it --rm --image=curlimages/curl -- sh +> ``` +> +> Then, curl the service's endpoint: +> +> ```bash +> curl discovery:8080/discovery +> ``` +> +> This should return a list of 9 devices, of the form `http://device-X:8080` + +## Deploy Akri +Now that we have created a HTTP Discovery Handler and created some mock devices, let's deploy Akri and see how it +discovers the devices and creates Akri Instances for each Device. + +> Optional: If you've previous installed Akri and wish to reset, you may: +> +> ```bash +> # Delete Akri Helm +> sudo helm delete akri +> ``` + +Akri has provided helm templates for custom Discovery Handlers and their Configurations. These templates are provided as +a starting point. They may need to be modified to meet the needs of a Discovery Handler. When installing Akri, specify that +you want to deploy a custom Discovery Handler as a DaemonSet by setting `custom.discovery.enabled=true`. +Specify the container for that DaemonSet as the HTTP discovery handler that you built +[above](###build-the-discoveryhandler-container) by setting `custom.discovery.image.repository=$DH_IMAGE` and `custom.discovery.image.repository=$TAGS`. To +automatically deploy a custom Configuration, set `custom.configuration.enabled=true`. 
+We will customize this Configuration to contain the discovery endpoint needed by our HTTP Discovery Handler by setting
+it in the `discovery_details` string of the Configuration, like so:
+`custom.configuration.discoveryDetails=http://discovery:9999/discovery`. We also need to set the name the Discovery
+Handler will register under (`custom.configuration.discoveryHandlerName`) and a name for the Discovery Handler and
+Configuration (`custom.discovery.name` and `custom.configuration.name`). All these settings come together as the
+following Akri installation command:
+> Note: Be sure to consult the [user guide](./user-guide.md) to see whether your Kubernetes distribution needs any
+> additional configuration.
+```bash
+  helm repo add akri-helm-charts https://deislabs.github.io/akri/
+  helm install akri akri-helm-charts/akri \
+    --set imagePullSecrets[0].name="crPullSecret" \
+    --set custom.discovery.enabled=true  \
+    --set custom.discovery.image.repository=$DH_IMAGE \
+    --set custom.discovery.image.tag=$TAGS \
+    --set custom.discovery.name=akri-http-discovery \
+    --set custom.configuration.enabled=true  \
+    --set custom.configuration.name=akri-http \
+    --set custom.configuration.discoveryHandlerName=http \
+    --set custom.configuration.discoveryDetails=http://discovery:9999/discovery
+  ```
+
+Watch as the Agent, Controller, and Discovery Handler Pods are spun up and as Instances are created for each of the
+discovered devices.
+```bash
+watch kubectl get pods,akrii
+```
+
+If you simply wanted Akri to expose discovered devices to the cluster as Kubernetes resources, you could stop here. If
+you have a workload that could utilize one of these resources, you could [manually deploy pods that request them as
+resources](./requesting-akri-resources.md). Alternatively, you could have Akri automatically deploy workloads to
+discovered devices. We call these workloads brokers. To quickly see this, let's deploy empty nginx pods to discovered
+resources, by updating our Configuration to include a broker PodSpec.
+```bash
+  helm upgrade akri akri-helm-charts/akri \
+    --set imagePullSecrets[0].name="crPullSecret" \
+    --set custom.discovery.enabled=true  \
+    --set custom.discovery.image.repository=$DH_IMAGE \
+    --set custom.discovery.image.tag=$TAGS \
+    --set custom.discovery.name=akri-http-discovery \
+    --set custom.configuration.enabled=true  \
+    --set custom.configuration.name=akri-http \
+    --set custom.configuration.discoveryHandlerName=http \
+    --set custom.configuration.discoveryDetails=http://discovery:9999/discovery \
+    --set custom.configuration.brokerPod.image.repository=nginx
+  watch kubectl get pods,akrii
+```
+Our empty nginx brokers do not do anything with the devices they've requested, so let's create our own broker.
+
+## Create a sample broker
+We have successfully created our Discovery Handler. If you want Akri to also automatically deploy Pods (called brokers)
+to each discovered device, this section will show you how to create a custom broker that will make the HTTP-based Device
+data available to the cluster. The broker can be written in any language as it will be deployed as an individual pod.
+
+3 different broker implementations have been created for the HTTP Discovery Handler in the [http-extensibility
+branch](https://github.com/deislabs/akri/tree/http-extensibility), 2 in Rust and 1 in Go:
+* The standalone broker is a self-contained scenario that demonstrates the ability to interact with HTTP-based devices
+  by `curl`ing a device's endpoints.
This type of solution would be applicable in batch-like scenarios where the broker + performs a predictable set of processing steps for a device. +* The second scenario uses gRPC. gRPC is an increasingly common alternative to REST-like APIs and supports + high-throughput and streaming methods. gRPC is not a requirement for broker implementations in Akri but is used here + as one of many mechanisms that may be used. The gRPC-based broker has a companion client. This is a more realistic + scenario in which the broker proxies client requests using gRPC to HTTP-based devices. The advantage of this approach + is that device functionality is encapsulated by an API that is exposed by the broker. In this case the API has a + single method but in practice, there could be many methods implemented. +* The third implementation is a gRPC-based broker and companion client implemented in Golang. This is functionally + equivalent to the Rust implementation and shares a protobuf definition. For this reason, you may combine the Rust + broker and client with the Golang broker and client arbitrarily. The Golang broker is described in the + [`http-apps`](https://github.com/deislabs/akri/blob/http-extensibility/samples/apps/http-apps/README.md) directory. + +For this, we will describe the first option, a standalone broker. For a more detailed look at the other gRPC options, +please look at [extensibility-http-grpc.md in the http-extensibility +branch](https://github.com/deislabs/akri/blob/http-extensibility/docs/extensibility-http-grpc.md). + +First, let's create a new Rust project for our sample broker. We can use cargo to create our project by navigating to +`samples/brokers` and running: + +```bash +cargo new http +``` + +Once the http project has been created, it can be added to the greater Akri project by adding `"samples/brokers/http"` +to the **members** in `./Cargo.toml`. + +To access the HTTP-based Device data, we first need to retrieve the discovery information. Any information stored in +the `Device` properties map will be transferred into the broker container's environment variables. Retrieving them is +simply a matter of querying environment variables like this: + +```rust +let device_url = env::var("AKRI_HTTP_DEVICE_ENDPOINT")?; +``` + +For our HTTP broker, the data can be retrieved with a simple GET: + +```rust +async fn read_sensor(device_url: &str) { + match get(device_url).await { + Ok(resp) => { + let body = resp.text().await; + } + Err(err) => println!("Error: {:?}", err), + }; +} +``` + +We can tie all the pieces together in `samples/brokers/http/src/main.rs`. 
We retrieve the HTTP-based Device url from
+the environment variables, make a simple GET request to retrieve the device data, and output the response to the log:
+
+```rust
+use reqwest::get;
+use std::env;
+use tokio::{time, time::Duration};
+
+const DEVICE_ENDPOINT: &str = "AKRI_HTTP_DEVICE_ENDPOINT";
+
+async fn read_sensor(device_url: &str) {
+    match get(device_url).await {
+        Ok(resp) => {
+            let body = resp.text().await;
+            println!("[main:read_sensor] Response body: {:?}", body);
+        }
+        Err(err) => println!("Error: {:?}", err),
+    };
+}
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let device_url = env::var(DEVICE_ENDPOINT)?;
+    let mut tasks = Vec::new();
+    tasks.push(tokio::spawn(async move {
+        loop {
+            // Poll the device every ten seconds
+            time::delay_for(Duration::from_secs(10)).await;
+            read_sensor(&device_url[..]).await;
+        }
+    }));
+    futures::future::join_all(tasks).await;
+    Ok(())
+}
+```
+
+and ensure that we have the required dependencies in `samples/brokers/http/Cargo.toml`:
+
+```toml
+[[bin]]
+name = "standalone"
+path = "src/main.rs"
+
+[dependencies]
+futures = "0.3"
+reqwest = "0.10.8"
+tokio = { version = "0.2", features = ["rt-threaded", "time", "stream", "fs", "macros", "uds"] }
+```
+
+To build the HTTP broker, we need to create a Dockerfile, `samples/brokers/http/Dockerfiles/standalone`:
+
+```dockerfile
+FROM amd64/rust:1.47 as build
+RUN rustup component add rustfmt --toolchain 1.47.0-x86_64-unknown-linux-gnu
+RUN USER=root cargo new --bin http
+WORKDIR /http
+
+COPY ./samples/brokers/http/Cargo.toml ./Cargo.toml
+RUN cargo build \
+    --bin=standalone \
+    --release
+RUN rm ./src/*.rs
+RUN rm ./target/release/deps/standalone*
+COPY ./samples/brokers/http .
+RUN cargo build \
+    --bin=standalone \
+    --release
+
+FROM amd64/debian:buster-slim
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    ca-certificates \
+    libssl-dev \
+    openssl && \
+    apt-get clean
+
+COPY --from=build /http/target/release/standalone /standalone
+LABEL org.opencontainers.image.source https://github.com/deislabs/akri
+ENV SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
+ENV SSL_CERT_DIR=/etc/ssl/certs
+ENV RUST_LOG standalone
+
+ENTRYPOINT ["/standalone"]
+```
+
+Akri's `.dockerignore` is configured so that docker will ignore most files in our repository; some exceptions will need
+to be added to build the HTTP broker:
+
+```console
+!samples/brokers/http
+```
+
+Now you are ready to **build the HTTP broker**! To do so, we simply need to run this step from the base folder of the
+Akri repo:
+
+```bash
+HOST="ghcr.io"
+USER=[[GITHUB-USER]]
+BROKER="http-broker"
+TAGS="v1"
+
+BROKER_IMAGE="${HOST}/${USER}/${BROKER}"
+BROKER_IMAGE_TAGGED="${BROKER_IMAGE}:${TAGS}"
+
+docker build \
+--tag=${BROKER_IMAGE_TAGGED} \
+--file=./samples/brokers/http/Dockerfiles/standalone \
+. && \
+docker push ${BROKER_IMAGE_TAGGED}
+```
+
+## Deploy broker
+
+Now that the HTTP broker has been created, we can substitute its image in for the simple nginx broker we previously
+used in our installation command.
+```bash + helm upgrade akri akri-helm-charts/akri \ + --set imagePullSecrets[0].name="crPullSecret" \ + --set custom.discovery.enabled=true \ + --set custom.discovery.image.repository=$DH_IMAGE \ + --set custom.discovery.image.tag=$TAGS \ + --set custom.discovery.name=akri-http-discovery \ + --set custom.configuration.enabled=true \ + --set custom.configuration.name=akri-http \ + --set custom.configuration.discoveryHandlerName=http \ + --set custom.configuration.discoveryDetails=http://discovery:9999/discovery \ + --set custom.configuration.brokerPod.image.repository=$BROKER_IMAGE \ + --set custom.configuration.brokerPod.image.tag=$TAGS + watch kubectl get pods,akrii +``` +> Note: substitute `helm upgrade` for `helm install` if you do not have an existing Akri installation + +We can watch as the broker pods get deployed: +```bash +watch kubectl get pods -o wide +``` \ No newline at end of file diff --git a/docs/media/akri-architecture.svg b/docs/media/akri-architecture.svg index 6b931e0c4..6b88a9e42 100644 --- a/docs/media/akri-architecture.svg +++ b/docs/media/akri-architecture.svg @@ -1,678 +1,406 @@ - + - - - - - - - + width="10.7038in" height="6.24177in" viewBox="0 0 770.672 449.407" xml:space="preserve" color-interpolation-filters="sRGB" + class="st32"> - - - - - single cluster architecture (with camera) - - - Sheet.1001 - - - - Sheet.1003 + + Page-7 + + Sheet.1000 Edge Cluster - - - - Edge Cluster - - Sheet.1004 + + Edge Cluster + + Sheet.1001 Control Plane - - - - Control Plane - - Sheet.1005 + + Control Plane + + Sheet.1002 Kubernetes Scheduler - - - KubernetesScheduler - - Sheet.1006 + class="st5"/> + Kubernetes Scheduler + + Sheet.1003 Akri Controller - - - AkriController - - Sheet.1007 + class="st8"/> + Akri Controller + + Sheet.1004 + Z" class="st9"/> - - Sheet.1008 + + Sheet.1005 API Server - - - - API Server - - Sheet.1009 + + API Server + + Sheet.1006 + 430.53 L0 430.53 Z" class="st12"/> - - Sheet.1010 + + Sheet.1007 + 430.53 L0 430.53" class="st13"/> - - Sheet.1011 + + Sheet.1008 + 430.53 L0 430.53 Z" class="st12"/> - - Sheet.1012 + + Sheet.1009 - - - Sheet.1013 - + 430.53 L0 430.53" class="st14"/> - - Sheet.1014 - Worker - - - - Worker - - Sheet.1015 + + Sheet.1010 + Node + + Node + + Sheet.1011 + class="st5"/> - - Sheet.1016 + + Sheet.1012 Kubelet - - - - Kubelet - - Sheet.1017 - - - - Sheet.1018 - Akri - - - - Akri - - Sheet.1019 - Agent - - - - Agent - - Sheet.1020 - <protocolA> - - + + Kubelet + + Sheet.1013 + Akri Agent + + Akri Agent + + Sheet.1016 + <protocol> - <protocolA> - - Sheet.1021 + L23.82 439.28 L23.82 449.41 L0 429.16 Z" class="st16"/> + <protocol> + + Sheet.1017 + L4.5 374.99 L0 374.99 Z" class="st12"/> - - Sheet.1022 + + Sheet.1018 + L4.5 374.99 L0 374.99" class="st14"/> - - Sheet.1046 + + Sheet.1019 + L4.53 375.05 L0 375.05 Z" class="st16"/> - + Sheet.1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - - Sheet.1047 - kind: Configuration metadata:   name: akri-<protocolA> spec: ... - - - - kind: Configurationmetadata: name: akri<protocolA>spec: protocol: <protocolA> brokerPodSpec: containers: - name: custom-broker image: "ghcr.io/…" - - Sheet.1048 + + Sheet.1020 + kind: Configuration metadata: ..name: akri-<protocol> spec: .... 
+ + kind: Configuration metadata: ..name: akri-<protocol> spec: ..discoveryHandler: …..name: <protocol> ..brokerPodSpec: …..containers: …..- name: custom-broker ……..image: "ghcr.io/…" + + Sheet.1021 + 438.07 L4.71 426.65 L0 426.65 Z" class="st16"/> - - - - + Can.1091 etcd - - Sheet.1050 - - - - - - + + Sheet.1023 - - - + - etcd + etcd - - - - + 1-D single.1004 - - Sheet.1056 + + Sheet.1025 - - Sheet.1057 + + Sheet.1026 - - Sheet.1058 + + Sheet.1027 - - Wavy Box.1020 - Broker - - - - Broker - - Wavy Box.1019 - Broker - - - - Broker - - Wavy Box.1003 - custom-broker - - - - custom-broker - - Sheet.1063 + + Sheet.1031 + + Rectangle.1066 + Configuration CRD + + Configuration CRD + + Rectangle.1067 + Instance CRD + + Instance CRD + + Rectangle.1068 + <protocol> Configuration + + <protocol> Configuration + + Rectangle.1069 + <protocol> Instance + + <protocol> Instance + + + Sheet.1036 Leaf Device - - - - Leaf Device - - Sheet.1064 + + Leaf Device + + Sheet.1037 Leaf Device - - - - Leaf Device - - Sheet.1065 + + Leaf Device + + Sheet.1038 Leaf Device - - - - Leaf Device - + + Leaf Device + Sheet.2 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - + Sheet.3 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - + Sheet.4 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + - - - + + + - - Sheet.1066 - - Rectangle.1066 - Configuration CRD - - - - - - - ConfigurationCRD - - Rectangle.1067 - Instance CRD - - - - - - - Instance CRD - - Rectangle.1068 - <protocolA> Configuration - - - - - - - <protocolA> Configuration - - Rectangle.1069 - <protocolA> Instance - - - - - - - <protocolA> Instance + + Sheet.1039 + + Wavy Box.1020 + Broker + + Broker + + Wavy Box.1019 + Broker + + Broker + + Wavy Box.1003 + custom-broker + + custom-broker + + + Sheet.1040 + <protocol> Discovery Handler + + <protocol> Discovery Handler + + Sheet.1041 + diff --git a/docs/media/onvif-flow.svg b/docs/media/onvif-flow.svg index 0a33340ef..25b4e392d 100644 --- a/docs/media/onvif-flow.svg +++ b/docs/media/onvif-flow.svg @@ -3,7 +3,7 @@ + viewBox="0 0 678 399.187" xml:space="preserve" color-interpolation-filters="sRGB" class="st24"> @@ -32,10 +32,11 @@ .st17 {fill:#feffff;font-family:Calibri;font-size:1.00001em} .st18 {fill:#feffff;font-family:Calibri;font-size:0.666664em} .st19 {fill:#2b74ef;font-size:1em} - .st20 {marker-end:url(#mrkr4-141);stroke:#444a6d;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5} - .st21 {marker-end:url(#mrkr4-156);stroke:#444a6d;stroke-linecap:round;stroke-linejoin:round;stroke-width:1} - .st22 {fill:#444a6d;fill-opacity:1;stroke:#444a6d;stroke-opacity:1;stroke-width:0.28409090909091} - .st23 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3} + .st20 {fill:#ebedf2;font-size:1em} + .st21 {marker-end:url(#mrkr4-141);stroke:#444a6d;stroke-linecap:round;stroke-linejoin:round;stroke-width:1.5} + .st22 {marker-end:url(#mrkr4-156);stroke:#444a6d;stroke-linecap:round;stroke-linejoin:round;stroke-width:1} + .st23 {fill:#444a6d;fill-opacity:1;stroke:#444a6d;stroke-opacity:1;stroke-width:0.28409090909091} + .st24 {fill:none;fill-rule:evenodd;font-size:12px;overflow:visible;stroke-linecap:square;stroke-miterlimit:3} ]]> @@ -51,12 +52,12 @@ markerUnits="strokeWidth" overflow="visible"> - - + @@ -207,7 +208,7 @@ Sheet.1092 - + @@ -310,31 +311,30 
@@ - - + + - - + + - + - - + + - + - + - + - + @@ -420,31 +420,30 @@ - - + + - - + + - + - - + + - + - + - + - + @@ -508,31 +507,30 @@ - - + + - - + + - + - - + + - + - + - + - + @@ -591,30 +589,30 @@ - - + + - - + + - + - - + + - + - + - + - @@ -678,19 +676,20 @@ ONVIF Configuration Service Sheet.1133 - kind: Configuration metadata:   name: akri-onvif-video spec:... + kind: Configuration metadata: ..name: akri-onvif-video spec:... kind: Configurationmetadata: name: metadata:..name: akri-onvif-videospec: protocol: onvif: brokerPodSpec: containers: - name: akri-onvif-video-broker image: "ghcr.io/…/onvif-video-broker" + x="4" dy="1.2em" class="st20">..discoveryHandler:….name: onvif..brokerPodSpec:….containers:….- name: akri-onvif-video-broker…...image: "ghcr.io/…/onvif-video-broker" Dynamic connector.1134 Dynamic connector.1146 - + Dynamic connector.1147 - + Rounded Rectangle.1148 @@ -827,7 +826,7 @@ Dynamic connector.1149 + 442.46 130.12 455.02 C130.12 460.15 130.12 466.06 132.63 469.52 L132.96 469.68" class="st22"/> diff --git a/docs/media/setting-broker-environment-variables.svg b/docs/media/setting-broker-environment-variables.svg new file mode 100644 index 000000000..b8a52ac9f --- /dev/null +++ b/docs/media/setting-broker-environment-variables.svg @@ -0,0 +1,239 @@ + + + + + + + + + + + + + + + + + + + + + Page-1 + + + + Rectangle.2011 + NodeA + + + + + + + NodeA + + Sheet.2001 + kind: Configuration metadata: …name: akri-udev-video spec: …d... + + + + kind: Configurationmetadata:name: akri-udev-videospec:discoveryHandler: …..name: udev…..discoveryDetails:|+……..udevRules:……..- 'KERNEL=="video[0-9]*"'capacity: 3 brokerPodSpec:…..containers:…..- name: camera-broker……..image: "ghcr.io/…"brokerProperties:…..FRAMES_PER_SECOND: "10"…..RESOLUTION_WIDTH: “640"…..RESOLUTION_HEIGHT: “480” + + Sheet.2002 + kind: Instance metadata: …name: akri-udev-video-ffffff spec: ... + + + + kind: Instancemetadata:name: akri-udev-video-ffffffspec:configurationName: akri-udev-videoshared: falsenodes:- "NodeA"deviceUsage:…..akri-udev-video-ffffff-0: "NodeA"…..akri-udev-video-ffffff-1: ""…..akri-udev-video-ffffff-2: ""brokerProperties:…..UDEV_DEVNODE: /dev/video0…..FRAMES_PER_SECOND: "12"…..RESOLUTION_HEIGHT: "485"…..RESOLUTION_WIDTH: "645" + + Sheet.1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Dynamic connector.2004 + USB + + + + + USB + + Square.2005 + Udev Discovery Handler + + + + + + + Udev Discovery Handler + + Dynamic connector.2007 + + + + Square.2008 + camera-broker + + + + + + + camera-broker + + Dynamic connector.2009 + + + + Dynamic connector.2006 + + + + Dynamic connector.2010 + + + + diff --git a/docs/onvif-configuration.md b/docs/onvif-configuration.md index 701969bd5..c60b27a4d 100644 --- a/docs/onvif-configuration.md +++ b/docs/onvif-configuration.md @@ -1,104 +1,122 @@ -# Using the ONVIF Discovery Protocol in a Configuration +# Configuring Akri to Discover Devices via ONVIF ## Background -ONVIF is a standard used by many IP cameras and defines discovery and access for RTSP camera streams. Along with a protocol implementation for ONVIF, Akri has provided a generic ONVIF Configuration. Akri has also provided a sample broker (`akri-onvif-video-broker`), which acts as a frame server. +ONVIF is a standard used by many IP cameras and defines discovery and access for RTSP camera streams. Along with an ONVIF Discovery Handler, Akri has provided a generic ONVIF Configuration for initiating ONVIF discovery. 
Akri has also provided a sample broker (`akri-onvif-video-broker`), which acts as a frame server.
 
 Using Akri's default ONVIF Configuration to discover and utilize ONVIF cameras looks like the following:
 
 Akri ONVIF Flow
 1. An operator applies the ONVIF Configuration to the cluster (by enabling ONVIF when installing the Akri Helm chart). They also specify a broker image -- `akri-onvif-video-broker` in the figure.
-1. The Akri Agent uses the ONVIF protocol to discover the IP cameras and creates Instances for each discovered camera.
+1. The Akri Agent uses the ONVIF Discovery Handler to discover the IP cameras and creates Instances for each discovered camera.
 1. The Akri Controller sees the Instances and deploys `akri-onvif-video-broker` pods, which were specified in the Configuration. The Controller also creates a Kubernetes service for each ONVIF camera along with one service for all the ONVIF cameras.
 
-## Usage
-To use the default ONVIF Configuration in your Akri-enabled cluster, you simply set `onvif.enabled=true` when installing the Akri Helm chart. If you would like broker pods to be deployed automatically to discovered cameras, set `udev.brokerPod.image.repository` to point to your broker image. Alternatively, if it meets your scenario, you could use the Akri frame server broker as done below. If you would rather manually deploy pods to utilize the cameras advertized by Akri, don't specify a broker pod and see our documentation on [requesting resources advertized by Akri](./requesting-akri-resources.md). More information about the Akri Helm charts can be found in the [user guide](./user-guide.md#understanding-akri-helm-charts).
+All of Akri's components can be deployed by specifying values in its Helm chart during an installation. This section will cover the values that should be set to (1) deploy the ONVIF Discovery Handlers and (2) apply a Configuration that tells Akri to discover devices using that Discovery Handler.
+## Deploying the ONVIF Discovery Handler
+In order for the Agent to know how to discover IP cameras, an ONVIF Discovery Handler must exist. Akri supports an Agent image that includes all supported Discovery Handlers. This Agent will be used if `agent.full=true`. By default, a slim Agent without any embedded Discovery Handlers is deployed and the required Discovery Handlers can be deployed as DaemonSets. This documentation will use that strategy, deploying ONVIF Discovery Handlers by specifying `onvif.discovery.enabled=true` when installing Akri.
+## ONVIF Configuration Settings
+Instead of having to assemble your own ONVIF Configuration yaml, we have provided a [Helm
+template](../deployment/helm/templates/onvif-configuration.yaml). Helm allows us to parametrize the commonly modified fields in our configuration files, and we have provided many for ONVIF (to see
+them, run `helm inspect values akri-helm-charts/akri`). More information about the Akri Helm charts can be found in the [user guide](./user-guide.md#understanding-akri-helm-charts).
+To apply the ONVIF Configuration to your cluster, simply set `onvif.configuration.enabled=true` along with any of the following additional Configuration settings when installing Akri.
+### Discovery Handler Discovery Details Settings
+Discovery Handlers are passed discovery details that are set in a Configuration to determine what to discover, filter out of discovery, and so on.
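+As an illustrative sketch only (the `discoveryDetails` serialization below is inferred from Akri's Configuration
+examples and the defaults in the table that follows; the Helm template is authoritative), these values end up embedded
+in the applied Configuration roughly like so:
+```yaml
+spec:
+  discoveryHandler:
+    name: onvif
+    discoveryDetails: |+
+      ipAddresses:
+        action: Exclude
+        items: []
+      macAddresses:
+        action: Exclude
+        items: []
+      scopes:
+        action: Exclude
+        items: []
+      discoveryTimeoutSeconds: 1
+```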
+The ONVIF Discovery Handler supports the following discovery details:
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| onvif.configuration.discoveryDetails.ipAddresses.action | Include, Exclude | Exclude | filter action to take on a set of IP addresses |
+| onvif.configuration.discoveryDetails.ipAddresses.items | array of IP addresses | empty | IP addresses that the filter action acts upon |
+| onvif.configuration.discoveryDetails.macAddresses.action | Include, Exclude | Exclude | filter action to take on a set of mac addresses |
+| onvif.configuration.discoveryDetails.macAddresses.items | array of mac addresses | empty | mac addresses that the filter action acts upon |
+| onvif.configuration.discoveryDetails.scope.action | Include, Exclude | Exclude | filter action to take on a set of scopes |
+| onvif.configuration.discoveryDetails.scope.items | array of scopes | empty | scopes that the filter action acts upon |
+| onvif.configuration.discoveryDetails.discoveryTimeoutSeconds | number of seconds | 1 | max amount of time the Discovery Handler should search before reporting any (newly) discovered devices |
+### Broker Pod Settings
+If you would like workloads ("broker" Pods) to be deployed automatically to discovered cameras, a broker image should be specified in the Configuration. Alternatively, if it meets your scenario, you could use the Akri frame server broker ("ghcr.io/deislabs/akri/onvif-video-broker"). If you would rather manually deploy pods to utilize the cameras advertized by Akri, don't specify a broker pod and see our documentation on [requesting resources advertized by Akri](./requesting-akri-resources.md).
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| onvif.configuration.brokerPod.image.repository | image string | "" | image of broker Pod that should be deployed to discovered devices |
+| onvif.configuration.brokerPod.image.tag | tag string | "latest" | image tag of broker Pod that should be deployed to discovered devices |
+
+### Disabling Automatic Service Creation
+By default, if a broker Pod is specified, the generic ONVIF Configuration will create services for all the brokers of a specific Akri Instance and all the brokers of an Akri Configuration. The creation of these services can be disabled.
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| onvif.configuration.createInstanceServices | true, false | true | a service should be automatically created for each broker Pod |
+| onvif.configuration.createConfigurationService | true, false | true | a single service should be created for all brokers of a Configuration |
+
+### Capacity Setting
+By default, if a broker Pod is specified, a single broker Pod is deployed to each device. To modify the Configuration so that a camera is accessed by more or fewer nodes via broker Pods, update the `onvif.configuration.capacity` setting to reflect the correct number. For example, if your high availability needs are met by having 1 redundant
+pod, you can update the Configuration by setting `onvif.configuration.capacity=2`.
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| onvif.configuration.capacity | number | 1 | maximum number of brokers that can be deployed to utilize a device (up to 1 per Node) |
+
+### Installing Akri with the ONVIF Configuration and Discovery Handler
+Leveraging the above settings, Akri can be installed with the ONVIF Discovery Handler and an ONVIF Configuration that specifies the Akri frame server broker:
 ```bash
 helm repo add akri-helm-charts https://deislabs.github.io/akri/
 helm install akri akri-helm-charts/akri \
-    --set useLatestContainers=true \
-    --set onvif.enabled=true \
-    --set onvif.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker:latest-dev"
+    --set onvif.discovery.enabled=true \
+    --set onvif.configuration.enabled=true \
+    --set onvif.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker" \
+    --set onvif.configuration.brokerPod.image.tag="latest"
 ```
 
-The default Configuration will find any ONVIF camera and will deploy up to one broker pod to each camera, since `capacity` defaults to one. The brokers will supply the automatically created Instance Services and the Configuration Service with frames.
+No discovery details were provided to filter the Discovery Handler's search, so the Discovery Handler will find any ONVIF camera on the network and will deploy up to one broker pod to each camera, since `capacity` defaults to one. The brokers will supply the automatically created Instance Services and the Configuration Service with frames.
 
-The ONVIF Configuration can be tailored to your cluster by:
+The following installation examples show how the ONVIF Configuration can be tailored to your cluster:
 * Filtering ONVIF cameras
 * Changing the discovery timeout
-* Changing the capacity
-* Disabling automatic service creation
-* Modifying the broker PodSpec (See [Customizing Akri
-  Installation](./customizing-akri-installation.md#modifying-the-brokerpodspec))
-* Modifying instanceServiceSpec or configurationServiceSpec (See [Customizing Akri
-  Installation](./customizing-akri-installation.md#modifying-instanceservicespec-or-configurationservicespec))
-
-### Filtering ONVIF cameras
-To ensure that this Configuration only describes certain cameras, a basic filter capability has been provided. This
-will allow you to either include or exclude specific IP addresses, MAC addresses, or ONVIF scopes.
-
-For example, you can enable cluster access for every camera that does not have an IP address of 10.0.0.1 by using this:
+#### Filtering ONVIF cameras
+The ONVIF Discovery Handler supports basic filter capabilities. Discovery details can be set in the Configuration to tell the Discovery Handler to either include or exclude specific IP addresses, MAC addresses, or ONVIF scopes.
+
+For example, the following enables discovery of every camera that does not have an IP address of 10.0.0.1:
 ```bash
 helm repo add akri-helm-charts https://deislabs.github.io/akri/
 helm install akri akri-helm-charts/akri \
-    --set useLatestContainers=true \
-    --set onvif.enabled=true \
-    --set onvif.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker:latest-dev" \
-    --set onvif.ipAddresses.action=Exclude \
-    --set onvif.ipAddresses.items[0]=10.0.0.1
+    --set onvif.discovery.enabled=true \
+    --set onvif.configuration.enabled=true \
+    --set onvif.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker" \
+    --set onvif.configuration.discoveryDetails.ipAddresses.action=Exclude \
+    --set onvif.configuration.discoveryDetails.ipAddresses.items[0]=10.0.0.1
 ```
-### Changing the discovery timeout
-The ONVIF protocol will search for up to `discoveryTimeoutSeconds` for IP cameras. This timeout can be increased or
-decreased as desired, and defaults to 1 second if left unconfigured. It can be set in the Configuration like this:
+You can enable cluster access for every camera with a specific name by modifying the Configuration like so:
 ```bash
 helm repo add akri-helm-charts https://deislabs.github.io/akri/
 helm install akri akri-helm-charts/akri \
-    --set useLatestContainers=true \
-    --set onvif.enabled=true \
-    --set onvif.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker:latest-dev" \
-    --set onvif.scopes.action=Include \
-    --set onvif.scopes.items[0]="onvif://www.onvif.org/name/GreatONVIFCamera" \
-    --set onvif.scopes.items[1]="onvif://www.onvif.org/name/AwesomeONVIFCamera"
+    --set onvif.discovery.enabled=true \
+    --set onvif.configuration.enabled=true \
+    --set onvif.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker" \
+    --set onvif.configuration.discoveryDetails.scopes.action=Include \
+    --set onvif.configuration.discoveryDetails.scopes.items[0]="onvif://www.onvif.org/name/GreatONVIFCamera" \
+    --set onvif.configuration.discoveryDetails.scopes.items[1]="onvif://www.onvif.org/name/AwesomeONVIFCamera"
 ```
-### Changing the capacity
-To modify the Configuration so that a camera is accessed by more or fewer protocol broker Pods, update the `capacity`
-property to reflect the correct number. For example, if your high availability needs are met by having only 1 redundant
-pod, you can update the Configuration like this:
+#### Changing the discovery timeout
+The ONVIF Discovery Handler will search for up to `discoveryTimeoutSeconds` for IP cameras. This timeout can be increased or
+decreased as desired, and defaults to 1 second if not configured. It can be set in the Configuration like this:
 ```bash
 helm repo add akri-helm-charts https://deislabs.github.io/akri/
 helm install akri akri-helm-charts/akri \
-    --set useLatestContainers=true \
-    --set onvif.enabled=true \
-    --set onvif.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker:latest-dev" \
-    --set onvif.capacity=2
+    --set onvif.discovery.enabled=true \
+    --set onvif.configuration.enabled=true \
+    --set onvif.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/onvif-video-broker" \
+    --set onvif.configuration.discoveryDetails.discoveryTimeoutSeconds=2
 ```
-## Disabling automatic service creation
-By default, the generic ONVIF Configuration will create services for all the brokers of a specific Akri Instance and all the brokers of an Akri Configuration.
Disable the create of Instance level services and Configuration level services by setting `--set onvif.createInstanceServices=false` and `--set onvif.createConfigurationService=false`, respectively. - ## Modifying a Configuration -More information about how to modify an installed Configuration, add additional protocol Configurations to a cluster, or +Akri has provided further documentation on [modifying the broker PodSpec](./customizing-akri-installation.md#modifying-the-brokerpodspec), [instanceServiceSpec, or configurationServiceSpec](./customizing-akri-installation.md#modifying-instanceservicespec-or-configurationservicespec) +More information about how to modify an installed Configuration, add additional Configurations to a cluster, or delete a Configuration can be found in the [Customizing an Akri Installation document](./customizing-akri-installation.md). ## Implementation details The ONVIF implementation can be understood by looking at several things: -1. [OnvifDiscoveryHandlerConfig](../shared/src/akri/configuration.rs) defines the required properties -1. [The onvif property in akri-configuration-crd.yaml](../deployment/helm/crds/akri-configuration-crd.yaml) validates - the CRD input -1. [OnvifDiscoveryHandler](../agent/src/protocols/onvif/discovery_handler.rs) defines ONVIF camera discovery -1. [samples/brokers/onvif-video-broker](../samples/brokers/onvif-video-broker) defines the ONVIF protocol broker \ No newline at end of file +1. [OnvifDiscoveryDetails](../discovery-handlers/onvif/src/discovery_handler.rs) defines the required properties +1. [OnvifDiscoveryHandler](../discovery-handlers/onvif/src/discovery_handler.rs) defines ONVIF camera discovery +1. [samples/brokers/onvif-video-broker](../samples/brokers/onvif-video-broker) defines the ONVIF broker \ No newline at end of file diff --git a/docs/opcua-configuration.md b/docs/opcua-configuration.md index f5b61de7d..64ad5fe87 100644 --- a/docs/opcua-configuration.md +++ b/docs/opcua-configuration.md @@ -1,22 +1,68 @@ -# OPC UA Sample +# Configuring Akri to Discover Devices via OPC UA +## Background OPC UA (Object Linking and Embedding for Process Control Unified Architecture) is a communication protocol for -industrial automation. Akri has implemented a discovery handler for discovering OPC UA Servers that live at specified endpoints or are registered with specified Local Discovery Servers. Background on the OPC UA protocol implementation can be found in the [proposal](proposals/opcua.md). To try out using Akri to discover and utilize OPC UA servers, see the [OPC UA end-to-end demo](./opcua-demo.md). - -## Usage +industrial automation. Akri has implemented a Discovery Handler for discovering OPC UA Servers that live at specified endpoints or are registered with specified Local Discovery Servers. Background on the OPC UA Discovery Handler implementation can be found in the [proposal](proposals/opcua.md). To try out using Akri to discover and utilize OPC UA servers, see the [OPC UA end-to-end demo](./opcua-demo.md). + +All of Akri's components can be deployed by specifying values in its Helm chart during an installation. This section will cover the values that should be set to (1) deploy the OPC UA Discovery Handlers and (2) apply a Configuration that tells Akri to discover devices using that Discovery Handler. +## Deploying the OPC UA Discovery Handler +In order for the Agent to know how to discover OPC UA servers an OPC UA Discovery Handler must exist. Akri supports an Agent image that includes all supported Discovery Handlers. 
This Agent will be used if `agent.full=true`. By default, a slim Agent without any embedded Discovery Handlers is deployed and the required Discovery Handlers can be deployed as DaemonSets. This documentation will use that strategy, deploying OPC UA Discovery Handlers by specifying `opcua.discovery.enabled=true` when installing Akri.
+
+## OPC UA Configuration Settings
+Instead of having to assemble your own OPC UA Configuration yaml, we have provided a [Helm
+template](../deployment/helm/templates/opcua-configuration.yaml). Helm allows us to parametrize the commonly modified fields in our configuration files, and we have provided many for OPC UA (to see
+them, run `helm inspect values akri-helm-charts/akri`). More information about the Akri Helm charts can be found in the [user guide](./user-guide.md#understanding-akri-helm-charts).
+To apply the OPC UA Configuration to your cluster, simply set `opcua.configuration.enabled=true` along with any of the following additional Configuration settings when installing Akri.
+### Discovery Handler Discovery Details Settings
+Discovery Handlers are passed discovery details that are set in a Configuration to determine what to discover, filter out of discovery, and so on.
+The OPC UA Discovery Handler requires a set of DiscoveryURLs to direct its search. Every OPC UA server/application has a DiscoveryEndpoint that Clients can access without establishing a session. The address for this endpoint is defined by a DiscoveryURL. A Local Discovery Server (LDS) is a unique type of OPC UA server
-which maintains a list of OPC UA servers that have registered with it. The generic OPC UA Configuration takes in a list of
-DiscoveryURLs, whether for LDSes or a specific servers and an optional list of application names to either include or exclude. By default, if no DiscoveryURLs are set, Agent will attempt to reach out to the Local Discovery Server on it's host at the default address [from OPC UA Specification
+which maintains a list of OPC UA servers that have registered with it.
+
+The generic OPC UA Configuration takes in a list of DiscoveryURLs, whether for LDSes or specific servers, and an optional list of application names to either include or exclude.
+By default, if no DiscoveryURLs are set, the Discovery Handler will attempt to reach out to the Local Discovery Server on its host at the default address [from OPC UA Specification
 12](https://reference.opcfoundation.org/v104/Core/docs/Part6/7.6/) of `opc.tcp://localhost:4840/` and get the list of OPC UA servers registered with it.
-
-To enable OPC UA discovery via the default LDS DiscoveryURL in your Akri-enabled cluster, you must set
-`opcua.enabled=true` when installing the Akri Helm chart.
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| opcua.configuration.discoveryDetails.discoveryUrls | array of DiscoveryURLs | ["opc.tcp://localhost:4840/"] | DiscoveryURLs for OPC UA Servers or Local Discovery Servers |
+| opcua.configuration.discoveryDetails.applicationNames.action | Include, Exclude | Exclude | filter action to take on a set of OPC UA Applications |
+| opcua.configuration.discoveryDetails.applicationNames.items | array of application names | empty | application names that the filter action acts upon |
+
+### Broker Pod Settings
+If you would like workloads ("broker" Pods) to be deployed automatically to discovered devices, a broker image should be specified in the Configuration.
Alternatively, if it meets your scenario, you could use Akri's sample OPC UA monitoring broker ("ghcr.io/deislabs/akri/opcua-monitoring-broker"). If you would rather manually deploy pods to utilize the devices advertized by Akri, don't specify a broker pod and see our documentation on [requesting resources advertized by Akri](./requesting-akri-resources.md).
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| opcua.configuration.brokerPod.image.repository | image string | "" | image of broker Pod that should be deployed to discovered devices |
+| opcua.configuration.brokerPod.image.tag | tag string | "latest" | image tag of broker Pod that should be deployed to discovered devices |
+
+### Mounting Credentials Settings
+See [Mounting OPC UA credentials to enable security](#mounting-opc-ua-credentials-to-enable-security) for more details on how to use this setting.
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| opcua.configuration.mountCertificates| true, false | false | specify whether to mount a secret named `opcua-broker-credentials` into the OPC UA brokers |
+
+### Disabling Automatic Service Creation
+By default, if a broker Pod is specified, the generic OPC UA Configuration will create services for all the brokers of a specific Akri Instance and all the brokers of an Akri Configuration. The creation of these services can be disabled.
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| opcua.configuration.createInstanceServices | true, false | true | a service should be automatically created for each broker Pod |
+| opcua.configuration.createConfigurationService | true, false | true | a single service should be created for all brokers of a Configuration |
+
+### Capacity Setting
+By default, if a broker Pod is specified, a single broker Pod is deployed to each device. To modify the Configuration so that an OPC UA server is accessed by more or fewer nodes via broker Pods, update the `opcua.configuration.capacity` setting to reflect the correct number. For example, if your high availability needs are met by having 1 redundant
+pod, you can update the Configuration by setting `opcua.configuration.capacity=2`.
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| opcua.configuration.capacity | number | 1 | maximum number of brokers that can be deployed to utilize a device (up to 1 per Node) |
+
+### Installing Akri with the OPC UA Configuration and Discovery Handler
+Leveraging the above settings, Akri can be installed with the OPC UA Discovery Handler and an OPC UA Configuration that specifies discovery via the default LDS DiscoveryURL:
 ```bash
 helm repo add akri-helm-charts https://deislabs.github.io/akri/
 helm install akri akri-helm-charts/akri \
-    --set useLatestContainers=true \
-    --set opcua.enabled=true
+    --set opcua.discovery.enabled=true \
+    --set opcua.configuration.enabled=true
 ```
 If you have a workload that you would like to automatically be deployed to each discovered server, specify the workload image when installing Akri. As an example, the installation below will deploy an
@@ -24,24 +70,19 @@ empty nginx pod for each server. Instead, you should point to your image, say `g
Instead, you should point to your image, say `g ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set opcua.enabled=true \ - --set opcua.brokerPod.image.repository=nginx + --set opcua.discovery.enabled=true \ + --set opcua.configuration.enabled=true \ + --set opcua.configuration.brokerPod.image.repository=nginx ``` +> Note: set `opcua.configuration.brokerPod.image.tag` to specify an image tag (defaults to `latest`). -The generic OPC UA Configuration can be tailored to your cluster by modifying the [Akri Helm chart -values](../deployment/helm/values.yaml) in the following ways: +The following installation examples have been given to show how to the OPC UA Configuration can be tailored to you cluster: * Specifying the DiscoveryURLs for OPC UA Local Discovery Servers * Specifying the DiscoveryURLs for specific OPC UA servers * Specifying the DiscoveryURLs for both Local Discovery Servers and servers * Filtering the servers by application name * Mounting OPC UA credentials to enable security -* Changing the capacity -* Modifying the broker PodSpec (See [Modifying a Akri - Installation](./modifying-a-akri-installation#modifying-the-brokerpodspec)) -* Modifying instanceServiceSpec or configurationServiceSpec (See [Modifying a Akri - Installation](./modifying-a-akri-installation#modifying-instanceservicespec-or-configurationservicespec)) ### Specifying the DiscoveryURLs for OPC UA LocalDiscoveryServers If no DiscoveryURLs are passed as Helm values, the default DiscoveryURL for LocalDiscoveryServers is used. Instead of @@ -50,10 +91,10 @@ Local Discovery Servers, like in the following example: ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set opcua.enabled=true \ - --set opcua.discoveryUrls[0]="opc.tcp://10.1.2.3:4840/" \ - --set opcua.discoveryUrls[1]="opc.tcp://10.1.3.4:4840/" + --set opcua.discovery.enabled=true \ + --set opcua.configuration.enabled=true \ + --set opcua.configuration.discoveryDetails.discoveryUrls[0]="opc.tcp://10.1.2.3:4840/" \ + --set opcua.configuration.discoveryDetails.discoveryUrls[1]="opc.tcp://10.1.3.4:4840/" ``` ### Specifying the DiscoveryURLs for specific OPC UA Servers @@ -61,9 +102,9 @@ If you know the DiscoveryURLs for the OPC UA Servers you want Akri to discover, ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set opcua.enabled=true \ - --set opcua.discoveryUrls[0]="opc.tcp://10.123.456.7:4855/" + --set opcua.discovery.enabled=true \ + --set opcua.configuration.enabled=true \ + --set opcua.configuration.discoveryDetails.discoveryUrls[0]="opc.tcp://10.123.456.7:4855/" ``` ### Specifying the DiscoveryURLs for both LocalDiscoveryServers and Servers @@ -72,11 +113,11 @@ OPC UA discovery can also receive a list of both OPC UA LDS DiscoveryURLs and sp ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set opcua.enabled=true \ - --set opcua.discoveryUrls[0]="opc.tcp://10.1.2.3:4840/" \ - --set opcua.discoveryUrls[1]="opc.tcp://10.1.3.4:4840/" \ - --set opcua.discoveryUrls[2]="opc.tcp://10.123.456.7:4855/" + --set opcua.discovery.enabled=true \ + --set opcua.configuration.enabled=true \ + --set 
opcua.configuration.discoveryDetails.discoveryUrls[0]="opc.tcp://10.1.2.3:4840/" \ + --set opcua.configuration.discoveryDetails.discoveryUrls[1]="opc.tcp://10.1.3.4:4840/" \ + --set opcua.configuration.discoveryDetails.discoveryUrls[2]="opc.tcp://10.123.456.7:4855/" ``` >**Note**: The Agent's OPC UA discovery method only supports tcp DiscoveryURLs, since the [Rust OPC UA @@ -90,24 +131,24 @@ the server named "Duke", do the following. ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set opcua.enabled=true \ - --set opcua.applicationNames.action=Exclude \ - --set opcua.applicationNames.items[0]="Duke" + --set opcua.discovery.enabled=true \ + --set opcua.configuration.enabled=true \ + --set opcua.configuration.discoveryDetails.applicationNames.action=Exclude \ + --set opcua.configuration.discoveryDetails.applicationNames.items[0]="Duke" ``` Alternatively, to only discover the server named "Go Tar Heels!", do the following: ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set opcua.enabled=true \ - --set opcua.applicationNames.action=Include \ - --set opcua.applicationNames.items[0]="Go Tar Heels!" + --set opcua.discovery.enabled=true \ + --set opcua.configuration.enabled=true \ + --set opcua.configuration.discoveryDetails.applicationNames.action=Include \ + --set opcua.configuration.discoveryDetails.applicationNames.items[0]="Go Tar Heels!" ``` ### Mounting OPC UA credentials to enable security For your broker pod to utilize a discovered OPC UA server, it will need to contain an OPC UA Client. OPC UA Clients and Servers can establish an insecure connection so long as the OPC UA Servers support a Security Policy of None. However, if you would like your broker's OPC UA Client to establish a secure connection with an OPC UA server, the Client and Server must trust each other's x509 v3 certificates. This can be done in one of the three ways explained -in the [OPC UA proposal](./proposals/opcua.md#giving-proper-credentials-to-the-akri-broker). The simplest method is to +in the [OPC UA proposal](./proposals/opcua.configuration.md#giving-proper-credentials-to-the-akri-broker). The simplest method is to sign the OPC UA broker's certificate with the same Certificate Authority (CA) as the Server with which it wishes to connect. The certificates are passed to the broker via a Kubernetes Secret mounted as a volume to the directory `/etc/opcua-certs/client-pki`. @@ -126,42 +167,29 @@ kubectl create secret generic opcua-broker-credentials \ Certificates can be created and signed with a CA manually using openssl, by using the OPC Foundation [certificate generator tool](https://github.com/OPCFoundation/Misc-Tools), or Akri's [certificate generator](../samples/opcua-certificate-generator/README.md). Be sure that the certificates are in the format expected by your OPC UA Client. -Finally, when mounting certificates is enabled with with Helm via `--set opcua.mountCertificates='true'`, the +Finally, when mounting certificates is enabled with Helm via `--set opcua.configuration.mountCertificates='true'`, the secret named `opcua-broker-credentials` will be mounted into the OPC UA brokers. It is mounted to the volume `credentials` at the `mountPath` /etc/opcua-certs/client-pki, as shown in the [OPC UA Helm -template](../deployment/helm/templates/opcua.yaml). 
This is the path where the broker expects to find the
-certificates. The following is an example how how to enable security:
+template](../deployment/helm/templates/opcua-configuration.yaml). This is the path where the broker expects to find the
+certificates. The following is an example of how to enable security:
 ```bash
 helm repo add akri-helm-charts https://deislabs.github.io/akri/
 helm install akri akri-helm-charts/akri \
-    --set useLatestContainers=true \
-    --set opcua.enabled=true \
-    --set opcua.mountCertificates='true'
+    --set opcua.discovery.enabled=true \
+    --set opcua.configuration.enabled=true \
+    --set opcua.configuration.mountCertificates='true'
 ```
 >**Note**: If the Helm template for the OPC UA Configuration is too specific, you can [customize the Configuration
 yaml](./customizing-akri-installation.md#generating-modifying-and-applying-a-custom-configuration) to suit your needs.
 
-### Changing the capacity
-By default in the generic OPC UA Configuration, `capacity` is set to 1, so only a single workload can be scheduled to an OPC UA server. To modify the Configuration so that more or fewer Nodes may deploy brokers to an OPC UA Server, update the
-`capacity` property to reflect the correct number. For example, if your high availability needs are met by having only
-1 redundant pod, you can update the Configuration like this:
-```bash
-helm repo add akri-helm-charts https://deislabs.github.io/akri/
-helm install akri akri-helm-charts/akri \
-    --set useLatestContainers=true \
-    --set opcua.enabled=true \
-    --set opcua.capacity=2
-```
-
 ## Modifying a Configuration
-More information about how to modify an installed Configuration, add additional protocol Configurations to a cluster, or
+Akri has provided further documentation on [modifying the broker PodSpec](./customizing-akri-installation.md#modifying-the-brokerpodspec) and [instanceServiceSpec or configurationServiceSpec](./customizing-akri-installation.md#modifying-instanceservicespec-or-configurationservicespec).
+More information about how to modify an installed Configuration, add additional Configurations to a cluster, or
 delete a Configuration can be found in the [Customizing an Akri Installation document](./customizing-akri-installation.md).
 
 ## Implementation details
 The OPC UA implementation can be understood by looking at several things:
-1. [OpcuaDiscoveryHandlerConfig](../shared/src/akri/configuration.rs) defines the required properties.
-1. [The OPC UA property in akri-configuration-crd.yaml](../deployment/helm/crds/akri-configuration-crd.yaml) validates
-   the CRD input.
-1. [OpcuaDiscoveryHandler](../agent/src/protocols/opcua/discovery_handler.rs) defines OPC UA Server discovery.
+1. [OpcuaDiscoveryDetails](../discovery-handlers/opcua/src/discovery_handler.rs) defines the required properties.
+1. [OpcuaDiscoveryHandler](../discovery-handlers/opcua/src/discovery_handler.rs) defines OPC UA Server discovery.
 1. [sample-brokers/opcua-monitoring-broker](../samples/brokers/opcua-monitoring-broker) defines a sample OPC UA
    protocol broker that monitors an OPC UA Variable with a specific NodeID.
\ No newline at end of file
diff --git a/docs/opcua-demo.md b/docs/opcua-demo.md
index 5b0821e44..c362d24a3 100644
--- a/docs/opcua-demo.md
+++ b/docs/opcua-demo.md
@@ -3,7 +3,7 @@ OPC UA is a communication protocol for industrial automation. It is a client/ser
 security and communication framework. This demo will help you get started using Akri to discover OPC UA Servers and
 utilize them via a broker that contains an OPC UA Client.
Specifically, a Akri Configuration called OPC UA Monitoring was created for this scenario, which will show how Akri can be used to detect anomaly values of a specific OPC UA -Variable. To do so, the OPC UA Clients in the brokers will subscribe to that variable and serve it's value over gRPC for +Variable. To do so, the OPC UA Clients in the brokers will subscribe to that variable and serve its value over gRPC for an anomaly detection web application to consume. This Configuration could be used to monitor a barometer, CO detector, and more; however, for this example, that variable will represent the temperature of a thermostat and any value outside the range of 70-80 degrees is an anomaly. @@ -44,9 +44,12 @@ The following steps need to be completed to run the demo: If at any point in the demo, you want to dive deeper into OPC UA or clarify a term, you can reference the [online OPC UA specifications](https://reference.opcfoundation.org/v104/). -## Setting up a single-node cluster -Before running Akri, we need a Kubernetes cluster. If you do not have a readily available cluster, follow the steps -provided in the [end-to-end demo](./end-to-end-demo.md#set-up-cluster) to set up a single-node MicroK8s or K3s cluster. If using MicroK8s, you can skip the step of enabling privileged pods, as the OPC UA monitoring brokers do not need to run in a privileged security context. +## Setting up a cluster +Reference our [cluster setup documentation](./setting-up-cluster.md) to set up a cluster for this demo. For ease of +setup, only create a single-node cluster, so if installing K3s or MicroK8s, you can skip the last step of +the installation instructions of adding additional nodes. If you have an existing cluster, feel free to leverage it for +the demo. This documentation assumes you are using a single-node cluster; however, you can certainly use a multi-node +cluster. ## Creating X.509 v3 Certificates **If security is not desired, this section can be skipped, as each monitoring broker will use an OPC UA Security Policy @@ -98,7 +101,7 @@ kubectl create secret generic opcua-broker-credentials \ When mounting certificates is enabled later in the [Running Akri section](#running-akri) with Helm via `--set opcua.mountCertificates='true'`, the secret named `opcua-broker-credentials` will be mounted into the OPC UA monitoring brokers. It is mounted to the volume `credentials` at the `mountPath` /etc/opcua-certs/client-pki, as shown in the [OPC -UA Configuration Helm template](../deployment/helm/templates/opcua.yaml). This is the path where the brokers expect to +UA Configuration Helm template](../deployment/helm/templates/opcua-configuration.yaml). This is the path where the brokers expect to find the certificates. ## Creating OPC UA Servers @@ -109,14 +112,15 @@ to the OPC Foundation's .NET Console Reference Server. 1. Open the UA Reference solution file and navigate to NetCoreReferenceServer project. -1. Open `Quickstarts.Reference.Config.xml`. This application configuration file is where many features can configured, - such as the application description (application name, uri, etc), security configuration, and base address. Only the - latter needs to be modified if using no security. On lines 76 and 77, modify the address of the server, by replacing - `localhost` with the IP address of the machine the server is running on. If left as `localhost` the application - will automatically replace it with the hostname of the machine which will be unreachable to the broker pod. 
On the
-   same lines, modify the ports if they are already taken. Akri will preference using the tcp endpoint, since according
-   to the [OPC UA Security Specification](https://reference.opcfoundation.org/v104/Core/docs/Part2/4.10/), secure
-   channels over HTTPS do not provide application authentication.
+1. Open `Quickstarts.Reference.Config.xml`. This application configuration file is where many features can be
+   configured, such as the application description (application name, uri, etc), security configuration, and base
+   address. Only the latter needs to be modified if using no security. On lines 76 and 77, modify the address of the
+   server by replacing `localhost` with the IP address of the machine the server is running on. If left as `localhost`,
+   the application will automatically replace it with the hostname of the machine, which will be unreachable to the
+   broker pod. On the same lines, modify the ports if they are already taken. Akri will prefer using the tcp
+   endpoint, since according to the [OPC UA Security
+   Specification](https://reference.opcfoundation.org/v104/Core/docs/Part2/4.10/), secure channels over HTTPS do not
+   provide application authentication.

1. (Optional) If using security, and you have already created certificates in the previous section, now you can modify
   the security configuration inside `Quickstarts.Reference.Config.xml` to point to those certificates. After using the
@@ -142,12 +146,13 @@ to the OPC Foundation's .NET Console Reference Server.
   its variables) 2. We care about the `NamespaceIndex` because it and `Identifier` are the two fields of a
   `NodeId`. If you inspect the `CreateDynamicVariable` function, you will see that it creates an OPC UA
   variable, using the `path` parameter ("Thermometer_Temperature") as the `Identifier` when creating the NodeID for that
-   variable. It then add the variable to the `m_dynamicNodes` list. At the bottom of `CreateAddressSpace` the following
-   line initializes a simulation that will periodically change the value of all the variables in `m_dynamicNodes`:
+   variable. It then adds the variable to the `m_dynamicNodes` list. At the bottom of `CreateAddressSpace` the
+   following line initializes a simulation that will periodically change the value of all the variables in
+   `m_dynamicNodes`:
   ``` c#
   m_simulationTimer = new Timer(DoSimulation, null, 1000, 1000);
   ```
-   Lets change the simulation so that it usually returns a value between 70-80 and periodically returns an outlier of
+   Let's change the simulation so that it usually returns a value between 70-80 and periodically returns an outlier of
   120. Go to the `DoSimulation` function. Replace `variable.Value = GetNewValue(variable);` with the following
   ```c#
   Random rnd = new Random();
@@ -166,33 +171,33 @@ to the OPC Foundation's .NET Console Reference Server.

## Running Akri
1. Make sure your OPC UA Servers are running
-1. Now it is time to install the Akri using Helm. We can specify that when installing Akri, we also want to create an
-   OPC UA Configuration by setting the helm value `--set opcua.enabled=true`. In the Configuration as environment
-   variables in the broker PodSpec, we will specify the `Identifier` and `NamespaceIndex` of the NodeID we want the
-   brokers to monitor. These values are mounted as environment variables in the brokers. In our case that is our
-   temperature variable we made earlier, which has an `Identifier` of `Thermometer_Temperature` and `NamespaceIndex` of
-   `2`.
Finally, since we did not set up a Local Discovery
+1. Now it is time to install Akri using Helm. When installing Akri, we can specify that we want to deploy the OPC UA
+   Discovery Handlers by setting the helm value `opcua.discovery.enabled=true`. We also specify that we want to create
+   an OPC UA Configuration with `--set opcua.configuration.enabled=true`. In the Configuration, any values that should
+   be set as environment variables in brokers can be set in `opcua.configuration.brokerProperties`. In this scenario, we
+   will specify the `Identifier` and `NamespaceIndex` of the NodeID we want the brokers to monitor. In our case that is
+   our temperature variable we made earlier, which has an `Identifier` of `Thermometer_Temperature` and `NamespaceIndex`
+   of `2`. Finally, since we did not set up a Local Discovery
   Server -- see [Setting up and using a Local Discovery Server](#setting-up-and-using-a-local-discovery-server-(windows-only)) in the Extensions section at the bottom of
   this document to use a LDS -- we must specify the DiscoveryURLs of the OPC UA Servers we want the Agent to discover.
   Those are the tcp addresses that we modified in step 3 of [Creating OPC UA Servers](#creating-opc-ua-servers). Be
   sure to set the appropriate IP address and port number for the DiscoveryURLs in the Helm command below. If using
-   security, uncomment `--set opcua.mountCertificates='true'`.
+   security, uncomment `--set opcua.configuration.mountCertificates='true'`.
   ```sh
   helm repo add akri-helm-charts https://deislabs.github.io/akri/
   helm install akri akri-helm-charts/akri \
-      --set useLatestContainers=true \
-      --set opcua.enabled=true \
-      --set opcua.name=akri-opcua-monitoring \
-      --set opcua.brokerPod.image.repository="ghcr.io/deislabs/akri/opcua-monitoring-broker:latest-dev" \
-      --set opcua.brokerPod.env.IDENTIFIER='Thermometer_Temperature' \
-      --set opcua.brokerPod.env.NAMESPACE_INDEX='2' \
-      --set opcua.discoveryUrls[0]="opc.tcp://:/Quickstarts/ReferenceServer/" \
-      --set opcua.discoveryUrls[1]="opc.tcp://:/Quickstarts/ReferenceServer/" \
-      # --set opcua.mountCertificates='true'
+      --set opcua.discovery.enabled=true \
+      --set opcua.configuration.enabled=true \
+      --set opcua.configuration.name=akri-opcua-monitoring \
+      --set opcua.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/opcua-monitoring-broker" \
+      --set opcua.configuration.brokerProperties.IDENTIFIER='Thermometer_Temperature' \
+      --set opcua.configuration.brokerProperties.NAMESPACE_INDEX='2' \
+      --set opcua.configuration.discoveryDetails.discoveryUrls[0]="opc.tcp://<IP address>:<port>/Quickstarts/ReferenceServer/" \
+      --set opcua.configuration.discoveryDetails.discoveryUrls[1]="opc.tcp://<IP address>:<port>/Quickstarts/ReferenceServer/" \
+      # --set opcua.configuration.mountCertificates='true'
   ```
   Akri Agent will discover the two Servers and create an Instance for each Server. Watch two broker pods spin up, one
-   for each Server.
-   For MicroK8s
+   for each Server. For MicroK8s
   ```sh
   watch microk8s kubectl get pods -o wide
   ```
@@ -211,7 +216,7 @@ A sample anomaly detection web application was created for this end-to-end demo.
brokers' gRPC clients, getting the latest temperature value. It then determines whether this value is an outlier with respect to the dataset using the Local Outlier Factor strategy. The dataset is simply a csv with the numbers between 70-80 repeated several times; therefore, any value significantly outside this range will be seen as an outlier. The web application
The web application -serves as a log, displaying all the temperature values and the address of the OPC UA Server that sent the value. It +serves as a log, displaying all the temperature values and the address of the OPC UA Server that sent the values. It shows anomaly values in red. The anomalies always have a value of 120 due to how we set up the `DoSimulation` function in the OPC UA Servers. 1. Deploy the anomaly detection app and watch a pod spin up for the app. @@ -242,8 +247,8 @@ in the OPC UA Servers. ``` 1. Navigate in your browser to http://ip-address:32624/ where ip-address is the IP address of your Ubuntu VM (not the cluster-IP) and the port number is from the output of `kubectl get services`. It takes 3 seconds for the site to - load, after which, you should a log of the temperature values, which updates every few seconds. Note how the values - are coming from two different DiscoveryURLs, namely the ones for each of the two OPC UA Servers. + load, after which, you should see a log of the temperature values, which updates every few seconds. Note how the + values are coming from two different DiscoveryURLs, namely the ones for each of the two OPC UA Servers. ## Clean up 1. Delete the anomaly detection application deployment and service. @@ -271,7 +276,7 @@ in the OPC UA Servers. ``` ## Extensions -Now that you have the end to end demo running lets talk about some ways you can go beyond the demo to better understand +Now that you have the end to end demo running let's talk about some ways you can go beyond the demo to better understand the advantages of Akri. This section will cover: 1. Adding a node to the cluster 1. Using a Local Discovery Server to discover the Servers instead of passing the DiscoveryURLs to the OPC UA Monitoring @@ -281,8 +286,12 @@ the advantages of Akri. This section will cover: 1. Creating a new OPC UA Configuration ### Adding a Node to the cluster -To see how Akri easily scales as nodes are added to the cluster, add another node to your (K3s, MicroK8s, or vanilla Kubernetes) cluster. -1. If you are using MicroK8s, create another MicroK8s instance, following the same steps as in [Setting up a single-node cluster](#setting-up-a-single-node-cluster) above. Then, in your first VM that is currently running Akri, get the join command by running `microk8s add-node`. In your new VM, run one of the join commands outputted in the previous step. +To see how Akri easily scales as nodes are added to the cluster, add another node to your (K3s, MicroK8s, or vanilla +Kubernetes) cluster. +1. If you are using MicroK8s, create another MicroK8s instance, following the same steps as in [Setting up a single-node + cluster](#setting-up-a-single-node-cluster) above. Then, in your first VM that is currently running Akri, get the + join command by running `microk8s add-node`. In your new VM, run one of the join commands outputted in the previous + step. 1. Confirm that you have successfully added a node to the cluster by running the following in your control plane VM: ```sh kubectl get no @@ -295,19 +304,20 @@ To see how Akri easily scales as nodes are added to the cluster, add another nod ``` 1. Let's play around with the capacity value and use the `helm upgrade` command to modify our OPC UA Monitoring Configuration such that the capacity is 2. On the control plane node, run the following, once again uncommenting - `--set opcua.mountCertificates='true'` if using security. Watch as the broker terminates and then four come online in - a Running state. 
+   `--set opcua.configuration.mountCertificates='true'` if using security. Watch as the broker terminates and then four
+   come online in a Running state.
   ```sh
   helm upgrade akri akri-helm-charts/akri \
-      --set useLatestContainers=true \
-      --set opcua.enabled=true \
-      --set opcua.brokerPod.image.repository="ghcr.io/deislabs/akri/opcua-monitoring-broker:latest-dev" \
-      --set opcua.brokerPod.env.IDENTIFIER='Thermometer_Temperature' \
-      --set opcua.brokerPod.env.NAMESPACE_INDEX='2' \
-      --set opcua.discoveryUrls[0]="opc.tcp://:/Quickstarts/ReferenceServer/" \
-      --set opcua.discoveryUrls[1]="opc.tcp://:/Quickstarts/ReferenceServer/" \
-      --set opcua.capacity=2 \
-      # --set opcua.mountCertificates='true'
+      --set opcua.discovery.enabled=true \
+      --set opcua.configuration.enabled=true \
+      --set opcua.configuration.name=akri-opcua-monitoring \
+      --set opcua.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/opcua-monitoring-broker" \
+      --set opcua.configuration.brokerProperties.IDENTIFIER='Thermometer_Temperature' \
+      --set opcua.configuration.brokerProperties.NAMESPACE_INDEX='2' \
+      --set opcua.configuration.discoveryDetails.discoveryUrls[0]="opc.tcp://<IP address>:<port>/Quickstarts/ReferenceServer/" \
+      --set opcua.configuration.discoveryDetails.discoveryUrls[1]="opc.tcp://<IP address>:<port>/Quickstarts/ReferenceServer/" \
+      --set opcua.configuration.capacity=2 \
+      # --set opcua.configuration.mountCertificates='true'
   ```
   For MicroK8s
   ```sh
   watch microk8s kubectl get pods,akrii -o wide
   ```
@@ -317,12 +327,13 @@ To see how Akri easily scales as nodes are added to the cluster, add another nod
   ```sh
   watch kubectl get pods,akrii -o wide
   ```
-1. Once you are done using Akri, you can remove your worker node from the cluster. For MicroK8s this is done by running on the worker node:
+1. Once you are done using Akri, you can remove your worker node from the cluster. For MicroK8s this is done by running
+   on the worker node:
   ```sh
   microk8s leave
   ```
-   Then, to complete the node removal, on the host run the following, inserting the name of the worker node (you can look it
-   up with `microk8s kubectl get no`):
+   Then, to complete the node removal, on the host run the following, inserting the name of the worker node (you can
+   look it up with `microk8s kubectl get no`):
   ```sh
   microk8s remove-node <node name>
   ```
@@ -348,22 +359,21 @@ address [from OPC UA Specification 12](https://reference.opcfoundation.org/v104/
`opc.tcp://localhost:4840/`. This is seen on line 205 of `Quickstarts.ReferenceServer.xml`.

Make sure you have restarted your OPC UA Servers, since they attempt to register with their LDS on start up. Now, we can
-install Akri with the OPC UA Configuration, passing in the LDS DiscoveryURL instead of both server's DiscoveryURLs.
+install Akri with the OPC UA Configuration, passing in the LDS DiscoveryURL instead of both servers' DiscoveryURLs.
Replace "Windows host IP address" with the IP address of the Windows machine you installed the LDS on (and is hosting
the servers).
Be sure to uncomment mounting certificates if you are enabling security:
```sh
helm install akri akri-helm-charts/akri \
-   --set useLatestContainers=true \
-   --set opcua.enabled=true \
-   --set opcua.name=akri-opcua-monitoring \
-   --set opcua.brokerPod.image.repository="ghcr.io/deislabs/akri/opcua-monitoring-broker:latest-dev" \
-   --set opcua.brokerPod.env.IDENTIFIER='Thermometer_Temperature' \
-   --set opcua.brokerPod.env.NAMESPACE_INDEX='2' \
-   --set opcua.discoveryUrls[0]="opc.tcp://:4840/" \
-   # --set opcua.mountCertificates='true'
+   --set opcua.discovery.enabled=true \
+   --set opcua.configuration.enabled=true \
+   --set opcua.configuration.name=akri-opcua-monitoring \
+   --set opcua.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/opcua-monitoring-broker" \
+   --set opcua.configuration.brokerProperties.IDENTIFIER='Thermometer_Temperature' \
+   --set opcua.configuration.brokerProperties.NAMESPACE_INDEX='2' \
+   --set opcua.configuration.discoveryDetails.discoveryUrls[0]="opc.tcp://<Windows host IP address>:4840/" \
+   # --set opcua.configuration.mountCertificates='true'
```
-You can watch as an Instance is created for each Server and two broker pods are spun up.
-For MicroK8s
+You can watch as an Instance is created for each Server and two broker pods are spun up. For MicroK8s
```sh
watch microk8s kubectl get pods,akrii -o wide
```
@@ -379,29 +389,29 @@ specified by UA Specification 12). For example, to discover all servers register
server named "SomeServer0", do the following.
```bash
helm install akri akri-helm-charts/akri \
-   --set useLatestContainers=true \
-   --set opcua.enabled=true \
-   --set opcua.name=akri-opcua-monitoring \
-   --set opcua.brokerPod.image.repository="ghcr.io/deislabs/akri/opcua-monitoring-broker:latest-dev" \
-   --set opcua.brokerPod.env.IDENTIFIER='Thermometer_Temperature' \
-   --set opcua.brokerPod.env.NAMESPACE_INDEX='2' \
-   --set opcua.discoveryUrls[0]="opc.tcp://:4840/" \
-   --set opcua.applicationNames.action=Exclude \
-   --set opcua.applicationNames.items[0]="SomeServer0" \
-   # --set opcua.mountCertificates='true'
+   --set opcua.discovery.enabled=true \
+   --set opcua.configuration.enabled=true \
+   --set opcua.configuration.name=akri-opcua-monitoring \
+   --set opcua.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/opcua-monitoring-broker" \
+   --set opcua.configuration.brokerProperties.IDENTIFIER='Thermometer_Temperature' \
+   --set opcua.configuration.brokerProperties.NAMESPACE_INDEX='2' \
+   --set opcua.configuration.discoveryDetails.discoveryUrls[0]="opc.tcp://<Windows host IP address>:4840/" \
+   --set opcua.configuration.discoveryDetails.applicationNames.action=Exclude \
+   --set opcua.configuration.discoveryDetails.applicationNames.items[0]="SomeServer0" \
+   # --set opcua.configuration.mountCertificates='true'
```
Alternatively, to only discover the server named "SomeServer0", do the following:
```bash
helm install akri akri-helm-charts/akri \
-   --set useLatestContainers=true \
-   --set opcua.enabled=true \
-   --set opcua.name=akri-opcua-monitoring \
-   --set opcua.brokerPod.image.repository="ghcr.io/deislabs/akri/opcua-monitoring-broker:latest-dev" \
-   --set opcua.brokerPod.env.IDENTIFIER='Thermometer_Temperature' \
-   --set opcua.brokerPod.env.NAMESPACE_INDEX='2' \
-   --set opcua.discoveryUrls[0]="opc.tcp://:4840/" \
-   --set opcua.applicationNames.action=Include \
-   --set opcua.applicationNames.items[0]="SomeServer0" \
+   --set opcua.discovery.enabled=true \
+   --set opcua.configuration.enabled=true \
+   --set opcua.configuration.name=akri-opcua-monitoring \
+   --set
opcua.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/opcua-monitoring-broker" \
+   --set opcua.configuration.brokerProperties.IDENTIFIER='Thermometer_Temperature' \
+   --set opcua.configuration.brokerProperties.NAMESPACE_INDEX='2' \
+   --set opcua.configuration.discoveryDetails.discoveryUrls[0]="opc.tcp://<Windows host IP address>:4840/" \
+   --set opcua.configuration.discoveryDetails.applicationNames.action=Include \
+   --set opcua.configuration.discoveryDetails.applicationNames.items[0]="SomeServer0" \
   # --set opcua.configuration.mountCertificates='true'
```
### Creating a different broker and end application
@@ -410,30 +420,32 @@ Variable for anomalies. The workload or broker you want to deploy to discovered
Servers' address spaces are widely varied, so the options for broker implementations are endless. Passing the NodeID
`Identifier` and `NamespaceIndex` as environment variables may still suit your needs; however, if targeting one NodeID
is too limiting or irrelevant, instead of passing a specific NodeID to your broker Pods, you could specify any other
-environment variables via `--set opcua.brokerPod.env.KEY='VALUE'`. Or, your broker may not need additional information
-passed to it at all. Decide whether to pass environment variables, what servers to discover, and set the broker pod
-image to be your container image, say `ghcr.io//opcua-broker`.
+environment variables via `--set opcua.configuration.brokerProperties.KEY='VALUE'`. Or, your broker may not need
+additional information passed to it at all. Decide whether to pass environment variables, what servers to discover, and
+set the broker pod image to be your container image, say `ghcr.io/<username>/opcua-broker`.
```sh
helm repo add akri-helm-charts https://deislabs.github.io/akri/
helm install akri akri-helm-charts/akri \
-   --set useLatestContainers=true \
-   --set opcua.enabled=true \
-   --set opcua.discoveryUrls[0]="opc.tcp://:/" \
-   --set opcua.discoveryUrls[1]="opc.tcp://:/" \
-   --set opcua.brokerPod.image.repository='ghcr.io//opcua-broker'
-   # --set opcua.mountCertificates='true'
+   --set opcua.discovery.enabled=true \
+   --set opcua.configuration.enabled=true \
+   --set opcua.configuration.discoveryDetails.discoveryUrls[0]="opc.tcp://<IP address>:<port>/" \
+   --set opcua.configuration.discoveryDetails.discoveryUrls[1]="opc.tcp://<IP address>:<port>/" \
+   --set opcua.configuration.brokerPod.image.repository='ghcr.io/<username>/opcua-broker'
+   # --set opcua.configuration.mountCertificates='true'
```
+> Note: set `opcua.configuration.brokerPod.image.tag` to specify an image tag (defaults to `latest`).
+
Now, your broker will be deployed to all discovered OPC UA servers. Next, you can create a Kubernetes deployment for
your own end application like [anomaly-detection-app.yaml](../deployment/samples/akri-anomaly-detection-app.yaml) and
apply it to your Kubernetes cluster.

### Creating a new OPC UA Configuration
Helm allows us to parametrize the commonly modified fields in our Configuration files, and we have provided many. Run
-`helm inspect values akri-helm-charts/akri` to see what values of the generic OPC UA Configuration can be customized,
-such as the Configuration and Instance `ServiceSpec`s, `capacity`, and broker `PodSpec`. We saw in the previous section
-how broker Pod environment variables can be specified via `--set opcua.brokerPod.env.KEY='VALUE'`. For more advanced
-configuration changes that are not aided by the generic OPC UA Configuration Helm chart, such as credentials naming, we
-suggest downloading the OPC UA Configuration file using Helm and then manually modifying it.
See the documentation on
-[customizing an Akri
+`helm inspect values akri-helm-charts/akri` to see what values of the generic OPC UA Configuration can be
+customized, such as the Configuration and Instance `ServiceSpec`s, `capacity`, and broker `PodSpec`. We saw in the
+previous section how broker Pod environment variables can be specified via `--set
+opcua.configuration.brokerProperties.KEY='VALUE'`. For more advanced configuration changes that are not aided by the
+generic OPC UA Configuration Helm chart, such as credentials naming, we suggest downloading the OPC UA Configuration
+file using Helm and then manually modifying it. See the documentation on [customizing an Akri
installation](./customizing-akri-installation.md#generating-modifying-and-applying-a-custom-configuration) for more
details.
\ No newline at end of file
diff --git a/docs/prometheus.md b/docs/prometheus.md
new file mode 100644
index 000000000..e08830b31
--- /dev/null
+++ b/docs/prometheus.md
@@ -0,0 +1,174 @@
+# Prometheus Monitoring with Akri
+To enable a deeper understanding of the state of an Akri deployment and Node resource usage by Akri containers, Akri
+exposes metrics with Prometheus. This document will cover:
+* Installing Prometheus
+* Enabling Prometheus with Akri
+* Visualizing metrics with Grafana
+* Akri's currently exposed metrics
+* Exposing metrics from an Akri Broker Pod
+
+## Installing Prometheus
+In order to expose Akri's metrics, Prometheus must be deployed to your cluster. If you already have Prometheus running
+on your cluster, you can skip this step.
+
+Prometheus comprises many components. Instead of manually deploying all the components, the entire kube-prometheus
+stack can be deployed via its [Helm
+chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack). It includes the
+Prometheus operator, node exporter, built-in Grafana support, and more.
+1. Get the kube-prometheus stack Helm repo.
+   ```sh
+   helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
+   helm repo add stable https://charts.helm.sh/stable
+   helm repo update
+   ```
+1. Install the chart, specifying what namespace you want Prometheus to run in. It does not have to be the same namespace
+   in which you are running Akri. For example, it may be in a namespace called `monitoring` as in the command below. [By
+   default](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack#prometheusioscrape),
+   Prometheus only discovers PodMonitors within its namespace. This should be disabled by setting
+   `podMonitorSelectorNilUsesHelmValues` to `false` so that Akri's custom PodMonitors can be discovered. Additionally,
+   the Grafana service can be exposed to the host by making it a NodePort service. It may take a minute or so to deploy
+   all the components.
+   ```sh
+   helm install prometheus prometheus-community/kube-prometheus-stack \
+      --set grafana.service.type=NodePort \
+      --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false \
+      --namespace monitoring
+   ```
+   > **Note**: The Prometheus dashboard can also be exposed to the host by adding `--set
+   > prometheus.service.type=NodePort`. **Note**: If intending to [expose metrics from a Broker
+   > Pod](#exposing-metrics-from-an-akri-broker-pod) via a ServiceMonitor, also set
+   > `serviceMonitorSelectorNilUsesHelmValues` to `false`.

+## Enabling Prometheus in Akri
+The Akri Controller and Agent publish metrics to port 8080 at a `/metrics` endpoint.
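+For a quick check that a component is serving metrics (a minimal sketch, not from the Akri docs; `<agent-pod-name>` is a
+placeholder for one of your Agent Pod names), you could port forward to the Pod and curl the endpoint directly:
+```sh
+# Forward local port 8080 to the Agent Pod's metrics port, then fetch the metrics
+kubectl port-forward pod/<agent-pod-name> 8080:8080 &
+curl localhost:8080/metrics
+```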
However, these cannot be accessed
+by Prometheus without creating PodMonitors, which are custom resources that tell Prometheus which Pods to monitor. These
+components can all be automatically created and deployed via Helm by setting `--set prometheus.enabled=true` when
+installing Akri.
+
+Install Akri and expose the Controller and Agent's metrics to Prometheus by running:
+```sh
+helm repo add akri-helm-charts https://deislabs.github.io/akri/
+helm install akri akri-helm-charts/akri \
+   --set prometheus.enabled=true
+```
+> **Note**: This documentation assumes you are using vanilla Kubernetes. Be sure to reference the [user
+> guide](./user-guide.md) to determine whether the distribution you are using requires crictl path configuration.
+
+## Visualizing metrics with Grafana
+Now that Akri's metrics are being exposed to Prometheus, they can be visualized in Grafana.
+1. Determine the port that the Grafana Service is running on, specifying the namespace if necessary, and save it for the
+   next step.
+   ```sh
+   kubectl get service/prometheus-grafana --namespace=monitoring --output=jsonpath='{.spec.ports[?(@.name=="service")].nodePort}' && echo
+   ```
+1. SSH port forwarding can be used to access Grafana. Open a new terminal, and enter your ssh command to access the machine
+   running Akri and Prometheus, followed by the port forwarding request. The following command will use port 50000 on the
+   host. Feel free to change it if it is not available. Be sure to replace `<Grafana Service port>` with the port number
+   outputted in the previous step.
+   ```sh
+   ssh someuser@<IP address> -L 50000:localhost:<Grafana Service port>
+   ```
+1. Navigate to `http://localhost:50000/` and enter Grafana's default username `admin` and password `prom-operator`.
+   Once logged in, the username and password can be changed in account settings. Now,
+   you can create a Dashboard to display the Akri metrics.
+
+## Akri's currently exposed metrics
+Akri uses the [Rust Prometheus client library](https://github.com/tikv/rust-prometheus) to expose metrics. It exposes
+all the [default process metrics](https://prometheus.io/docs/instrumenting/writing_clientlibs/#process-metrics), such as
+Agent or Controller total CPU time usage (`process_cpu_seconds_total`) and RAM usage (`process_resident_memory_bytes`),
+along with the following custom metrics, all of which are prefixed with `akri_`.
+| Metric Name | Metric Type | Metric Source | Buckets |
+|---|---|---|---|
+| akri_instance_count | IntGaugeVec | Agent | Configuration, shared |
+| akri_broker_pod_count | IntGaugeVec | Controller | Configuration, Node |
+
+## Exposing metrics from an Akri Broker Pod
+Metrics can also be published by Broker Pods and exposed to Prometheus. This workflow is not unique to Akri and is
+equivalent to exposing metrics from any deployment to Prometheus. Using the [appropriate Prometheus client
+library](https://prometheus.io/docs/instrumenting/clientlibs/) for your broker, expose some metrics. Then, deploy a
+Service to expose the metrics, specifying the name of the associated Akri Configuration as a selector
+(`akri.sh/configuration: <Configuration name>`), since the Configuration name is added as a label to all the Broker Pods
+by the Akri Controller. Finally, deploy a ServiceMonitor that selects for the previously mentioned service. This tells
+Prometheus which service(s) to discover.
+
+### Example: Exposing metrics from the udev video sample Broker
+As an example, an `akri_frame_count` metric has been created in the sample
+[udev-video-broker](../samples/brokers/udev-video-broker).
Like the Agent and Controller, it publishes both the default
+process metrics and the custom `akri_frame_count` metric to port 8080 at a `/metrics` endpoint.
+
+1. Akri can be installed with the udev Configuration, filtering for only usb video cameras and specifying a
+   Configuration name of `akri-udev-video`, by running:
+   ```sh
+   helm repo add akri-helm-charts https://deislabs.github.io/akri/
+   helm install akri akri-helm-charts/akri \
+      --set udev.discovery.enabled=true \
+      --set udev.configuration.enabled=true \
+      --set udev.configuration.name=akri-udev-video \
+      --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' \
+      --set udev.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker"
+   ```
+   > **Note**: This instruction assumes you are using vanilla Kubernetes. Be sure to reference the [user
+   > guide](./user-guide.md) to determine whether the distribution you are using requires crictl path configuration.
+
+   > **Note**: Also, expose the Agent and Controller's Prometheus metrics by adding `--set prometheus.enabled=true`.
+
+   > **Note**: If Prometheus is running in a different namespace than Akri and was not enabled to discover
+   > ServiceMonitors in other namespaces when installed, upgrade your Prometheus Helm installation to set
+   > `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` to `false`.
+   > ```sh
+   > helm upgrade prometheus prometheus-community/kube-prometheus-stack \
+   >    --set grafana.service.type=NodePort \
+   >    --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false \
+   >    --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false \
+   >    --namespace monitoring
+   > ```
+1. Then, create a Service for exposing these metrics, targeting all Pods labeled with the Configuration name
+   `akri-udev-video`.
+   ```yaml
+   apiVersion: v1
+   kind: Service
+   metadata:
+     name: akri-udev-video-broker-metrics
+     labels:
+       app: akri-udev-video-broker-metrics
+   spec:
+     selector:
+       akri.sh/configuration: akri-udev-video
+     ports:
+     - name: metrics
+       port: 8080
+     type: ClusterIP
+   ```
+   > **Note**: The metrics also could have been exposed by adding the metrics port to the Configuration level service
+   > in the udev Configuration.
+1. Apply the Service to your cluster.
+   ```sh
+   kubectl apply -f akri-udev-video-broker-metrics-service.yaml
+   ```
+1. Create the associated ServiceMonitor. Note how the selector matches the app name of the Service.
+   ```yaml
+   apiVersion: monitoring.coreos.com/v1
+   kind: ServiceMonitor
+   metadata:
+     name: akri-udev-video-broker-metrics
+     labels:
+       release: prometheus
+   spec:
+     selector:
+       matchLabels:
+         app: akri-udev-video-broker-metrics
+     endpoints:
+     - port: metrics
+   ```
+1. Apply the ServiceMonitor to your cluster.
+   ```sh
+   kubectl apply -f akri-udev-video-broker-metrics-service-monitor.yaml
+   ```
+1. The frame count metric reports the number of video frames that have been requested by some application. It will
+   remain at zero unless an application is deployed that utilizes the video Brokers.
Deploy the Akri sample streaming
+   application by running the following:
+   ```sh
+   kubectl apply -f https://raw.githubusercontent.com/deislabs/akri/main/deployment/samples/akri-video-streaming-app.yaml
+   watch kubectl get pods
+   ```
\ No newline at end of file
diff --git a/docs/proposals/broker-deployment-strategies.md b/docs/proposals/broker-deployment-strategies.md
index ed3cb9358..39efe25a8 100644
--- a/docs/proposals/broker-deployment-strategies.md
+++ b/docs/proposals/broker-deployment-strategies.md
@@ -32,17 +32,17 @@ kind: Configuration
metadata:
  name: akri-onvif-video
spec:
-  protocol:
-    onvif:
-      ipAddresses:
-        action: Exclude
-        items: []
+  discoveryHandler:
+    name: onvif
+    discoveryDetails: |+
+      ipAddresses:
+        action: Exclude
+        items: []
  # ...
  brokerPodSpec:
    containers:
    - name: akri-onvif-video-broker
      image: "ghcr.io/..."
-      imagePullPolicy: Always
      resources:
        limits:
          "{{PLACEHOLDER}}" : "1"
@@ -63,7 +63,7 @@ several possibilities for supporting **dynamic instance pooling**:
   the broker pod by the Controller. However, injecting environment variables causes pods to restart, so restarts must
   be expected.
1. The broker could query etcd to get the Instances, routinely checking which Instances its node has been selected to
-   connect to and getting connection information from those Instances' `metadata` section. However, this would require
+   connect to and getting connection information from those Instances' `brokerProperties` section. However, this would require
   extra logic to be added to users' brokers.
1. A sidecar container could be deployed which does the work of looking at Instances. That container could serve the
   connection information to the main broker container. This would reduce the amount of changes a user would have to
diff --git a/docs/requesting-akri-resources.md b/docs/requesting-akri-resources.md
index 19a2de101..6a5fe9a9c 100644
--- a/docs/requesting-akri-resources.md
+++ b/docs/requesting-akri-resources.md
@@ -4,13 +4,13 @@ The latter functionality is optional. You can use Akri solely to discover and ad
image from a Configuration. Then, you can schedule your own pods, requesting the discovered Akri Instances (which
represent each tiny device) as resource limits.

-Lets walk through how this works for some protocol named `protocolA`. Install Akri with the `protocolA` Configuration,
-omitting a broker pod image. Note, `protocolA` must be a supported Akri discovery protocol -- currently udev or ONVIF.
+Let's walk through how this works, using the ONVIF Discovery Handler as an example. Install Akri with the ONVIF Discovery Handler and Configuration,
+omitting a broker pod image.
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
helm install akri akri-helm-charts/akri \
-   --set useLatestContainers=true \
-   --set protocolA.enabled=true
+   --set onvif.discovery.enabled=true \
+   --set onvif.configuration.enabled=true
```
After installing Akri and your Configuration, list all discovered instances by running `kubectl get akrii`. Note
`akrii` is a short name for Akri Instance. All the instances will be named in the format `<Configuration name>-<id>`, where `id`
@@ -22,46 +22,46 @@ varies whether or not the device is sharable or visible by multiple nodes.
Instance with the same name for the same device. For example, since IP cameras are sharable, the `id` for an IP camera
would be `hash(uri)`.

-You can change the name of the Configuration and resultant Instances to be `protocolA-device` by adding `--set protocolA.name=protocolA-device` to your installation command.
Now, you can schedule pods that request these Instances as resources. Assuming the Configuration name has been set to `protocolA-device`, you can request the `protocolA-device-` Instance as a resource by adding the following to the PodSpec of your Deployment or Job:
+You can change the name of the Configuration and resultant Instances to be `onvif-camera` by adding `--set onvif.configuration.name=onvif-camera` to your installation command. Now, you can schedule pods that request these Instances as resources. Assuming the Configuration name has been set to `onvif-camera`, you can request the `onvif-camera-<id>` Instance as a resource by adding the following to the PodSpec of your Deployment or Job:
```yaml
  resources:
    limits:
-      akri.sh/protocolA-device-: "1"
+      akri.sh/onvif-camera-<id>: "1"
    requests:
-      akri.sh/protocolA-device-: "1"
+      akri.sh/onvif-camera-<id>: "1"
```
-As an example, a Deployment that would deploy an nginx broker to one of the devices discovered by `protocolA` may look
+As an example, a Deployment that would deploy an nginx broker to one of the devices discovered by the ONVIF Discovery Handler may look
like this:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
-  name: protocolA-broker-deployment
+  name: onvif-camera-broker-deployment
  labels:
-    app: protocolA-broker
+    app: onvif-camera-broker
spec:
  replicas: 1
  selector:
    matchLabels:
-      app: protocolA-broker
+      app: onvif-camera-broker
  template:
    metadata:
      labels:
-        app: protocolA-broker
+        app: onvif-camera-broker
    spec:
      containers:
-      - name: protocolA-broker
+      - name: onvif-camera-broker
        image: nginx
        resources:
          limits:
-            akri.sh/protocolA-device-: "1"
+            akri.sh/onvif-camera-<id>: "1"
          requests:
-            akri.sh/protocolA-device-: "1"
+            akri.sh/onvif-camera-<id>: "1"
```
Apply your Deployment to the cluster and watch the broker start to run. If you inspect the Instance of the resource you
requested in your deployment, you will see one of the slots has now been reserved by the node that is currently running
the broker.
```sh
-kubectl apply -f deployment-requesting-protocolA-device.yaml
-kubectl get akrii protocolA-device- -o yaml
+kubectl apply -f deployment-requesting-onvif-camera.yaml
+kubectl get akrii onvif-camera-<id> -o yaml
```
\ No newline at end of file
diff --git a/docs/roadmap.md b/docs/roadmap.md
index 5f4beee01..f8b07a7e2 100644
--- a/docs/roadmap.md
+++ b/docs/roadmap.md
@@ -1,17 +1,17 @@
# Roadmap
-## Implement additional protocol handlers
+## Implement additional Discovery Handlers
There are endless sensors, controllers, and MCU class devices on the edge and each type of device has a different
discovery protocol. Akri is an interface for helping expose those devices as resources to your Kubernetes cluster on the
edge. Before it can add a device as a cluster resource, Akri must first discover the device using the appropriate
-protocol. Akri currently supports several protocols and was built in a modular way so as to continually support more.
+Discovery Handler. Akri currently supports several Discovery Handlers and was built in a modular way so as to continually support more.
The question is, which protocols should Akri prioritize? We are looking for community feedback to make this decision.
If there is a protocol that you would like implemented, check our [Issues](https://github.com/deislabs/akri/issues) to
see if that protocol has been requested, and give it a thumbs up so we know you, too, would like it implemented.
If there is no existing request for your protocol, create a [new feature request](https://github.com/deislabs/akri/issues/new/choose).
-Rather than waiting for it to be prioritized, you could implement discovery via that protocol in Agent. See [the
-extensibility document](./extensibility.md) for more details.
+Rather than waiting for it to be prioritized, you could implement a Discovery Handler for that protocol. See [the
+Discovery Handler development document](./discovery-handler-development.md) for more details.

-### Currently supported protocols
+### Currently supported Discovery Handlers
1. ONVIF (to discover IP cameras)
1. udev (to discover anything in the Linux device file system)
1. OPC UA (to discover OPC UA Servers)
@@ -30,7 +30,4 @@ Currently, for every leaf device that is discovered by a node's Akri Agent, a si
how many nodes get the broker is limited by capacity. This is a fairly specific implementation that does not support
all users' scenarios. The [New Broker Deployment Strategies proposal](./proposals/broker-deployment-strategies.md)
discusses some ways the Akri Controller and Agent could be extended to allow for other broker deployment strategies.

-### Simple and scalable protocol extension
-All protocol discovery is currently implemented in each Akri Agent. The drawbacks to this and some potential solutions
-can be seen in the [Simple and Scalable Protocol Extension proposal](./proposals/simple-protocol-extension.md).
diff --git a/docs/setting-up-cluster.md b/docs/setting-up-cluster.md
new file mode 100644
index 000000000..00c82c34e
--- /dev/null
+++ b/docs/setting-up-cluster.md
@@ -0,0 +1,104 @@
+# Setting up your cluster
+Before deploying Akri, you must have a Kubernetes cluster (v1.16 or higher) running with `kubectl` and `Helm` installed. Akri is Kubernetes native, so it should run on most Kubernetes distributions. This document provides cluster setup instructions for the three Kubernetes distributions that all of our end-to-end tests run on.
+
+Please choose **one** of the following:
+1. [Set up a K3s cluster](#Option-1-Set-up-a-K3s-cluster)
+1. [Set up a MicroK8s cluster](#Option-2-Set-up-a-MicroK8s-cluster)
+1. [Set up a standard Kubernetes cluster](#Option-3-Set-up-a-standard-Kubernetes-cluster)
+
+
+>Note: All nodes must be Linux on amd64, arm64v8, or arm32v7.
+
+## Option 1: Set up a K3s cluster
+1. Install [K3s](https://k3s.io/)
+   ```sh
+   curl -sfL https://get.k3s.io | sh -
+   ```
+
+   >Note: Optionally specify a version with the `INSTALL_K3S_VERSION` env var as follows: `curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=v1.18.9+k3s1 sh -`
+1. Grant admin privilege to access kube config.
+   ```sh
+   sudo addgroup k3s-admin
+   sudo adduser $USER k3s-admin
+   sudo usermod -a -G k3s-admin $USER
+   sudo chgrp k3s-admin /etc/rancher/k3s/k3s.yaml
+   sudo chmod g+r /etc/rancher/k3s/k3s.yaml
+   su - $USER
+   ```
+1. Check K3s status.
+   ```sh
+   kubectl get node
+   ```
+1. Install Helm.
+   ```sh
+   export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
+   sudo apt install -y curl
+   curl -L https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
+   ```
+1. Akri depends on crictl to track some Pod information. If using K3s versions 1.19 or greater, install crictl locally (note: there are no known version limitations, any crictl version is expected to work). Previous K3s versions come with crictl embedded.
+   ```sh
+   VERSION="v1.17.0"
+   curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
+   sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
+   rm -f crictl-$VERSION-linux-amd64.tar.gz
+   ```
+1. Configure Akri to use the crictl path and K3s containerd socket. This `AKRI_HELM_CRICTL_CONFIGURATION` environment variable should be added to all Akri Helm installations.
+   ```sh
+   export AKRI_HELM_CRICTL_CONFIGURATION="--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/run/k3s/containerd/containerd.sock"
+   ```
+1. If desired, add nodes to your cluster by running the K3s installation script with the `K3S_URL` and `K3S_TOKEN` environment variables. See [K3s installation documentation](https://rancher.com/docs/k3s/latest/en/quick-start/#install-script) for more details.
+
+## Option 2: Set up a MicroK8s cluster
+1. Install [MicroK8s](https://microk8s.io/docs).
+   ```sh
+   sudo snap install microk8s --classic --channel=1.19/stable
+   ```
+1. Grant admin privilege for running MicroK8s commands.
+   ```sh
+   sudo usermod -a -G microk8s $USER
+   sudo chown -f -R $USER ~/.kube
+   su - $USER
+   ```
+1. Check MicroK8s status.
+   ```sh
+   microk8s status --wait-ready
+   ```
+1. Enable CoreDNS, Helm and RBAC for MicroK8s.
+   ```sh
+   microk8s enable dns helm3 rbac
+   ```
+1. If you don't have existing `kubectl` and `helm` installations, add aliases. If you do not want to set an alias, add `microk8s` in front of all `kubectl` and `helm` commands.
+   ```sh
+   alias kubectl='microk8s kubectl'
+   alias helm='microk8s helm3'
+   ```
+1. By default, MicroK8s does not allow Pods to run in a privileged context. None of Akri's components run privileged; however, if your custom broker Pods do (for example, in order to access devices), enable privileged Pods like so:
+   ```sh
+   echo "--allow-privileged=true" >> /var/snap/microk8s/current/args/kube-apiserver
+   microk8s.stop
+   microk8s.start
+   ```
+1. Akri depends on crictl to track some Pod information. MicroK8s does not install crictl locally, so crictl must be installed and the Akri Helm chart needs to be configured with the crictl path and MicroK8s containerd socket.
+   ```sh
+   # Note that we aren't aware of any version restrictions
+   VERSION="v1.17.0"
+   curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
+   sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
+   rm -f crictl-$VERSION-linux-amd64.tar.gz
+
+   export AKRI_HELM_CRICTL_CONFIGURATION="--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/var/snap/microk8s/common/run/containerd.sock"
+   ```
+1. If desired, reference [MicroK8s' documentation](https://microk8s.io/docs/clustering) to add additional nodes to the cluster.
+
+## Option 3: Set up a standard Kubernetes cluster
+1. Reference [Kubernetes documentation](https://kubernetes.io/docs/tasks/tools/) for instructions on how to install Kubernetes.
+1. Install Helm for deploying Akri.
+   ```sh
+   sudo apt install -y curl
+   curl -L https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
+   ```
+
+> Note: To enable workloads on a single-node cluster, remove the master taint.
+> ```sh
+> kubectl taint nodes --all node-role.kubernetes.io/master-
+> ```
diff --git a/docs/udev-configuration.md b/docs/udev-configuration.md
index 66eabf8a4..b0b9d50f1 100644
--- a/docs/udev-configuration.md
+++ b/docs/udev-configuration.md
@@ -1,17 +1,58 @@
-# Using the Udev Discovery Protocol in a Configuration
+# Configuring Akri to Discover Devices via Udev
## Background
Udev is the device manager for Linux. It manages device nodes in the `/dev` directory, such as microphones, security
-chips, usb cameras, and so on. Udev can be used to find devices that are attached to or embedded in nodes.
-
-## Udev discovery in Akri
-Akri's udev discovery handler parses udev rules listed in a Configuration, searches for them using udev, and returns a
-list of discovered device nodes (ie: /dev/video0). You tell Akri which device(s) to find by passing [udev
-rules](https://wiki.archlinux.org/index.php/Udev) into a Configuration. Akri has created a
-[grammar](../agent/src/protocols/udev/udev_rule_grammar.pest) for parsing the rules, expecting them to be formatted
-according to the [Linux Man pages](https://man7.org/linux/man-pages/man7/udev.7.html). While udev rules are normally used to both find
+chips, usb cameras, and so on. Udev can be used to find devices that are attached to or embedded in Linux nodes.
+
+All of Akri's components can be deployed by specifying values in its Helm chart during an installation. This document will cover the values that should be set to (1) deploy the udev Discovery Handlers and (2) apply a Configuration that tells Akri to discover devices using that Discovery Handler.
+
+## Deploying the udev Discovery Handler
+In order for the Agent to discover udev devices, a udev Discovery Handler must exist. Akri supports an Agent image that includes all supported Discovery Handlers. This Agent will be used if `agent.full=true` is set. By default, a slim Agent without any embedded Discovery Handlers is deployed and the required Discovery Handlers can be deployed as DaemonSets. This documentation will use that strategy, deploying udev Discovery Handlers by specifying `udev.discovery.enabled=true` when installing Akri.
+
+## udev Configuration Settings
+Instead of having to assemble your own udev Configuration yaml, we have provided a [Helm
+template](../deployment/helm/templates/udev.configuration.yaml). Helm allows us to parametrize the commonly modified fields in our configuration files, and we have provided many for udev (to see
+them, run `helm inspect values akri-helm-charts/akri`).
+To apply the udev Configuration to your cluster, simply set
+`udev.configuration.enabled=true` when installing Akri. Be sure to also **specify one or more udev rules** for the Configuration, as explained [below](#discovery-handler-discovery-details-settings).
+
+### Discovery Handler Discovery Details Settings
+Discovery Handlers are passed discovery details that are set in a Configuration to determine what to discover, filter out of discovery, and so on.
+The udev Discovery Handler requires that one discovery detail be provided: [udev
+rules](https://wiki.archlinux.org/index.php/Udev).
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| udev.configuration.discoveryDetails.udevRules | array of udev rules | empty | udev rule [supported by the udev Discovery Handler](#udev-rule-format) |
+
+The udev Discovery Handler parses the udev rules listed in a Configuration, searches for them using udev, and returns a
+list of discovered device nodes (ie: /dev/video0).
It parses the udev rules via a [grammar](../agent/src/protocols/udev/udev_rule_grammar.pest) Akri has created. It expects the udev rules to be formatted
+according to the [Linux Man pages](https://man7.org/linux/man-pages/man7/udev.7.html).
+
+#### Udev rule format
+While udev rules are normally used to both find
devices and perform actions on devices, the Akri udev discovery handler is only interested in finding devices.
Consequently, the discovery handler will throw an error if any of the rules contain an action operation ("=" , "+=" ,
"-=" , ":=") or action fields such as `IMPORT` in the udev rules. You should only use match operations ("==", "!=") and
the following udev fields: `ATTRIBUTE`, `DEVPATH`, `DRIVER`, `DRIVERS`, `KERNEL`, `KERNELS`, `ENV`, `SUBSYSTEM`,
`SUBSYSTEMS`, `TAG`, and `TAGS`. To see some examples, reference our example [supported rules](../test/example.rules)
and [unsupported rules](../test/example-unsupported.rules) that we run some tests against.

+### Broker Pod Settings
+If you would like workloads ("broker" Pods) to be deployed automatically to discovered devices, a broker image should be specified in the Configuration. Alternatively, if it meets your scenario, you could use the Akri frame server broker ("ghcr.io/deislabs/akri/udev-video-broker"). If you would rather manually deploy pods to utilize the devices advertized by Akri, don't specify a broker pod and see our documentation on [requesting resources advertized by Akri](./requesting-akri-resources.md).
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| udev.configuration.brokerPod.image.repository | image string | "" | image of broker Pod that should be deployed to discovered devices |
+| udev.configuration.brokerPod.image.tag | tag string | "latest" | image tag of broker Pod that should be deployed to discovered devices |
+
+### Disabling Automatic Service Creation
+By default, if a broker Pod is specified, the generic udev Configuration will create services for all the brokers of a specific Akri Instance and all the brokers of an Akri Configuration. The creation of these services can be disabled.
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| udev.configuration.createInstanceServices | true, false | true | a service should be automatically created for each broker Pod |
+| udev.configuration.createConfigurationService | true, false | true | a single service should be created for all brokers of a Configuration |
+
+### Capacity Setting
+By default, if a broker Pod is specified, a single broker Pod is deployed to each device. To modify the Configuration so that a device is accessed by more or fewer nodes via broker Pods, update the `udev.configuration.capacity` setting to reflect the correct number. For example, if your high availability needs are met by having 1 redundant
+pod, you can update the Configuration by setting `udev.configuration.capacity=2`.
+| Helm Key | Value | Default | Description |
+|---|---|---|---|
+| udev.configuration.capacity | number | 1 | maximum number of brokers that can be deployed to utilize a device (up to 1 per Node) |
+
## Choosing a udev rule
To see what devices will be discovered on a specific node by a udev rule, you can use `udevadm`.
For example, to find all devices in the sound subsystem, you could run: @@ -23,14 +64,14 @@ To see all the properties of a specific device discovered, you can use `udevadm udevadm info --attribute-walk --path=$(udevadm info --query=path /sys/devices/pci0000:00/0000:00:1f.3/sound/card0) ``` Now, you can see a bunch of attributes you could use to narrow your udev rule. Maybe you decide you want to find all -sound devices made by the vendor `Great Vendor`. You set the following udev rule under the udev protocol in your +sound devices made by the vendor `Great Vendor`. You set the following udev rule under the udev Discovery Handler in your Configuration: ```yaml -spec: - protocol: - udev: - udevRules: - - 'SUBSYSTEM=="sound", ATTR{vendor}=="Great Vendor"' +discoveryHandler: + name: udev + discoveryDetails: |+ + udevRules: + - 'SUBSYSTEM=="sound", ATTR{vendor}=="Great Vendor"' ``` ### Testing a udev rule @@ -65,52 +106,39 @@ To test which devices Akri will discover with a udev rule, you can run the rule ``` 1. Create an Akri Configuration with your udev rule! -## Using the udev Configuration template -Instead of having to assemble your own udev Configuration yaml, we have provided a [Helm -template](../deployment/helm/templates/udev.yaml). Helm allows us to parametrize the commonly modified fields in our configuration files, and we have provided many for udev (to see -them, run `helm inspect values akri-helm-charts/akri`). -To add the udev Configuration to your cluster, simply set -`udev.enabled=true`. Be sure to also **specify one or more udev rules** for the Configuration. If you want Akri to only -discover and advertize the resources, omit a broker pod image. Helm will automatically apply the udev Configuration yaml -for you, and the Akri Agent will advertize discovered leaf devices as resources. By default, the udev Configuration does -not specify a broker pod or services, so upon discovery, broker pods will not be deployed nor will services be created. -Later, we will discuss [how to add a custom broker to the -Configuration](./#adding-a-custom-broker-to-the-configuration). +## Installing Akri with a udev Configuration and Discovery Handler +Leveraging the above settings, Akri can be installed with the udev Discovery Handler and a udev Configuration with our udev rule specified. 
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
helm install akri akri-helm-charts/akri \
-   --set useLatestContainers=true \
-   --set udev.enabled=true \
-   --set udev.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"'
+   --set udev.discovery.enabled=true \
+   --set udev.configuration.enabled=true \
+   --set udev.configuration.discoveryDetails.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"'
```
-The udev Configuration can be tailored to your cluster by modifying the [Akri helm chart values](../deployment/helm/values.yaml) in the following ways:
+The following installation examples have been given to show how the udev Configuration can be tailored to your cluster:
* Modifying the udev rule
* Specifying a broker pod image
-* Disabling automatic Instance/Configuration Service creation
-* Modifying the broker PodSpec (See [Customizing Akri
-  Installation](./customizing-akri-installation.md#modifying-the-brokerpodspec))
-* Modifying instanceServiceSpec or configurationServiceSpec (See [Customizing Akri
-  Installation](./customizing-akri-installation.md#modifying-instanceservicespec-or-configurationservicespec))
+
+For more advanced Configuration changes that are not aided by our Helm chart, we suggest creating a Configuration file using Helm and then manually modifying it. To do this, see our documentation on [Customizing an Akri Installation](./customizing-akri-installation.md#generating-modifying-and-applying-a-custom-configuration)

## Modifying the udev rule
-The udev protocol will find all devices that are described by ANY of the udev rules. For example, to discover devices made by either Great Vendor or Awesome Vendor, you could add a second udev rule.
+The udev Discovery Handler will find all devices that are described by ANY of the udev rules. For example, to discover devices made by either Great Vendor or Awesome Vendor, you could add a second udev rule.
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
helm install akri akri-helm-charts/akri \
-   --set useLatestContainers=true \
-   --set udev.enabled=true \
-   --set udev.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"' \
-   --set udev.udevRules[1]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Awesome Vendor"'
+   --set udev.discovery.enabled=true \
+   --set udev.configuration.enabled=true \
+   --set udev.configuration.discoveryDetails.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"' \
+   --set udev.configuration.discoveryDetails.udevRules[1]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Awesome Vendor"'
```
Akri will now discover these devices and advertize them to the cluster as resources. Each discovered device is
represented as an Akri Instance. To list them, run `kubectl get akrii`. Note `akrii` is a short name for Akri
Instance. All the instances will be named in the format `<Configuration name>-<id>`. You could change the name of the
Configuration and resultant Instances to be `sound-devices` by adding `--set udev.configuration.name=sound-devices` to your
installation command. Now, you can schedule pods that request these Instances as resources, as explained in the
[requesting akri resources document](./requesting-akri-resources.md).
@@ -120,17 +148,19 @@ Configuration. Then, a broker will automatically be deployed to each discovered
information the broker needs to find its device as an environment variable. Namely, it injects an environment variable
named `UDEV_DEVNODE` which contains the devnode path for that device (ie: `/dev/snd/pcmC0D0c`). The broker can grab this
environment variable and proceed to interact with the device. To add a broker to the udev configuration, set the
Namely, it injects an environment variable named `UDEV_DEVNODE` which contains the devnode path for that device (ie: `/dev/snd/pcmC0D0c`). The broker can grab this environment variable and proceed to interact with the device. To add a broker to the udev configuration, set the -`udev.brokerPod.image.repository` value to point to your image. As an example, the installation below will deploy an +`udev.configuration.brokerPod.image.repository` value to point to your image. As an example, the installation below will deploy an empty nginx pod for each instance. Instead, you can point to your image, say `ghcr.io//sound-broker`. ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set udev.enabled=true \ - --set udev.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"' \ - --set udev.brokerPod.image.repository=nginx + --set udev.discovery.enabled=true \ + --set udev.configuration.enabled=true \ + --set udev.configuration.discoveryDetails.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"' \ + --set udev.configuration.brokerPod.image.repository=nginx ``` -The Configuration will automatically create a broker for each discovered device. It will also create a service for each +> Note: set `udev.configuration.brokerPod.image.tag` to specify an image tag (defaults to `latest`). + +Akri will automatically create a broker for each discovered device. It will also create a service for each broker and one for all brokers of the Configuration that applications can point to. See the [Customizing Akri Installation](./customizing-akri-installation.md) to learn how to [modify the broker pod spec](./customizing-akri-installation.md#modifying-the-brokerpodspec) and [service @@ -143,27 +173,24 @@ Helm. For example, to instead run all processes in the Pod with user ID 1000 and ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set udev.enabled=true \ - --set udev.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"' \ - --set udev.brokerPod.image.repository=nginx \ - --set udev.brokerPod.securityContext.runAsUser=1000 \ - --set udev.brokerPod.securityContext.runAsGroup=1000 + --set udev.discovery.enabled=true \ + --set udev.configuration.enabled=true \ + --set udev.configuration.discoveryDetails.udevRules[0]='SUBSYSTEM=="sound"\, ATTR{vendor}=="Great Vendor"' \ + --set udev.configuration.brokerPod.image.repository=nginx \ + --set udev.configuration.brokerPod.securityContext.runAsUser=1000 \ + --set udev.configuration.brokerPod.securityContext.runAsGroup=1000 ``` - -## Disabling automatic service creation -By default, the generic udev Configuration will create services for all the brokers of a specific Akri Instance and all the brokers of an Akri Configuration. Disable the create of Instance level services and Configuration level services by setting `--set udev.createInstanceServices=false` and `--set udev.createConfigurationService=false`, respectively. - ## Modifying a Configuration -More information about how to modify an installed Configuration, add additional protocol Configurations to a cluster, or delete a Configuration can be found in the [Customizing an Akri Installation document](./customizing-akri-installation.md). 
+Akri provides further documentation on [modifying the broker PodSpec](./customizing-akri-installation.md#modifying-the-brokerpodspec) and [the instanceServiceSpec or configurationServiceSpec](./customizing-akri-installation.md#modifying-instanceservicespec-or-configurationservicespec).
+More information about how to modify an installed Configuration, add additional Configurations to a cluster, or
+delete a Configuration can be found in the [Customizing an Akri Installation
+document](./customizing-akri-installation.md).

## Implementation details
The udev implementation can be understood by looking at several things:
-1. [UdevDiscoveryHandlerConfig](../shared/src/akri/configuration.rs) defines the required properties
-1. [The udev property in akri-configuration-crd.yaml](../deployment/helm/crds/akri-configuration-crd.yaml) validates the CRD input
-1. [UdevDiscoveryHandler](../agent/src/protocols/udev/discovery_handler.rs) defines udev camera discovery
-1. [samples/brokers/udev-video-broker](../samples/brokers/udev-video-broker) defines the udev protocol broker
+1. [UdevDiscoveryDetails](../discovery-handlers/udev/src/discovery_handler.rs) defines the required properties
+1. [UdevDiscoveryHandler](../discovery-handlers/udev/src/discovery_handler.rs) defines udev discovery
+1. [samples/brokers/udev-video-broker](../samples/brokers/udev-video-broker) defines the udev broker
1. [udev_rule_grammar.pest](../agent/src/protocols/udev/udev_rule_grammar.pest) defines the grammar for parsing udev rules and enumerates which fields are supported (such as `ATTR` and `TAG`), which are yet to be supported (`ATTRS` and `TAGS`), and which fields will never be supported, mainly due to being assignment rather than matching fields (such as
diff --git a/docs/udev-video-sample.md b/docs/udev-video-sample.md
index 7e5782c7f..26c5081a6 100644
--- a/docs/udev-video-sample.md
+++ b/docs/udev-video-sample.md
@@ -1,18 +1,18 @@
# Using the Udev Discovery Protocol to Discover USB Cameras
-As an example of handling local capabilities, a sample broker and streaming app have been made for utilizing video cameras discovered by Akri's udev protocol. To create an Akri Configuration to discover other devices via udev, see the [udev Configuration documentation](./udev-configuration.md).
+As an example of handling local capabilities, a sample broker and streaming app have been made for utilizing video cameras discovered by Akri's udev Discovery Handler. To create an Akri Configuration to discover other devices via udev, see the [udev Configuration documentation](./udev-configuration.md).
Udev is a device manager for the Linux kernel. The udev discovery handler parses udev rules listed in a Configuration, searches for them using udev, and returns a list of device nodes (i.e. /dev/video0). An instance is created for each device node. Since this example uses a [sample broker](../samples/brokers/udev-video-broker) that streams frames from a local camera, the rule added to the Configuration is `KERNEL=="video[0-9]*"`. To determine if a node has video devices that will be discovered by this Configuration, run `ls -l /sys/class/video4linux/` or `sudo v4l2-ctl --list-devices`.

## Usage
-To use create a udev Configuration for video devices for your cluster, you can simply set `udev.enabled=true` and a udev rule of `--set udev.udevRules[0]='KERNEL==\"video[0-9]*\"'` when installing the Akri Helm chart.
Optionally, set a name for your generated Configuration by setting `--set udev.name=akri-udev-video` and add a broker image in the case you want a workload automatically deployed to discovered devices. More information about the Akri Helm charts can be found in the [user guide](./user-guide.md#understanding-akri-helm-charts).
+To create a udev Configuration for video devices for your cluster, you can simply set `udev.configuration.enabled=true` and a udev rule of `--set udev.configuration.discoveryDetails.udevRules[0]='KERNEL==\"video[0-9]*\"'` when installing the Akri Helm chart. Also enable udev discovery via `udev.discovery.enabled=true`. Optionally, set a name for your generated Configuration by setting `udev.configuration.name=akri-udev-video` and add a broker image if you want Pods automatically deployed to discovered devices. More information about the Akri Helm charts can be found in the [user guide](./user-guide.md#understanding-akri-helm-charts).
```bash
helm repo add akri-helm-charts https://deislabs.github.io/akri/
helm install akri akri-helm-charts/akri \
-   --set useLatestContainers=true \
-   --set udev.enabled=true \
-   --set udev.name=akri-udev-video \
-   --set udev.udevRules[0]='KERNEL=="video[0-9]*"' \
-   --set udev.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker:latest-dev"
+   --set udev.discovery.enabled=true \
+   --set udev.configuration.enabled=true \
+   --set udev.configuration.name=akri-udev-video \
+   --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' \
+   --set udev.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker"
```
Akri will find all video4linux cameras and ensure that broker Pods are running on nodes that can access the cameras at all times, supplying each Instance Service and the Configuration Service with frames.
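Once installed, you can confirm that the Configuration was applied and that cameras were discovered. A quick sketch follows; the Instance name shown is illustrative, since real names end in a generated six-character hash.

```bash
# List the applied Configurations and the discovered Instances.
kubectl get akric
kubectl get akrii
# Inspect one discovered camera (the name below is illustrative).
kubectl get akrii akri-udev-video-8120fa -o yaml
```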
@@ -30,35 +30,39 @@ For example, the rule can be narrowed by matching cameras with specific properti ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set udev.enabled=true \ - --set udev.udevRules[0]='KERNEL=="video[0-9]*"\, ENV{ID_VENDOR}=="Microsoft"' \ - --set udev.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker:latest-dev" + --set udev.discovery.enabled=true \ + --set udev.configuration.enabled=true \ + --set udev.configuration.name=akri-udev-video \ + --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"\, ENV{ID_VENDOR}=="Microsoft"' \ + --set udev.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker" ``` As another example, to make sure that the camera has a capture capability rather than just being a video output device, modify the udev rule as follows: ```bash helm repo add akri-helm-charts https://deislabs.github.io/akri/ helm install akri akri-helm-charts/akri \ - --set useLatestContainers=true \ - --set udev.enabled=true \ - --set udev.udevRules[0]='KERNEL=="video[0-9]*"\, ENV{ID_V4L_CAPABILITIES}=="*:capture:*"' \ - --set udev.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker:latest-dev" + --set udev.discovery.enabled=true \ + --set udev.configuration.enabled=true \ + --set udev.configuration.name=akri-udev-video \ + --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"\, ENV{ID_V4L_CAPABILITIES}=="*:capture:*"' \ + --set udev.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker" ``` -### Modifying the brokerPod spec -The `brokerPodSpec` property is a full [PodSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#podspec-v1-core) and can be modified as such. For example, to configure the frame rate, resolution, and image type the broker streams from the discovered video cameras, environment variables can be modified in the podspec. To examine what settings are supported by a camera, install `v4l-utils` and run `sudo v4l2-ctl -d /dev/video0 --list-formats-ext` on the node. By default, the environment variables are set to MJPG format, 640x480 resolution, and 10 frames per second. If the broker sees that those settings are not supported by the camera, it will query the v4l device for supported settings and use the first format, resolution, and fps in the lists returned. The environment variables can be changed when installing the Akri Helm chart. For example, tell the broker to stream JPEG format, 1000x800 resolution, and 30 frames per second by setting those environment variables when installing Akri. +### Modifying the broker PodSpec +The `brokerPodSpec` property is a full [PodSpec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#podspec-v1-core) and can be modified as such. For example, to configure the frame rate, resolution, and image type the broker streams from the discovered video cameras, environment variables can be modified in the PodSpec. To examine what settings are supported by a camera, install `v4l-utils` and run `sudo v4l2-ctl -d /dev/video0 --list-formats-ext` on the node. By default, the environment variables are set to MJPG format, 640x480 resolution, and 10 frames per second. If the broker sees that those settings are not supported by the camera, it will query the v4l device for supported settings and use the first format, resolution, and fps in the lists returned. 
The environment variables can be changed when installing the Akri Helm chart. For example, tell the broker to stream JPEG format, 1000x800 resolution, and 30 frames per second by setting those environment variables when installing Akri.
```bash
helm install akri akri-helm-charts/akri \
-   --set useLatestContainers=true \
-   --set udev.enabled=true \
-   --set udev.udevRules[0]='KERNEL=="video[0-9]*"' \
-   --set udev.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker:latest-dev" \
-   --set udev.brokerPod.env.FORMAT='JPEG' \
-   --set udev.brokerPod.env.RESOLUTION_WIDTH='1000' \
-   --set udev.brokerPod.env.RESOLUTION_HEIGHT='800' \
-   --set udev.brokerPod.env.FRAMES_PER_SECOND='30'
+   --set udev.discovery.enabled=true \
+   --set udev.configuration.enabled=true \
+   --set udev.configuration.name=akri-udev-video \
+   --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' \
+   --set udev.configuration.brokerPod.image.repository="ghcr.io/deislabs/akri/udev-video-broker" \
+   --set udev.configuration.brokerPod.env.FORMAT='JPEG' \
+   --set udev.configuration.brokerPod.env.RESOLUTION_WIDTH='1000' \
+   --set udev.configuration.brokerPod.env.RESOLUTION_HEIGHT='800' \
+   --set udev.configuration.brokerPod.env.FRAMES_PER_SECOND='30'
```
+Instead of just setting these environment variables in the broker Pods automatically deployed by Akri, you can ensure that all Pods that request resources discovered via a Configuration have certain environment variables by [setting them in a Configuration's `brokerProperties`](./broker-development.md#Specifying-additional-broker-environment-variables-in-a-Configuration).

**Note:** The udev video broker pods run privileged in order to access the video devices. More explicit device access could have been configured by setting the appropriate [security
diff --git a/docs/user-guide.md b/docs/user-guide.md
index aba2abaae..1163e536e 100644
--- a/docs/user-guide.md
+++ b/docs/user-guide.md
@@ -5,9 +5,10 @@ footage from those cameras. It includes instructions on K8s cluster setup. If yo
cluster of Raspberry Pi 4's, see the [Raspberry Pi 4 demo](./end-to-end-demo-rpi4.md).

## Getting Started
-To get started using Akri, you must first decide what you want to discover and whether Akri current supports a protocol
-that can be used to discover resources of that type. To see the list of currently supported protocols, see our
-[roadmap](./roadmap.md).
+To get started using Akri, you must first decide what you want to discover and whether Akri currently supports a
+Discovery Handler that can be used to discover resources of that type. Akri discovers devices via Discovery Handlers,
+which are often protocol implementations that understand filter information passed via an Akri Configuration. To see the
+list of currently supported Discovery Handlers, see our [roadmap](./roadmap.md).

### Understanding Akri Helm charts
Akri is most easily deployed with Helm charts. Helm charts provide convenient packaging and configuration.
@@ -28,91 +29,128 @@ helm repo add akri-helm-charts https://deislabs.github.io/akri/
helm install akri akri-helm-charts/akri
```
+To use the latest containers of the Akri components, add `--set useLatestContainers=true` when installing Akri like so:
+```sh
+helm install akri akri-helm-charts/akri \
+  --set useLatestContainers=true
+```
+
+Before v0.4.0, all of Akri's Discovery Handlers were embedded in the Agent.
As more Discovery Handlers are added to
+Akri, this will become unsustainable and cause the Agent to have a larger footprint than is oftentimes necessary (if only
+one of the many Discovery Handlers is being leveraged). Starting in v0.4.0, Akri is transitioning to mainly
+supporting an Agent image without any embedded Discovery Handlers, which will be the image used by Akri's Helm chart by
+default. The required Discovery Handlers can be deployed as DaemonSets by setting `<discovery handler name>.discovery.enabled=true` when installing Akri, as explained in the [user flow](#installing-akri-flow). To instead
+use the previous strategy of an Agent image with embedded udev, OPC UA, and ONVIF Discovery Handlers, set
+`agent.full=true`.
+
+To see which version of the **akri** and **akri-dev** Helm charts are stored locally, run `helm inspect chart akri-helm-charts/akri` and `helm inspect chart akri-helm-charts/akri-dev`, respectively.
+
+To grab the latest Akri Helm charts, run `helm repo update`.
+
### Setting up your cluster
-1. Before deploying Akri, you must have a Kubernetes (v1.16 or higher) cluster running and `kubectl` installed. All
-   nodes must be Linux. All of the Akri component containers are currently built for amd64, arm64v8, or arm32v7, so all nodes must
-   have one of these platforms.
+Before deploying Akri, you must have a Kubernetes cluster (v1.16 or higher) running with `kubectl` and Helm installed. Reference our [cluster setup documentation](./setting-up-cluster.md) to set up a cluster or adapt your currently existing cluster. Akri currently supports Linux Nodes on amd64, arm64v8, or arm32v7.
+
+### Installing Akri Flow
+Akri is installed using its Helm chart, which contains settings for deploying the Akri Agents, Controller, Discovery Handlers, and Configurations. All of these can be installed in one command, in several different Helm installations, or via consecutive `helm upgrade` commands. This section will focus on the latter strategy, helping you construct your Akri installation command, assuming you have already decided what you want Akri to discover.
+
+Akri's Helm chart deploys the Akri Controller and Agent by default, so you only need to specify which Discovery Handlers and Configurations need to be deployed in your command. Akri discovers devices via Discovery Handlers, which are often protocol implementations. Akri currently supports three Discovery Handlers (udev, OPC UA, and ONVIF); however, custom Discovery Handlers can be created and deployed as explained in Akri's [Discovery Handler development document](./discovery-handler-development.md). Akri is told what to discover via Akri Configurations, which specify the name of the Discovery Handler that should be used, any discovery details (such as filters) that need to be passed to the Discovery Handler, and optionally any broker Pods and services that should be created upon discovery. For example, the ONVIF Discovery Handler can receive requests to include or exclude cameras with certain IP addresses.

-### Deploying Akri
-1. Install Helm
+Let's walk through building an Akri installation command:
+
+1. Get Akri's Helm repo
+   ```sh
+   helm repo add akri-helm-charts https://deislabs.github.io/akri/
+   ```
+2. Install Akri's Controller and Agent, specifying the crictl configuration from [the cluster setup steps](./setting-up-cluster.md) if not using vanilla Kubernetes:
   ```sh
-   curl -L https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
+   helm install akri akri-helm-charts/akri \
     $AKRI_HELM_CRICTL_CONFIGURATION
   ```
-1. Provide runtime-specific configuration to enable Akri and Helm
-
-   1. If using **K3s**, point to `kubeconfig` for Helm and configure Akri to use the K3s embedded crictl.
-      ```sh
-      # Helm uses $KUBECONFIG to find the Kubernetes configuration
-      export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
-      # Configure Akri to use K3s' embedded crictl and CRI socket
-      export AKRI_HELM_CRICTL_CONFIGURATION="--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/run/k3s/containerd/containerd.sock"
-      ```
-   1. If using **MicroK8s**, enable CoreDNS, RBAC (optional), and Helm. If your broker Pods must run privileged, enable
-      privileged Pods. Also, install crictl, and configure Akri to use MicroK8s' CRI socket.
-      ```sh
-      # Enable CoreDNS, RBAC and Helm
-      microk8s enable dns rbac helm3
-
-      # Optionally enable privileged pods (if your broker Pods must run privileged) and restart MicroK8s.
-      echo "--allow-privileged=true" >> /var/snap/microk8s/current/args/kube-apiserver
-      sudo microk8s stop && microk8s start
-
-      # Install crictl locally (note: there are no known version
-      # limitations, any crictl version is expected to work)
-      VERSION="v1.17.0"
-      curl -L https://github.com/kubernetes-sigs/cri-tools/releases/download/$VERSION/crictl-${VERSION}-linux-amd64.tar.gz --output crictl-${VERSION}-linux-amd64.tar.gz
-      sudo tar zxvf crictl-$VERSION-linux-amd64.tar.gz -C /usr/local/bin
-      rm -f crictl-$VERSION-linux-amd64.tar.gz
-
-      # Configure Akri to use MicroK8s' CRI socket
-      export AKRI_HELM_CRICTL_CONFIGURATION="--set agent.host.crictl=/usr/local/bin/crictl --set agent.host.dockerShimSock=/var/snap/microk8s/common/run/containerd.sock"
-      ```
-      If you don't have existing kubectl and helm installations, you can add aliases. If you do not want to set an
-      alias, add microk8s in front of all kubectl and helm commands.
-      ```sh
-      alias kubectl='microk8s kubectl'
-      alias helm='microk8s helm3'
-      ```
-   1. If using **Kubernetes**, Helm and crictl do not require additional configuration.
-
-1. When installing the Akri Helm chart, you can specify what Configuration to apply by specifying the discovery protocol
-   that will be used in the Configuration. This is done in the setting `--set <protocol>.enabled=true` below. Here,
-   `<protocol>` could be `udev`, `onvif`, or `opcua`. Helm will automatically apply the default Configuration for that protocol to
-   the cluster. You can set values in the Helm install command to customize the Configuration. To explore the values you
-   can set, see our documentation on customizing the provided [ONVIF](./onvif-configuration.md),
-   [udev](./udev-configuration.md), and [OPC UA](./opcua-configuration.md) Configuration templates.
-
-   The Helm settings can also be used to customize where the Akri Controller runs. By default the Controller can be
-   deployed to any control plane or worker node. These settings can be changed by adding extra settings when installing
-   Akri below. If you don't want the Controller to ever be scheduled to control plane nodes, add `--set
-   controller.allowOnControlPlane=false` to your install command below. Conversely, if you only want the Controller to
-   run on control plane nodes, add `--set controller.onlyOnControlPlane=true`. This will guarantee the Controller only
-   runs on nodes with the label (key, value) of (`node-role.kubernetes.io/master`, ""), which is the default label for
-   the control plane node for Kubernetes.
-   However, control plane nodes on MicroK8s and K3s do not have this label by
-   default, so you can add it by running `kubectl label node ${HOSTNAME,,} node-role.kubernetes.io/master=
-   --overwrite=true`.
-
-   Run the following to fetch the Akri Helm chart, install Akri, and apply the default configuration for `<protocol>`,
-   optionally specifying the image for the broker pod that should be deployed to utilize each discovered device.
+   > Note: To use Akri's latest dev releases, specify `akri-helm-charts/akri-dev`
+
+3. Upgrade the installation to deploy the Discovery Handler you wish to use. Discovery Handlers are deployed as DaemonSets like the Agent when `<discovery handler name>.discovery.enabled` is set.
   ```sh
-   helm repo add akri-helm-charts https://deislabs.github.io/akri/
-   helm install akri akri-helm-charts/akri \
-   $AKRI_HELM_CRICTL_CONFIGURATION \
-   --set useLatestContainers=true \
-   --set <protocol>.enabled=true \
-   # --set <protocol>.brokerPod.image.repository=<your broker image> \
-   # apply any additional settings here
+   helm upgrade akri akri-helm-charts/akri \
+     --set <discovery handler name>.discovery.enabled=true
   ```
-   Run `kubectl get crd`, and you should see Akri's two CRDs listed. Run `kubectl get pods -o wide`, and you should see
-   the Akri Controller pod, Agent pods, and broker pods if a broker was specified. Run `kubectl get akric`, and you
-   should see the Configuration for the protocol you specified. If devices were discovered, the instances can be seen
-   by running `kubectl get akrii` and further inspected by running `kubectl get akrii
-   -o yaml`.
-1. Delete the configuration and watch the instances, pods, and services (if you specified a broker image) be deleted.
+   > Note: To install a full Agent with embedded udev, OPC UA, and ONVIF Discovery Handlers, set `agent.full=true` instead of enabling the Discovery Handlers. Note, this will restart the
+   > Agent Pods.
+   > ```sh
+   > helm upgrade akri akri-helm-charts/akri \
+   >   --set agent.full=true
+   > ```
+
+4. Upgrade the installation to apply a Configuration, which requests discovery of certain devices by a Discovery Handler. A Configuration is applied by setting `<discovery handler name>.configuration.enabled`. While some Configurations may not require any discovery details to be set, oftentimes setting details is preferable for narrowing the Discovery Handlers' search. These are set under `<discovery handler name>.configuration.discoveryDetails`. For example, udev rules are passed to the udev Discovery Handler to specify which devices in the Linux device file system it should search for by setting `udev.configuration.discoveryDetails.udevRules`. Akri can be instructed to automatically deploy workloads called "brokers" to each discovered device by setting a broker Pod image in a Configuration via `--set <discovery handler name>.configuration.brokerPod.image.repository=<broker image>`. Learn more about creating brokers in the [broker development document](./broker-development.md).
   ```sh
-   kubectl delete akric akri-<protocol>
-   watch kubectl get pods,services,akric,akrii -o wide
+   helm upgrade akri akri-helm-charts/akri \
+     --set <discovery handler name>.discovery.enabled=true \
+     --set <discovery handler name>.configuration.enabled=true \
+     # set any discovery details in the Configuration
+     # specify any broker images in the Configuration
   ```
-### Modifying your Akri installation or deploying a custom Akri Configuration
-See the [Customizing an Akri Installation document](./customizing-akri-installation.md) for more information on how to modify
-your already deployed Akri installation or to specify a custom Akri Configuration.
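Between the upgrades above, it can be useful to sanity-check each step before moving on. A sketch of such checks follows, using only standard `kubectl` queries; the exact Pod and DaemonSet names will vary by cluster.

```bash
# After step 3: the Discovery Handler should be running as a DaemonSet
# alongside the Agent on each worker node.
kubectl get daemonsets
kubectl get pods -o wide
# After step 4: the applied Configuration should be listed.
kubectl get akric
```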
+Installation could have been done in one step rather than a series of upgrades:
+```sh
+helm repo add akri-helm-charts https://deislabs.github.io/akri/
+helm install akri akri-helm-charts/akri \
+  --set <discovery handler name>.discovery.enabled=true \
+  --set <discovery handler name>.configuration.enabled=true \
+  # set any discovery details in the Configuration
+  # specify any broker images in the Configuration
+```
+As a real example, Akri's Controller, Agents, udev Discovery Handlers, and a udev Configuration that specifies the discovery of only USB video devices and an nginx broker image are installed like so:
+```sh
+helm install akri akri-helm-charts/akri \
+  --set udev.discovery.enabled=true \
+  --set udev.configuration.enabled=true \
+  --set udev.configuration.discoveryDetails.udevRules[0]='KERNEL=="video[0-9]*"' \
+  --set udev.configuration.brokerPod.image.repository=nginx
+```
+> Note: set `<discovery handler name>.configuration.brokerPod.image.tag` to specify an image tag (defaults to `latest`).
+
+This installation can be expanded to install multiple Discovery Handlers and/or Configurations. See the documentation on [udev](./udev-configuration.md), [OPC UA](./opcua-configuration.md), and [ONVIF](./onvif-configuration.md) Configurations to learn more about setting the discovery details passed to their Discovery Handlers and more.
+
+See [modifying an Akri Installation](./customizing-akri-installation.md) to learn how to use Akri's Helm chart to install additional Configurations and Discovery Handlers.
+
+### Inspecting an Akri Installation
+- Run `kubectl get crd`, and you should see Akri's two CRDs listed.
+- Run `kubectl get pods -o wide`, and you should see the Akri Controller, Agent, and (if specified) broker pods.
+- Run `kubectl get akric`, and you should see the Configuration for the protocol you specified.
+- If devices were discovered, the instances can be seen by running `kubectl get akrii` and further inspected by running `kubectl get akrii <instance name> -o yaml`.
+- List everything that Akri has automatically created and deployed, namely the Akri Controller, Agents, Configurations, Instances (the Akri custom resource that represents each device), and, if specified, broker Pods, a service for each broker Pod, and a service for all brokers.
+  For MicroK8s:
+  ```sh
+  watch microk8s kubectl get pods,akric,akrii,services -o wide
+  ```
+  For K3s and vanilla Kubernetes:
+  ```sh
+  watch kubectl get pods,akric,akrii,services -o wide
+  ```

### Deleting Akri Configurations
+To tell Akri to stop discovering devices, simply delete the Configuration that initiated the discovery. Watch as all Instances that represent the discovered devices are deleted.
+```sh
+kubectl delete akric akri-<discovery handler name>
+kubectl get akrii
+```
+
+### Deleting Akri
+1. If you are done using Akri, it can be uninstalled via Helm.
+   ```sh
+   helm delete akri
+   ```
+1. Delete Akri's CRDs.
+   ```sh
+   kubectl delete crd instances.akri.sh
+   kubectl delete crd configurations.akri.sh
+   ```
+
+### Customizing where the Controller runs
+By default, the Controller can be deployed to any control plane or worker node. This can be changed by adding extra settings when installing
+Akri. If you don't want the Controller to ever be scheduled to control plane nodes, add `--set
+controller.allowOnControlPlane=false` to your install command. Conversely, if you only want the Controller to
+run on control plane nodes, add `--set controller.onlyOnControlPlane=true`.
This will guarantee the Controller only
+runs on nodes with the label (key, value) of (`node-role.kubernetes.io/master`, ""), which is the default label for
+the control plane node for Kubernetes.
+
+However, control plane nodes on MicroK8s and K3s may not have this exact label by
+default, so you can add it by running `kubectl label node ${HOSTNAME,,} node-role.kubernetes.io/master= --overwrite=true`.
+Alternatively, in K3s, you can keep the default label value on the master and set `controller.nodeSelectors."node-role\.kubernetes\.io/master"=true`.
diff --git a/docs/workflows-run-test-cases.md b/docs/workflows-run-test-cases.md
new file mode 100644
index 000000000..c8707c0e9
--- /dev/null
+++ b/docs/workflows-run-test-cases.md
@@ -0,0 +1,120 @@
+# Test K3s, Kubernetes (Kubeadm) and MicroK8s
+
+File: `/.github/workflows/run-test-cases.yml`
+
+A GitHub workflow that:
+
++ runs Python-based end-to-end [tests](#Tests);
++ through 5 different Kubernetes [versions](#Versions): 1.16, 1.17, 1.18, 1.19, 1.20;
++ on 3 different Kubernetes distros: [K3s](https://k3s.io), [Kubernetes (Kubeadm)](https://kubernetes.io/docs/reference/setup-tools/kubeadm/), [MicroK8s](https://microk8s.io).
+
+## Tests
+
+|Name|File|Documentation|
+|----|----|-----------|
+|end-to-end|`/test/run-end-to-end.py`|TBD|
+
+## Versions
+
+Distro K3s Version 1.16 creates [Device Plugins](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/device-plugins/) sockets at `/var/lib/rancher/k3s/agent/kubelet/device-plugins` whereas Kubernetes expects these sockets to be created at `/var/lib/kubelet/device-plugins`.
+
+See K3s issue: [Compatibility with Device Plugins #1390](https://github.com/k3s-io/k3s/issues/1390)
+
+The fix for K3s 1.16 is to create a symbolic link from the K3s location to the Kubernetes-expected location. This is added as an exception to the workflow step for K3s:
+
+```bash
+if [ "${{ matrix.kube.runtime }}" == "K3s-1.16" ]; then
+  mkdir -p /var/lib/kubelet
+  if [ -d /var/lib/kubelet/device-plugins ]; then
+    sudo rm -rf /var/lib/kubelet/device-plugins
+  fi
+  sudo ln -s /var/lib/rancher/k3s/agent/kubelet/device-plugins /var/lib/kubelet/device-plugins
+fi
+```
+
+This issue was addressed in K3s version 1.17.
+
+## Jobs|Steps
+
+The workflow comprises two jobs (`build-containers` and `test-cases`).
+
+## `build-containers`
+
+`build-containers` builds container images for Akri 'controller' and 'agent' based upon the commit that triggers the workflow. Once built, these images are shared with the `test-cases` job, using GitHub Action [upload-artifact](https://github.com/actions/upload-artifact).
+
+## `test-cases`
+
+`test-cases` uses a GitHub [strategy](https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#jobsjob_idstrategy) to run its steps across the different Kubernetes distros and versions summarized at the top of this document.
+
+New Kubernetes distro versions may be added to the job by adding entries to `jobs.test-cases.strategy.matrix.kube`. Each array entry must include:
+
+|Property|Description|
+|--------|-----------|
+|`runtime`|A unique identifier for this distro-version pair|
+|`version`|A distro-specific unique identifier for the Kubernetes version|
+|`crictl`|A reference to the release of [`cri-tools`](https://github.com/kubernetes-sigs/cri-tools) including `crictl` that will be used|
+
+Notes:
+
++ `runtime` is used by subsequent steps as a way to determine the distro, e.g.
`startsWith(matrix.kube.runtime, 'K3s')`
++ `version` is used by each distro to determine which binary, snap, etc. to install. Refer to each distro's documentation to determine the value required.
++ `crictl` is used by `K3s` and `MicroK8s` to determine which version of `crictl` must be installed. `Kubeadm` includes `crictl` and so this variable is left as `UNUSED` for this distro.
+
+### Distro installation and Akri container images
+
+Each distro has an installation step and a step to import the Akri `controller` and `agent` images created by the `build-containers` job.
+
+The installation steps are identified by:
+
+```YAML
+if: startsWith(matrix.kube.runtime, ${DISTRO})
+```
+
+The installation instructions map closely to the installation instructions provided for the distro. For `K3s` and `MicroK8s`, the step includes installation of `cri-tools` so that `crictl` is available.
+
+The container image import steps are identified by:
+
+```YAML
+if: (startsWith(github.event_name, 'pull_request')) && (startsWith(matrix.kube.runtime, ${DISTRO}))
+```
+
+### Helm and state
+
+In order to pass state between the workflow and the Python end-to-end test scripts, temporary (`/tmp`) files are used:
+
+|File|Description|
+|----|-----------|
+|`agent_log.txt`|Filename used by workflow to persist the Agent's log|
+|`controller_log.txt`|Filename used by workflow to persist the Controller's log|
+|`cri_args_to_test.txt`|`crictl` configuration that is passed to the `helm install` command|
+|`extra_helm_args.txt`|Additional configuration that is passed to the `helm install` command|
+|`helm_chart_location.txt`|Path to the Helm Chart|
+|`kubeconfig_path_to_test.txt`|Path to `kubectl` cluster configuration file|
+|`runtime_cmd_to_test.txt`|Location of `kubectl` binary|
+|`sleep_duration.txt`|Optional: contains the number of seconds to pause|
+|`version_to_test.txt`|Akri version to test|
+
+If you review `/test/run-end-to-end.py` and `/test/shared_test_code.py`, you will see these files referenced:
+
+```Python3
+AGENT_LOG_PATH = "/tmp/agent_log.txt"
+CONTROLLER_LOG_PATH = "/tmp/controller_log.txt"
+KUBE_CONFIG_PATH_FILE = "/tmp/kubeconfig_path_to_test.txt"
+RUNTIME_COMMAND_FILE = "/tmp/runtime_cmd_to_test.txt"
+HELM_CRI_ARGS_FILE = "/tmp/cri_args_to_test.txt"
+VERSION_FILE = "/tmp/version_to_test.txt"
+SLEEP_DURATION_FILE = "/tmp/sleep_duration.txt"
+EXTRA_HELM_ARGS_FILE = "/tmp/extra_helm_args.txt"
+HELM_CHART_LOCATION = "/tmp/helm_chart_location.txt"
+```
+
+### Tests
+
+Of all the steps, only one is needed to run the Python end-to-end script.
+
+stdout|stderr from the script can be logged to the workflow.
+
+### Upload
+
+Once the end-to-end script is complete, the workflow uses the GitHub Action [upload-artifact](https://github.com/actions/upload-artifact) again to upload `/tmp/agent_log.txt` and `/tmp/controller_log.txt` so that these remain available (for download) once the workflow completes.
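To make the state handoff concrete, here is a sketch of how a workflow step might populate a few of these `/tmp` files before invoking the end-to-end script; the values written below are illustrative only.

```bash
# Hand state from the workflow to the Python tests via the agreed /tmp files.
echo "--set agent.host.crictl=/usr/local/bin/crictl" > /tmp/cri_args_to_test.txt
echo "kubectl" > /tmp/runtime_cmd_to_test.txt
echo "$HOME/.kube/config" > /tmp/kubeconfig_path_to_test.txt
# The test script then reads these paths (see the constants from shared_test_code.py above).
python3 /test/run-end-to-end.py
```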
\ No newline at end of file diff --git a/samples/apps/anomaly-detection-app/anomaly_detection.py b/samples/apps/anomaly-detection-app/anomaly_detection.py index 685a02d26..82336646d 100644 --- a/samples/apps/anomaly-detection-app/anomaly_detection.py +++ b/samples/apps/anomaly-detection-app/anomaly_detection.py @@ -71,9 +71,11 @@ def continuously_get_values(): logging.info("Starting to call GetValue on endpoint %s", url) while True: try: - channel = grpc.insecure_channel(url) + channel = grpc.insecure_channel(url, options=( + ('grpc.use_local_subchannel_pool', 1),)) stub = opcua_node_pb2_grpc.OpcuaNodeStub(channel) value_response = stub.GetValue(opcua_node_pb2.ValueRequest()) + channel.close() if test_new_value(data, value_response.value) == -1: values_log.append(make_log_entry( value_response.opcua_server, value_response.value, True)) diff --git a/samples/apps/anomaly-detection-app/templates/index.html b/samples/apps/anomaly-detection-app/templates/index.html index ab23e9010..936711a55 100644 --- a/samples/apps/anomaly-detection-app/templates/index.html +++ b/samples/apps/anomaly-detection-app/templates/index.html @@ -6,7 +6,7 @@
-

Akri OPCUA Node Value Anomaly Detection

+

Akri OPC UA Node Value Anomaly Detection

  • No values
  •
diff --git a/samples/apps/video-streaming-app/streaming.py b/samples/apps/video-streaming-app/streaming.py
index b6b4303b3..b6d6024be 100644
--- a/samples/apps/video-streaming-app/streaming.py
+++ b/samples/apps/video-streaming-app/streaming.py
@@ -19,180 +19,222 @@
 from kubernetes import client, config
 import re

-camera_frame_queues = []
-small_frame_sources = []
+class CameraFeed:
+    def __init__(self, url):
+        global global_stop_event
+
+        self.url = url
+        self.queue = queue.Queue(1)
+        self.thread = None
+        self.stop_event = global_stop_event
+
+    def __eq__(self, other):
+        if other is None:
+            return False
+        return self.url == other.url
+
+    def start_handler(self):
+        self.thread = threading.Thread(target=self.get_frames)
+        self.thread.start()
+
+    def wait_handler(self):
+        if self.thread is not None:
+            self.thread.join()
+
+    # Generator function for video streaming.
+    def generator_func(self):
+        while not self.stop_event.wait(0.01):
+            frame = self.queue.get(True, None)
+            yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
+
+    # Loops, creating gRPC client and grabbing frame from camera serving specified url.
+    def get_frames(self):
+        logging.info("Starting get_frames(%s)" % self.url)
+        while not self.stop_event.wait(0.01):
+            try:
+                client_channel = grpc.insecure_channel(self.url, options=(
+                    ('grpc.use_local_subchannel_pool', 1),))
+                camera_stub = camera_pb2_grpc.CameraStub(client_channel)
+                frame = camera_stub.GetFrame(camera_pb2.NotifyRequest())
+                frame = frame.frame
+                client_channel.close()
+
+                frame_received = False
+                # prevent stale data
+                if (len(frame) > 0):
+                    if (self.queue.full()):
+                        try:
+                            self.queue.get(False)
+                        except:
+                            pass
+                    self.queue.put(frame, False)
+                    frame_received = True
+
+                if (frame_received):
+                    sleep(1)
+
+            except:
+                logging.info("[%s] Exception %s" % (self.url, traceback.format_exc()))
+                sleep(1)

-def get_camera_list(configuration_name):
-    camera_list = []
+class CameraDisplay:
+    def __init__(self):
+        self.main_camera = None
+        self.small_cameras = []
+        self.mutex = threading.Lock()
+
+    def __eq__(self, other):
+        return self.main_camera == other.main_camera and self.small_cameras == other.small_cameras
+
+    def start_handlers(self):
+        if self.main_camera is not None:
+            self.main_camera.start_handler()
+        for small_camera in self.small_cameras:
+            small_camera.start_handler()
+
+    def wait_handlers(self):
+        global global_stop_event
+
+        global_stop_event.set()
+        if self.main_camera is not None:
+            self.main_camera.wait_handler()
+        for small_camera in self.small_cameras:
+            small_camera.wait_handler()
+        global_stop_event.clear()
+
+    def merge(self, other):
+        self.mutex.acquire()
+        try:
+            self.wait_handlers()
+
+            self.main_camera = other.main_camera
+            self.small_cameras = other.small_cameras
+
+            self.start_handlers()
+        finally:
+            self.mutex.release()
+
+    def count(self):
+        self.mutex.acquire()
+        result = len(self.small_cameras)
+        if self.main_camera is not None:
+            result += 1
+        self.mutex.release()
+        return result
+
+    def hash_code(self):
+        self.mutex.acquire()
+        cameras = ",".join([camera.url for camera in self.small_cameras])
+        if self.main_camera is not None:
+            cameras = "{0}+{1}".format(self.main_camera.url, cameras)
+        self.mutex.release()
+        return cameras
+
+    def stream_frames(self, camera_id):
+        selected_camera = None
+        camera_id = int(camera_id)
+
+        self.mutex.acquire()
+        if camera_id == 0:
+            selected_camera = self.main_camera
+        elif camera_id - 1 < len(self.small_cameras):
+            selected_camera = self.small_cameras[camera_id - 1]
+
self.mutex.release() + + if selected_camera is None: + return Response(None, 500) + else: + return Response(selected_camera.generator_func(), mimetype='multipart/x-mixed-replace; boundary=frame') + +def get_camera_display(configuration_name): + camera_display = CameraDisplay() + config.load_incluster_config() coreV1Api = client.CoreV1Api() + + # TODO use labels instead once available + instance_service_name_regex = re.compile( + configuration_name + "-[\da-f]{6}-svc") + ret = coreV1Api.list_service_for_all_namespaces(watch=False) - p = re.compile(configuration_name + "-[\da-f]{6}-svc") for svc in ret.items: - if not p.match(svc.metadata.name): - continue - grpc_ports = list(filter(lambda port: port.name == "grpc", svc.spec.ports)) - if (len(grpc_ports) == 1): - url = "{0}:{1}".format(svc.spec.cluster_ip, grpc_ports[0].port) - camera_list.append(url) - camera_list.sort() - return camera_list + if svc.metadata.name == configuration_name + "-svc": + grpc_ports = list( + filter(lambda port: port.name == "grpc", svc.spec.ports)) + if (len(grpc_ports) == 1): + url = "{0}:{1}".format(svc.spec.cluster_ip, grpc_ports[0].port) + camera_display.main_camera = CameraFeed(url) + elif instance_service_name_regex.match(svc.metadata.name): + grpc_ports = list( + filter(lambda port: port.name == "grpc", svc.spec.ports)) + if (len(grpc_ports) == 1): + url = "{0}:{1}".format(svc.spec.cluster_ip, grpc_ports[0].port) + camera_display.small_cameras.append(CameraFeed(url)) -app = Flask(__name__) + camera_display.small_cameras.sort(key=lambda camera: camera.url) -@app.route('/') -# Home page for video streaming. -def index(): - global camera_frame_queues - return render_template('index.html', camera_count=len(camera_frame_queues)-1) - -@app.route('/camera_list') -# Returns the current list of cameras to allow for refresh -def camera_list(): - global small_frame_sources - return ",".join(small_frame_sources) - -# Generator function for video streaming. -def gen(frame_queue, verbose=False): - while True: - frame = frame_queue.get(True, None) - yield (b'--frame\r\n' - b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') + return camera_display -# Gets response and puts it in frame queue. -def response_wrapper(frame_queue): - return Response(gen(frame_queue), - mimetype='multipart/x-mixed-replace; boundary=frame') +def run_webserver(): + app.run(host='0.0.0.0', threaded=True) -@app.route('/camera_frame_feed/') -# Gets frame feed for specified camera. 
-def camera_frame_feed(camera_id=0): - global camera_frame_queues - camera_id = int(camera_id) - if (camera_id <= len(camera_frame_queues)): - logging.info("camera_feed %d" % camera_id) - return response_wrapper(camera_frame_queues[camera_id]) - return None - -# Updates set of cameras based on set of camera instance services -def refresh_cameras(camera_frame_threads, small_frame_sources, camera_frame_queues, stop_event): +def refresh_cameras(): + global global_camera_display while True: sleep(1) - camera_list = get_camera_list(os.environ['CONFIGURATION_NAME']) - if camera_list != small_frame_sources: - old_count = len(small_frame_sources) - new_count = len(camera_list) - logging.info("Camera change detected, old: %d, new: %d" % (old_count, new_count)) - if old_count != new_count: - if old_count < new_count: - for x in range(new_count - old_count): - camera_frame_queues.append(queue.Queue(1)) - small_frame_sources[:] = camera_list - else: - small_frame_sources[:] = camera_list - camera_frame_queues[:] = camera_frame_queues[:(old_count - new_count)] - else: - small_frame_sources[:] = camera_list - logging.info(small_frame_sources) - schedule_get_frames( - camera_frame_threads, small_frame_sources, camera_frame_queues, stop_event) - -def run_webserver(): - app.run(host='0.0.0.0', threaded=True) + camera_display = get_camera_display(os.environ['CONFIGURATION_NAME']) + if camera_display != global_camera_display: + global_camera_display.merge(camera_display) -# Loops, creating gRPC client and grabing frame from camera serving specified url. -def get_frames(url, frame_queue, stop_event): - logging.info("Starting get_frames(%s)" % url) - while not stop_event.wait(0.01): - try: - client_channel = grpc.insecure_channel(url, options=( - ('grpc.use_local_subchannel_pool', 1),)) - camera_stub = camera_pb2_grpc.CameraStub(client_channel) - frame = camera_stub.GetFrame(camera_pb2.NotifyRequest()) - frame = frame.frame - client_channel.close() - - frame_received = False - # prevent stale data - if (len(frame) > 0): - if (frame_queue.full()): - try: - frame_queue.get(False) - except: - pass - frame_queue.put(frame, False) - frame_received = True - - if (frame_received): - sleep(1) +global_stop_event = threading.Event() +global_camera_display = CameraDisplay() - except: - logging.info("[%s] Exception %s" % (url, traceback.format_exc())) - sleep(1) +app = Flask(__name__) -# schedules frame polling threads -def schedule_get_frames(camera_frame_threads, small_frame_sources, camera_frame_queues, stop_event): - if camera_frame_threads: - stop_event.set() - for camera_frame_thread in camera_frame_threads: - camera_frame_thread.join() - stop_event.clear() - camera_frame_threads.clear() +# Home page for video streaming. 
+@app.route('/')
+def index():
+    global global_camera_display
+    return render_template('index.html', camera_count=global_camera_display.count(), camera_list=global_camera_display.hash_code())

-    cameras_frame_thread = threading.Thread(target=get_frames, args=(main_frame_source, camera_frame_queues[0], stop_event))
-    cameras_frame_thread.start()
-    camera_frame_threads.append(cameras_frame_thread)
+# Returns the current list of cameras to allow for refresh
+@app.route('/camera_list')
+def camera_list():
+    global global_camera_display
+    logging.info("Expected cameras: %s" % global_camera_display.hash_code())
+    return global_camera_display.hash_code()

-    for camera_id in range(1, len(small_frame_sources) + 1):
-        camera_frame_thread = threading.Thread(target=get_frames, args=(small_frame_sources[camera_id - 1], camera_frame_queues[camera_id], stop_event))
-        camera_frame_thread.start()
-        camera_frame_threads.append(camera_frame_thread)
+# Gets frame feed for specified camera.
+@app.route('/camera_frame_feed/<camera_id>')
+def camera_frame_feed(camera_id=0):
+    global global_camera_display
+    return global_camera_display.stream_frames(camera_id)

 print("Starting...", flush=True)
 logging.basicConfig(format="%(asctime)s: %(message)s", level=logging.INFO, datefmt="%H:%M:%S")

-main_frame_source = ""
-
 if 'CONFIGURATION_NAME' in os.environ:
     # Expecting source service ports to be named grpc
     configuration_name = os.environ['CONFIGURATION_NAME']
+    camera_display = get_camera_display(configuration_name)
+    global_camera_display.merge(camera_display)

-    config.load_incluster_config()
-    coreV1Api = client.CoreV1Api()
-    ret = coreV1Api.list_service_for_all_namespaces(watch=False)
-    for svc in ret.items:
-        if svc.metadata.name == configuration_name + "-svc":
-            grpc_ports = list(
-                filter(lambda port: port.name == "grpc", svc.spec.ports))
-            if (len(grpc_ports) == 1):
-                main_frame_source = "{0}:{1}".format(
-                    svc.spec.cluster_ip, grpc_ports[0].port)
-
-    small_frame_sources = get_camera_list(configuration_name)
-    camera_count = len(small_frame_sources)
+    refresh_thread = threading.Thread(target=refresh_cameras)
+    refresh_thread.start()
 else:
     camera_count = int(os.environ['CAMERA_COUNT'])
-    main_frame_source = "{0}:80".format(os.environ['CAMERAS_SOURCE_SVC'])
+    main_camera_url = "{0}:80".format(os.environ['CAMERAS_SOURCE_SVC'])
+    global_camera_display.main_camera = CameraFeed(main_camera_url)
     for camera_id in range(1, camera_count + 1):
         url = "{0}:80".format(
             os.environ['CAMERA{0}_SOURCE_SVC'.format(camera_id)])
-        small_frame_sources.append(url)
-
-for camera_id in range(camera_count + 1):
-    camera_frame_queues.append(queue.Queue(1))
+        global_camera_display.small_cameras.append(CameraFeed(url))

+global_camera_display.start_handlers()
 webserver_thread = threading.Thread(target=run_webserver)
 webserver_thread.start()

-stop_event = threading.Event()
-camera_frame_threads = []
-schedule_get_frames(camera_frame_threads, small_frame_sources, camera_frame_queues, stop_event)
-
-if 'CONFIGURATION_NAME' in os.environ:
-    refresh_thread = threading.Thread(target=refresh_cameras, args=(camera_frame_threads, small_frame_sources, camera_frame_queues, stop_event))
-    refresh_thread.start()
-
 print("Started", flush=True)
 webserver_thread.join()
 print("Done", flush=True)
diff --git a/samples/apps/video-streaming-app/templates/index.html b/samples/apps/video-streaming-app/templates/index.html
index ad980eafe..dde641b9e 100644
--- a/samples/apps/video-streaming-app/templates/index.html
+++ b/samples/apps/video-streaming-app/templates/index.html
@@ -5,11 +5,13 @@

    Akri Demo

    + {%if camera_count > 0 %}
    + {%endif%}
      - {%for camera_id in range(1, camera_count + 1)%} + {%for camera_id in range(1, camera_count)%}
    • @@ -17,10 +19,10 @@

      Akri Demo