Compare commits

..

No commits in common. "devel" and "v3.6.0" have entirely different histories.

3414 changed files with 145006 additions and 123988 deletions

View File

@ -2,5 +2,4 @@
BasedOnStyle: Chromium
IndentWidth: 4
ColumnLimit: 120
AccessModifierOffset: -2
InsertNewlineAtEOF: true
AccessModifierOffset: -2

View File

@ -6,6 +6,6 @@ Checks: >
modernize-redundant-void-arg,
modernize-use-bool-literals,
modernize-use-nullptr,
readability-braces-around-statements,
-clang-analyzer-security.insecureAPI.rand
readability-braces-around-statements
-clang-analyzer-security.insecureAPI.rand,
WarningsAsErrors: '*'

View File

@ -1,34 +0,0 @@
title: "[Summarize the pattern. Be as clear and concise as possible.] "
labels: ["ROSES"]
body:
- type: markdown
attributes:
value: |
> [!TIP]
> The **Title** may be the only thing the community sees when voting on your pattern.
This work is funded through [NASA ROSES](https://github.com/nasa/fprime/discussions/3041).
- type: textarea
id: pattern
attributes:
label: Pattern
description: "Describe what you want documented."
validations:
required: true
- type: markdown
attributes:
value: |
> [!TIP]
> Describe the **Pattern** without trying to sell it. Describe what you need.
- type: textarea
id: rationale
attributes:
label: Rationale
description: "(Optional) Describe why you want it documented. Sell it to the community."
validations:
required: false
- type: markdown
attributes:
value: |
> [!TIP]
> Use the **Rationale** to sell your idea to the community. Describe why you need this.

View File

@ -1,7 +1,5 @@
set timeout 180
spawn fprime-bootstrap project
expect -re {.*Project repository name.*}
send "my-project\r"
expect -re {.*Project top-level namespace.*}
expect -re {.*Project name.*}
send "MyProject\r"
expect eof

View File

@ -2,8 +2,6 @@ set timeout 60
spawn fprime-util new --deployment
expect -re {Deployment name.*}
send "MyDeployment\r"
expect -re {Deployment namespace.*}
send "MyNamespace\r"
expect -re {.*Select communication driver type}
send "1\r"
expect -re "Add .*MyDeployment.*"

View File

@ -101,4 +101,3 @@
^\QSvc/FileUplink/test/ut/dest.bin\E$
ignore$
mlc-config.json
Autocoders/

File diff suppressed because it is too large Load Diff

View File

@ -137,12 +137,3 @@ TeX/AMS
# ignore long runs of a single character:
\b([A-Za-z])\g{-1}{3,}\b
##########################################
###### F Prime specific patterns ######
##########################################
# .get...() .set...() autocoded functions
\.get\w+\(
\.set\w+\(

View File

@ -3,25 +3,20 @@
|**_Related Issue(s)_**| |
|**_Has Unit Tests (y/n)_**| |
|**_Documentation Included (y/n)_**| |
|**_Generative AI was used in this contribution (y/n)_**| |
---
## Change Description
<!-- A description of the changes contained in the PR. -->
A description of the changes contained in the PR.
## Rationale
<!-- A rationale for this change. e.g. fixes bug, or most projects need XYZ feature. -->
A rationale for this change. e.g. fixes bug, or most projects need XYZ feature.
## Testing/Review Recommendations
<!-- Fill in testing procedures, specific items to focus on for review, or other info to help the team verify these changes are flight-quality. -->
Fill in testing procedures, specific items to focus on for review, or other info to help the team verify these changes are flight-quality.
## Future Work
<!-- Note any additional work that will be done relating to this issue. -->
## AI Usage (see [policy](https://github.com/nasa/fprime/blob/devel/AI_POLICY.md))
<!-- If AI was used, please describe how it was utilized (e.g., code generation, documentation, testing, debugging assistance, etc.). -->
Note any additional work that will be done relating to this issue.

View File

@ -1,6 +1,6 @@
# This is a basic workflow to help you get started with Actions
name: CI [macOS]
name: macOS-CI
# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
@ -15,10 +15,6 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:

View File

@ -1,92 +0,0 @@
# Run suite of CI builds and tests in a RHEL8 container
name: CI [RHEL8]
# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
on:
push:
branches: [ devel, release/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
Framework:
runs-on: ubuntu-latest
container:
image: redhat/ubi8:8.10
steps:
- name: "Install dependencies"
run: |
dnf install -y git python3.12 python3.12-pip llvm-toolset libasan libubsan
git config --global --add safe.directory ${GITHUB_WORKSPACE}
- name: "Checkout F´ Repository"
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- uses: ./.github/actions/setup
- name: Build Framework
run: |
fprime-util generate -DFPRIME_ENABLE_JSON_MODEL_GENERATION=ON
fprime-util build --all -j4
Ref:
runs-on: ubuntu-latest
container:
image: redhat/ubi8:8.10
steps:
- name: "Install dependencies"
run: |
dnf install -y git python3.12 python3.12-pip llvm-toolset libasan libubsan
git config --global --add safe.directory ${GITHUB_WORKSPACE}
- name: "Checkout F´ Repository"
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- uses: ./.github/actions/setup
- name: Build Ref
run: |
cd Ref
fprime-util generate -DFPRIME_ENABLE_JSON_MODEL_GENERATION=ON
fprime-util build -j4
UTs:
runs-on: ubuntu-latest
container:
image: redhat/ubi8:8.10
steps:
- name: "Install dependencies"
run: |
dnf install -y git python3.12 python3.12-pip llvm-toolset libasan libubsan
git config --global --add safe.directory ${GITHUB_WORKSPACE}
- name: "Checkout F´ Repository"
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- uses: ./.github/actions/setup
# Some UTs require a non-root user to run properly due to using file permissions in tests
# This issue shows up on RHEL8 containers
- name: Setup test environment for permission tests
run: |
useradd -m -u 1001 -s /bin/bash test-user
chown -R test-user:test-user .
- name: UT Build and Run
run: |
su test-user -c "
fprime-util generate --ut &&
fprime-util build --all --ut -j4 &&
fprime-util check --all -j4
"

83
.github/workflows/build-test-rpi.yml vendored Normal file
View File

@ -0,0 +1,83 @@
# This is a basic workflow to help you get started with Actions
name: RPI-CI
# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
on:
push:
branches: [ devel, release/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
env:
RPI_TOOLCHAIN_DIR: /tmp/rpi-toolchain
DICTIONARY_PATH: build-artifacts/raspberrypi/RPI/dict/RPITopologyDictionary.json
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
RPI:
runs-on: ubuntu-22.04
steps:
- name: "Checkout F´ Repository"
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- uses: ./.github/actions/setup
- name: Setup RPI Toolchain
uses: fprime-community/setup-rpi-sysroot@main
- name: F prime CI step
run: ./ci/tests/RPI.bash
- name: Copy Tests
run: mkdir -p artifact/RPI; cp -rp RPI/test RPI/build-artifacts artifact/RPI; cp -rp ci artifact
# Build Artifacts
- name: 'RPI Build Output'
uses: actions/upload-artifact@v4
with:
name: rpi-build
path: artifact
retention-days: 5
# Archive the outputs
- name: 'Archive Logs'
uses: actions/upload-artifact@v4
if: always()
with:
name: rpi-logs
path: ci-logs.tar.gz
retention-days: 5
RPI-Integration:
runs-on: self-hosted
needs: RPI
steps:
- uses: actions/checkout@v4
with:
sparse-checkout: 'requirements.txt'
- name: "Setup environment"
run: |
python -m venv venv
. venv/bin/activate
pip install -r requirements.txt
- name: RPI Build Download
uses: actions/download-artifact@v4
with:
name: rpi-build
- name: RPI Integration Tests
run: |
chmod +x RPI/build-artifacts/raspberrypi/RPI/bin/RPI
. venv/bin/activate
/bin/bash ci/tests/RPI-Ints.bash
# Archive the outputs
- name: 'Archive Logs'
uses: actions/upload-artifact@v4
if: always()
with:
name: pi-int-logs
path: ci-logs.tar.gz
retention-days: 5
# Clean-Up moved to post script to prevent collisions

View File

@ -1,6 +1,6 @@
# This is a basic workflow to help you get started with Actions
name: CI [ubuntu]
name: CI
# Controls when the action will run. Triggers the workflow on push or pull request
# events but only for the master branch
@ -15,10 +15,6 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:

View File

@ -13,10 +13,7 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
@ -29,6 +26,14 @@ jobs:
fetch-depth: 0
submodules: true
- uses: ./.github/actions/setup
# Specifically install CMake minimum version
- name: Minimum CMake Install
shell: bash
run: |
export CMAKE_TAR_FILE="https://cmake.org/files/v3.16/cmake-3.16.0-Linux-x86_64.tar.gz"
export CMAKE_INSTALL_DIRECTORY="${GITHUB_WORKSPACE}/tools-override"
mkdir -p "${GITHUB_WORKSPACE}/tools-override"
curl -Ls "${CMAKE_TAR_FILE}" | tar -zC "${CMAKE_INSTALL_DIRECTORY}" --strip-components=1 -x
- name: CMake Tests
working-directory: ./cmake/test
shell: bash

View File

@ -1,7 +1,7 @@
# Semantic code analysis with CodeQL
# see https://github.com/github/codeql-action
name: "Code Scan: JPL Coding Standard"
name: "JPL Coding Standard Scan"
on:
push:
@ -14,10 +14,6 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
analyze:

View File

@ -1,7 +1,7 @@
# Semantic code analysis with CodeQL
# see https://github.com/github/codeql-action
name: "Code Scan: CodeQL Security"
name: "CodeQL Security Scan"
on:
push:
@ -14,10 +14,6 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
analyze:

View File

@ -0,0 +1,93 @@
name: Cookiecutters Tests
on:
push:
branches: [ devel, release/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# This workflow tests the project bootstrapping and cookiecutter templates by creating
# a new project, deployment and component and building them
# This uses the `expect` utility to feed input into the various cookiecutter prompts
jobs:
Validate:
runs-on: ubuntu-latest
steps:
# Checkout only the bootstrap.expect file, since the full F´ repo will be
# checked out as part of the fprime-bootstrap process
- name: "Retrieve bootstrap.expect file"
uses: actions/checkout@v4
with:
sparse-checkout: |
.github/actions/cookiecutter-check/bootstrap.expect
sparse-checkout-cone-mode: false
fetch-depth: 0
- name: "Setup Python"
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: "Install expect and fprime-bootstrap@devel"
run: |
sudo apt-get install expect
pip install git+https://github.com/fprime-community/fprime-bootstrap@devel
- name: "Bootstrap Project"
run: |
expect .github/actions/cookiecutter-check/bootstrap.expect
# Overlay fprime@current_rev in new project so that we build with it in the tests
# current_rev is devel on the devel branch and the PR revision in PR checks
- name: "Overlay fprime@current_rev in new project"
uses: actions/checkout@v4
with:
submodules: true
path: ./MyProject/lib/fprime
fetch-depth: 0
- name: "Update dependencies and install fprime-tools@devel"
run: |
cd MyProject
. fprime-venv/bin/activate
pip install -U -r ./lib/fprime/requirements.txt
pip install git+https://github.com/nasa/fprime-tools@devel
- name: "Version Check"
run: |
cd MyProject
. fprime-venv/bin/activate
fprime-util version-check
- name: "Test Generate and Build Project"
run: |
cd MyProject
. fprime-venv/bin/activate
fprime-util generate
fprime-util build -j4
- name: "Test New Deployment and Build"
run: |
cd MyProject
. fprime-venv/bin/activate
expect ./lib/fprime/.github/actions/cookiecutter-check/deployment.expect
cd MyDeployment
fprime-util build -j4
- name: "Test New Component and Build"
run: |
cd MyProject
. fprime-venv/bin/activate
expect ./lib/fprime/.github/actions/cookiecutter-check/component.expect
cd MyComponent
fprime-util build -j4

View File

@ -1,5 +1,5 @@
# Adapted from https://github.com/nasa/cFS/blob/c36aa2c1df0fb47a3838577908af3d0d0ab0ef54/.github/workflows/static-analysis.yml
name: "Code Scan: CppCheck"
name: "Cppcheck Scan"
on:
push:
@ -12,10 +12,6 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
cppcheck:
@ -35,7 +31,7 @@ jobs:
- uses: ./.github/actions/setup
- name: Install cppcheck
run: sudo apt-get update && sudo apt-get install cppcheck xsltproc -y
run: sudo apt-get install cppcheck xsltproc -y
- name: Install sarif tool
run: npm i -g @microsoft/sarif-multitool
@ -47,10 +43,6 @@ jobs:
run: |
fprime-util generate -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
fprime-util build --all --jobs "$(nproc || printf '%s\n' 1)"
# Add EXCLUDE_FROM_ALL targets as we need to explicitly build them
fprime-util build --target Svc_Subtopologies --jobs "$(nproc || printf '%s\n' 1)"
fprime-util build --target Svc_GenericHub --jobs "$(nproc || printf '%s\n' 1)"
echo CPPCHECK_OPTS=--project="$GITHUB_WORKSPACE/build-fprime-automatic-native/compile_commands.json" >> $GITHUB_ENV
- name: Run cppcheck

View File

@ -1,4 +1,4 @@
name: "Code Scan: Cpplint"
name: "Cpplint Scan"
on:
push:
@ -11,10 +11,6 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
cpplint:
@ -37,7 +33,7 @@ jobs:
run: pip install cpplint
- name: Install xsltproc
run: sudo apt-get update && sudo apt-get install xsltproc -y
run: sudo apt-get install xsltproc -y
- name: Install sarif tool
run: npm i -g @microsoft/sarif-multitool

View File

@ -1,113 +0,0 @@
# Cross-compile https://github.com/fprime-community/fprime-workshop-led-blinker
# Runs integration tests on aarch64-linux
name: "External Repo: AArch64 Linux LedBlinker"
on:
push:
branches: [ devel, release/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
env:
AARCH64_TOOLCHAIN_DIR: /tmp/aarch64-toolchain
AARCH64_TOOLCHAIN_URL: https://developer.arm.com/-/media/Files/downloads/gnu-a/10.2-2020.11/binrel/gcc-arm-10.2-2020.11-x86_64-aarch64-none-linux-gnu.tar.xz
ARM_TOOLS_PATH: /tmp/aarch64-toolchain
FPRIME_LOCATION: ./lib/fprime
jobs:
get-branch:
name: "Get target branch"
uses: ./.github/workflows/reusable-get-pr-branch.yml
with:
target_repository: fprime-community/fprime-workshop-led-blinker
cross-compilation:
name: "Cross Compilation"
runs-on: ubuntu-22.04
needs: get-branch
steps:
- name: "Checkout target repository"
uses: actions/checkout@v4
with:
submodules: false
repository: fprime-community/fprime-workshop-led-blinker
ref: ${{ needs.get-branch.outputs.target-branch }}
- name: "Overlay current F´ revision"
uses: actions/checkout@v4
with:
submodules: true
path: ${{ env.FPRIME_LOCATION }}
fetch-depth: 0
- uses: ./lib/fprime/.github/actions/setup
with:
location: ${{ env.FPRIME_LOCATION }}
- name: "Download and Setup AArch64 Linux Toolchain"
run: |
mkdir -p ${AARCH64_TOOLCHAIN_DIR}
wget -q ${AARCH64_TOOLCHAIN_URL}
tar -xf $(basename ${AARCH64_TOOLCHAIN_URL}) -C ${AARCH64_TOOLCHAIN_DIR} --strip-components 1
echo "${AARCH64_TOOLCHAIN_DIR}/bin" >> $GITHUB_PATH
- name: "Generate AArch64 Linux Build Cache"
run: |
fprime-util generate aarch64-linux
- name: "Build AArch64 Linux"
run: |
fprime-util build aarch64-linux
- name: "Prepare artifacts"
run: |
mkdir -p aarch64-linux-artifacts
cp -r ./build-artifacts aarch64-linux-artifacts
cp -r LedBlinker/Components/Led/test/int aarch64-linux-artifacts
- name: "Archive Build Artifacts"
uses: actions/upload-artifact@v4
with:
name: aarch64-linux-artifacts
path: aarch64-linux-artifacts
retention-days: 5
aarch64-integration:
name: "AArch64 Linux Integration Tests"
runs-on: [self-hosted, aarch64-linux]
needs: cross-compilation
steps:
- name: "Checkout F´ Repository"
uses: actions/checkout@v4
with:
sparse-checkout: "requirements.txt"
sparse-checkout-cone-mode: false
- name: "Setup environment"
run: |
python -m venv venv
. venv/bin/activate
pip install -r requirements.txt
- name: "Artifacts Download"
uses: actions/download-artifact@v4
with:
name: aarch64-linux-artifacts
- name: Run Integration Tests
run: |
DEPLOYMENT=LedBlinker_LedBlinkerDeployment
. venv/bin/activate
mkdir -p ci-logs
chmod +x ./build-artifacts/aarch64-linux/${DEPLOYMENT}/bin/${DEPLOYMENT}
fprime-gds --ip-client -d ./build-artifacts/aarch64-linux/${DEPLOYMENT} --logs ./ci-logs &
sleep 10
pytest --dictionary ./build-artifacts/aarch64-linux/${DEPLOYMENT}/dict/LedBlinkerDeploymentTopologyDictionary.json ./int/led_integration_tests.py
- name: "Archive logs"
uses: actions/upload-artifact@v4
if: always()
with:
name: aarch64-linux-logs
path: ci-logs
retention-days: 5

View File

@ -1,38 +0,0 @@
# Builds and runs UTs on https://github.com/fprime-community/fprime-examples
name: "External Repo: fprime-examples"
on:
push:
branches: [ devel, release/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
- '.gitignore'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
get-branch:
name: "Get target branch"
uses: ./.github/workflows/reusable-get-pr-branch.yml
with:
target_repository: nasa/fprime-examples
run:
needs: get-branch
name: ""
uses: ./.github/workflows/reusable-project-builder.yml
with:
target_repository: nasa/fprime-examples
build_location: FlightExamples
run_unit_tests: true
target_ref: ${{ needs.get-branch.outputs.target-branch }}
fprime_location: ./FlightExamples/lib/fprime

View File

@ -1,6 +1,6 @@
# Builds and runs UTs on https://github.com/fprime-community/fprime-tutorial-hello-world
name: "External Repo: Tutorial: HelloWorld"
name: "Tutorial: HelloWorld"
on:
push:
@ -13,10 +13,6 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
get-branch:
@ -31,7 +27,6 @@ jobs:
uses: ./.github/workflows/reusable-project-builder.yml
with:
target_repository: fprime-community/fprime-tutorial-hello-world
fprime_location: ./lib/fprime
build_location: Hello/HelloWorldDeployment
run_unit_tests: false # no UTs in HelloWorld project
build_location: HelloWorldDeployment
run_unit_tests: true
target_ref: ${{ needs.get-branch.outputs.target-branch }}

View File

@ -1,6 +1,6 @@
# Builds and runs UTs on https://github.com/fprime-community/fprime-workshop-led-blinker
name: "External Repo: Tutorial: LedBlinker"
name: "Tutorial: LedBlinker"
on:
push:
@ -13,10 +13,6 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
get-branch:
@ -31,7 +27,6 @@ jobs:
uses: ./.github/workflows/reusable-project-builder.yml
with:
target_repository: fprime-community/fprime-workshop-led-blinker
fprime_location: lib/fprime
build_location: LedBlinker/LedBlinkerDeployment
build_location: LedBlinker
run_unit_tests: true
target_ref: ${{ needs.get-branch.outputs.target-branch }}

View File

@ -1,6 +1,6 @@
# Builds and runs UTs on https://github.com/fprime-community/fprime-tutorial-math-component
name: "External Repo: Tutorial: MathComponent"
name: "Tutorial: MathComponent"
on:
push:
@ -13,10 +13,6 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
get-branch:
@ -31,7 +27,6 @@ jobs:
uses: ./.github/workflows/reusable-project-builder.yml
with:
target_repository: fprime-community/fprime-tutorial-math-component
build_location: MathProject/MathDeployment
fprime_location: ./lib/fprime
build_location: MathDeployment
run_unit_tests: true
target_ref: ${{ needs.get-branch.outputs.target-branch }}

View File

@ -1,115 +0,0 @@
name: "External Repo: Cookiecutters Tests"
on:
push:
branches: [ devel, release/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
# This workflow tests the project bootstrapping and cookiecutter templates by creating
# a new project, deployment and component and building them
# This uses the `expect` utility to feed input into the various cookiecutter prompts
jobs:
# -------- Retrieve target branches for fprime-tools and fprime-bootstrap --------
get-tools-branch:
name: "Get fprime-tools target branch"
uses: ./.github/workflows/reusable-get-pr-branch.yml
with:
target_repository: nasa/fprime-tools
get-bootstrap-branch:
name: "Get fprime-bootstrap target branch"
uses: ./.github/workflows/reusable-get-pr-branch.yml
with:
target_repository: fprime-community/fprime-bootstrap
# -------- Install target versions of the cookiecutter templates and validate -------
Validate:
runs-on: ubuntu-latest
needs: [ get-tools-branch, get-bootstrap-branch ]
steps:
# Checkout only the bootstrap.expect file, since the full F´ repo will be
# checked out as part of the fprime-bootstrap process
- name: "Retrieve bootstrap.expect file"
uses: actions/checkout@v4
with:
sparse-checkout: |
.github/actions/cookiecutter-check/bootstrap.expect
sparse-checkout-cone-mode: false
fetch-depth: 0
- name: "Setup Python"
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: "Install expect and fprime-bootstrap"
run: |
sudo apt-get update
sudo apt-get install expect
pip install git+https://github.com/fprime-community/fprime-bootstrap@${{ needs.get-bootstrap-branch.outputs.target-branch }}
- name: "Bootstrap Project"
run: |
expect .github/actions/cookiecutter-check/bootstrap.expect
# Overlay fprime@current_rev in new project so that we build with it in the tests
# current_rev is devel on the devel branch and the PR revision in PR checks
- name: "Overlay fprime@current_rev in new project"
uses: actions/checkout@v4
with:
submodules: true
path: ./my-project/lib/fprime
fetch-depth: 0
- name: "Update dependencies and install fprime-tools"
run: |
cd my-project
. fprime-venv/bin/activate
pip install -U -r ./requirements.txt
pip install git+https://github.com/nasa/fprime-tools@${{ needs.get-tools-branch.outputs.target-branch }}
- name: "Version Check"
run: |
cd my-project
. fprime-venv/bin/activate
fprime-util version-check
- name: "Test Generate and Build Project"
run: |
cd my-project
. fprime-venv/bin/activate
fprime-util generate
fprime-util build -j4
- name: "Test New Deployment and Build"
run: |
cd my-project
. fprime-venv/bin/activate
cd MyProject
expect ../lib/fprime/.github/actions/cookiecutter-check/deployment.expect
cd MyDeployment
fprime-util build -j4
- name: "Test New Component and Build"
run: |
cd my-project
. fprime-venv/bin/activate
cd MyProject
expect ../lib/fprime/.github/actions/cookiecutter-check/component.expect
cd MyComponent
fprime-util build -j4

View File

@ -1,36 +0,0 @@
# Cross-compile https://github.com/fprime-community/fprime-zephyr-reference
name: "External Repo: Zephyr Reference (Pico 2)"
on:
push:
branches: [ devel, release/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
get-branch:
name: "Get target branch"
uses: ./.github/workflows/reusable-get-pr-branch.yml
with:
target_repository: fprime-community/fprime-zephyr-reference
build:
name: "Zephyr Build"
needs: get-branch
uses: ./.github/workflows/reusable-project-ci.yml
with:
target_repository: fprime-community/fprime-zephyr-reference
target_ref: ${{ needs.get-branch.outputs.target-branch }}
ci_config_file: ./lib/fprime-zephyr/ci/sample-configs/pico2.yml

View File

@ -1,36 +0,0 @@
# Cross-compile https://github.com/fprime-community/fprime-zephyr-reference
name: "External Repo: Zephyr Reference (Teensy 4.1)"
on:
push:
branches: [ devel, release/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
get-branch:
name: "Get target branch"
uses: ./.github/workflows/reusable-get-pr-branch.yml
with:
target_repository: fprime-community/fprime-zephyr-reference
build:
name: "Zephyr Build"
needs: get-branch
uses: ./.github/workflows/reusable-project-ci.yml
with:
target_repository: fprime-community/fprime-zephyr-reference
target_ref: ${{ needs.get-branch.outputs.target-branch }}
ci_config_file: ./lib/fprime-zephyr/ci/sample-configs/teensy41.yml

View File

@ -1,7 +1,7 @@
# Cross-compile https://github.com/fprime-community/fprime-workshop-led-blinker
# Runs integration tests on RaspberryPi
name: "External Repo: RPI LedBlinker"
name: "RPI LedBlinker"
on:
push:
@ -14,14 +14,9 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
env:
RPI_TOOLCHAIN_DIR: /tmp/rpi-toolchain
FPRIME_LOCATION: ./lib/fprime
jobs:
get-branch:
@ -45,11 +40,11 @@ jobs:
uses: actions/checkout@v4
with:
submodules: true
path: ${{ env.FPRIME_LOCATION }}
path: ./fprime
fetch-depth: 0
- uses: ./lib/fprime/.github/actions/setup
- uses: ./fprime/.github/actions/setup
with:
location: ${{ env.FPRIME_LOCATION }}
location: ./fprime
- name: "Setup RPI Toolchain"
uses: fprime-community/setup-rpi-sysroot@main
- name: "Generate RPI Build Cache"
@ -57,14 +52,13 @@ jobs:
fprime-util generate raspberrypi
- name: "Build RPI"
run: |
cd LedBlinker/LedBlinkerDeployment
fprime-util build raspberrypi
- name: "Prepare artifacts"
run: |
mkdir -p rpi-artifacts
cp -r ./build-artifacts rpi-artifacts
cp -r LedBlinker/Components/Led/test/int rpi-artifacts
- name: "Archive Build Artifacts"
cp -r Components/Led/test/int rpi-artifacts
- name: 'Archive Build Artifacts'
uses: actions/upload-artifact@v4
with:
name: rpi-artifacts
@ -73,14 +67,13 @@ jobs:
RPI-integration:
name: "RPI Integration Tests"
runs-on: [self-hosted, raspberrypi]
runs-on: self-hosted
needs: cross-compilation
steps:
- name: "Checkout F´ Repository"
uses: actions/checkout@v4
with:
sparse-checkout: "requirements.txt"
sparse-checkout-cone-mode: false
sparse-checkout: 'requirements.txt'
- name: "Setup environment"
run: |
python -m venv venv
@ -92,14 +85,13 @@ jobs:
name: rpi-artifacts
- name: Run Integration Tests
run: |
DEPLOYMENT=LedBlinker_LedBlinkerDeployment
. venv/bin/activate
mkdir -p ci-logs
chmod +x ./build-artifacts/raspberrypi/${DEPLOYMENT}/bin/${DEPLOYMENT}
fprime-gds --ip-client -d ./build-artifacts/raspberrypi/${DEPLOYMENT} --logs ./ci-logs &
chmod +x ./build-artifacts/raspberrypi/LedBlinker/bin/LedBlinker
fprime-gds --ip-client -d ./build-artifacts/raspberrypi/LedBlinker --logs ./ci-logs &
sleep 10
pytest --dictionary ./build-artifacts/raspberrypi/${DEPLOYMENT}/dict/LedBlinkerDeploymentTopologyDictionary.json ./int/led_integration_tests.py
- name: "Archive logs"
pytest --dictionary ./build-artifacts/raspberrypi/LedBlinker/dict/LedBlinkerTopologyDictionary.json ./int/led_integration_tests.py
- name: 'Archive logs'
uses: actions/upload-artifact@v4
if: always()
with:

View File

@ -1,79 +0,0 @@
# Cross-compile https://github.com/fprime-community/fprime-system-reference
name: "External Repo: System Reference"
on:
push:
branches: [ devel, release/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
env:
RPI_TOOLCHAIN_DIR: /tmp/rpi-toolchain
PKG_CONFIG_PATH: ${{ github.workspace }}/libcamera/build/lib/pkgconfig/
jobs:
get-branch:
name: "Get target branch"
uses: ./.github/workflows/reusable-get-pr-branch.yml
with:
target_repository: fprime-community/fprime-system-reference
default_target_ref: main
cross-compilation:
name: "Cross Compilation"
runs-on: ubuntu-22.04
needs: get-branch
steps:
- name: "Checkout target repository"
uses: actions/checkout@v4
with:
submodules: true
repository: fprime-community/fprime-system-reference
ref: ${{ needs.get-branch.outputs.target-branch }}
- name: "Overlay current F´ revision"
uses: actions/checkout@v4
with:
submodules: true
path: ./fprime
fetch-depth: 0
- uses: ./fprime/.github/actions/setup
with:
location: ./fprime
- name: "Install meson and ninja"
run: |
pip3 install meson ninja ply
shell: bash
- name: "Setup RPI Toolchain"
uses: fprime-community/setup-rpi-sysroot@main
with:
# libcamera requires 8+
toolchain: "https://developer.arm.com/-/media/Files/downloads/gnu-a/8.3-2019.03/binrel/gcc-arm-8.3-2019.03-x86_64-arm-linux-gnueabihf.tar.xz"
- name: "Add RPI Toolchain to PATH"
run: |
echo "PATH=$RPI_TOOLCHAIN_DIR/bin:$PATH" >> $GITHUB_ENV
- name: Build libcamera
run: |
cd libcamera
meson setup build -Dprefix=${{ github.workspace }}/libcamera/build/ -Dpipelines=rpi/vc4 -Dipas=rpi/vc4 --cross-file ../libcamera-aarch32.txt
cd build
ninja
ninja install
- name: "Generate System Reference build cache"
working-directory: SystemReference
run: |
fprime-util generate raspberrypi
- name: "Build System Reference"
working-directory: SystemReference
run: |
fprime-util build raspberrypi

View File

@ -1,47 +0,0 @@
name: "Code Format Check"
on:
push:
branches: [ devel, release/**, ci/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
jobs:
cpp-formatting:
name: C++ Formatting
runs-on: ubuntu-22.04
steps:
- name: "Checkout F´ Repository"
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- name: "Setup Python"
uses: actions/setup-python@v5
with:
python-version: 3.12
- uses: ./.github/actions/setup
- name: "Check C++ Formatting"
env:
# Svc is currently listing all but Svc/FpySequencer
CHECKED_DIRS: >-
CFDP
default
Drv
FppTestProject
Fw
Os
Ref
Svc
TestUtils
Utils
run: |
fprime-util format --check --dirs $CHECKED_DIRS
shell: bash

View File

@ -11,10 +11,6 @@ on:
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
fpptest:
@ -31,17 +27,17 @@ jobs:
pip3 install -r ./requirements.txt
shell: bash
- name: "Generate UT build cache"
working-directory: ./FppTestProject
working-directory: ./FppTest
run: |
fprime-util generate --ut -DFPRIME_ENABLE_JSON_MODEL_GENERATION=ON
fprime-util generate --ut
shell: bash
- name: "Build UTs"
working-directory: ./FppTestProject/FppTest
working-directory: ./FppTest
run: |
fprime-util build --ut
shell: bash
- name: "Run UTs"
working-directory: ./FppTestProject/FppTest
working-directory: ./FppTest
run: |
fprime-util check
shell: bash
@ -50,5 +46,5 @@ jobs:
if: always()
with:
name: FppTest-Logs
path: ./FppTestProject/build-fprime-automatic-native-ut/Testing/Temporary/*.log
path: ./FppTest/build-fprime-automatic-native-ut/Testing/Temporary/*.log
retention-days: 5

View File

@ -1,40 +0,0 @@
name: "fpp-to-json Test"
on:
push:
branches: [ devel, release/** ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
paths-ignore:
- 'docs/**'
- '**.md'
- '.github/actions/spelling/**'
- '.github/ISSUE_TEMPLATE/**'
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
fpp-to-json-ref:
name: Ref Deployment
runs-on: ubuntu-22.04
steps:
- name: "Checkout F´ Repository"
uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: true
- uses: ./.github/actions/setup
- name: "Generate Ref Deployment"
working-directory: ./Ref
run: |
fprime-util generate -DFPRIME_ENABLE_JSON_MODEL_GENERATION=ON
shell: bash
- name: "Run fpp-to-json on Ref topology"
working-directory: ./Ref/Top
run: |
DEPENDENCIES=$(fpp-depend ../build-fprime-automatic-native/locs.fpp *.fpp)
fpp-to-json ${DEPENDENCIES} *.fpp
shell: bash

View File

@ -5,10 +5,6 @@ on:
branches: [devel, docs/new-website]
pull_request:
branches: [devel]
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
markdown-link-check:
@ -20,7 +16,6 @@ jobs:
- name: 'Generate Doxygen and CMake docs'
run: |
sudo apt-get update
sudo apt-get install -y doxygen
./docs/doxygen/generate_docs.bash

View File

@ -6,23 +6,19 @@ name: Python Dependency Check
on:
push:
paths:
- "requirements.txt"
- 'requirements.txt'
pull_request:
paths:
- "requirements.txt"
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
- 'requirements.txt'
jobs:
pip-install:
runs-on: ${{ matrix.runner }}
strategy:
matrix:
python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
# Purposefully test on both ARM and Intel macOS (macos-latest is ARM)
runner: [macos-15-intel, macos-latest, ubuntu-22.04, ubuntu-latest]
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
# macos-13 is the last Intel-family runner; macos-latest is ARM
runner: [macos-13, macos-latest, ubuntu-22.04, ubuntu-latest]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}

View File

@ -6,10 +6,6 @@ on:
pull_request:
# The branches below must be a subset of the branches above
branches: [ devel, release/** ]
# Cancel in-progress runs if a newer run is started on a given PR
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: ${{ !contains(github.ref, 'devel') && !contains(github.ref, 'release/')}}
jobs:
format:
@ -17,10 +13,10 @@ jobs:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- name: Setup Python 3.11
- name: Setup Python 3.8
uses: actions/setup-python@v5
with:
python-version: 3.11
python-version: 3.8
- name: Check formatting
run: |
pip install click==8.0.4 black==21.6b0

View File

@ -1,10 +1,5 @@
# Will return a target branch in the following priority
# - If the event that triggered this action is a PR and has a matching `pr-<number>` branch on
# target_repository, then return the name of that branch.
# - If the name of the branch the PR is trying to merge into has a matching branch name on target_repo,
# then return that branch name. (this is useful for example for release/ branches, to have a tracking release branch on the other repo)
# - Otherwise, return default_target_ref.
#
# If the event that triggered this action is a PR and has a matching `pr-<number>` branch on
# target_repository, then return the name of that branch. Otherwise, return default_target_ref.
# See the CONTRIBUTING.md for info on why this is used.
name: 'Get PR Branch'
@ -17,7 +12,7 @@ on:
type: string
required: true
default_target_ref:
description: 'Ref to use if no target branch is found'
description: 'Ref to use if the PR branch is not found'
type: string
required: false
default: devel
@ -34,19 +29,14 @@ jobs:
- name: "Get target branch"
id: get_target_branch
run: |
BRANCH_NAME="${GITHUB_REF#refs/heads/}"
echo "Looking for 'pr-${{ github.event.number }}'/'${BRANCH_NAME}'"
response_code_pr=`curl -w '%{response_code}' https://api.github.com/repos/${{ inputs.target_repository }}/branches/pr-${{ github.event.number }} -o /dev/null`
response_code_branch=`curl -w '%{response_code}' https://api.github.com/repos/${{ inputs.target_repository }}/branches/${BRANCH_NAME} -o /dev/null`
if [[ "${{ github.event_name }}" == "pull_request" && "$response_code_pr" == "200" ]]; then
response_code=`curl -w '%{response_code}' https://api.github.com/repos/${{ inputs.target_repository }}/branches/pr-${{ github.event.number }} -o /dev/null`
if [[ "${{ github.event_name }}" == "pull_request" && "$response_code" == "200" ]]; then
echo "TARGET_BRANCH=pr-${{ github.event.number }}" >> $GITHUB_OUTPUT
echo "PR branch found, using pr-${{ github.event.number }}"
elif [[ "$response_code_branch" == "200" ]]; then
echo "TARGET_BRANCH=${BRANCH_NAME}" >> $GITHUB_OUTPUT
echo "Base branch found, using ${BRANCH_NAME}"
else
echo "TARGET_BRANCH=${{ inputs.default_target_ref }}" >> $GITHUB_OUTPUT
echo "PR branch not found, using ${{ inputs.default_target_ref }}"
fi
shell: bash

View File

@ -49,7 +49,7 @@ jobs:
- name: "Checkout target repository"
uses: actions/checkout@v4
with:
submodules: recursive
submodules: false
repository: ${{ inputs.target_repository }}
ref: ${{ inputs.target_ref }}
- name: "Overlay current F´ revision"
@ -69,7 +69,7 @@ jobs:
- name: "Build"
working-directory: ${{ inputs.build_location }}
run: |
fprime-util build -j8 ${{ runner.debug == '1' && '--verbose' || '' }} ${{ inputs.target_platform }}
fprime-util build ${{ runner.debug == '1' && '--verbose' || '' }} ${{ inputs.target_platform }}
shell: bash
runUT:
@ -80,7 +80,7 @@ jobs:
- name: "Checkout target repository"
uses: actions/checkout@v4
with:
submodules: recursive
submodules: false
repository: ${{ inputs.target_repository }}
ref: ${{ inputs.target_ref }}
- name: "Overlay current F´ revision"
@ -100,11 +100,11 @@ jobs:
- name: "Build UTs"
working-directory: ${{ inputs.build_location }}
run: |
fprime-util build --ut -j8 ${{ runner.debug == '1' && '--verbose' || '' }} ${{ inputs.target_platform }}
fprime-util build --ut ${{ runner.debug == '1' && '--verbose' || '' }} ${{ inputs.target_platform }}
shell: bash
- name: "Run Unit Tests"
working-directory: ${{ inputs.build_location }}
run: |
fprime-util check -j8 ${{ runner.debug == '1' && '--verbose' || '' }} ${{ inputs.target_platform }}
fprime-util check ${{ runner.debug == '1' && '--verbose' || '' }} ${{ inputs.target_platform }}
shell: bash

View File

@ -1,85 +0,0 @@
# This workflow is intended for reuse by other workflows and will not run directly (no triggers).
# The behavior is to run the steps of fprime-ci.
name: "F´ CI - Reusable Workflow"
on:
workflow_call:
inputs:
target_repository:
description: "Additional external repository to checkout (<owner>/<repo>)"
required: true
type: string
fprime_location:
description: "Relative path from the external project root to its F´ submodule"
required: false
type: string
default: "./lib/fprime"
target_ref:
description: "Branch on target to checkout"
required: false
type: string
default: "devel"
ci_config_file:
required: true
type: string
run_unit_tests:
description: "Run an additional job in parallel to run unit tests."
required: false
type: boolean
default: true
runs_on:
description: "Platform to run on. Defaults to ubuntu-22.04"
required: false
type: string
default: "ubuntu-22.04"
runs_on_int:
description: "Platform to run integration tests on. Defaults to: apple-ci"
required: false
type: string
default: "apple-ci"
runs_on_ut:
description: "Platform to run UTs on. Defaults to ubuntu-22.04"
required: false
type: string
default: "ubuntu-22.04"
jobs:
build:
runs-on: ${{ inputs.runs_on }}
name: "Build"
steps:
- name: "Make Space"
uses: nasa/fprime-actions/make-space@devel
- name: "Set up target repository"
uses: nasa/fprime-actions/external-repository-setup@devel
with:
target_repository: ${{ inputs.target_repository }}
fprime_location: ${{ inputs.fprime_location }}
target_ref: ${{ inputs.target_ref }}
stage: build
- name: "Build Binary"
run: |
CCACHE_DISABLE=1 fprime-ci -c ${{ inputs.ci_config_file }} --add-stage build
- name: Archive Results
uses: actions/upload-artifact@v4
with:
name: archive.tar.gz
path: ./archive.tar.gz
integration-tests:
needs: build
name: "Integration Tests"
runs-on: ${{ inputs.runs_on_int }}
steps:
- name: "Set up target repository"
uses: nasa/fprime-actions/external-repository-setup@devel
with:
target_repository: ${{ inputs.target_repository }}
fprime_location: ${{ inputs.fprime_location }}
target_ref: ${{ inputs.target_ref }}
stage: int
- name: Pull Archive Results
uses: actions/download-artifact@v4
with:
name: archive.tar.gz
- name: "Integration tests"
run: |
fprime-ci -c ${{ inputs.ci_config_file }} --skip-stage build

6
.gitignore vendored
View File

@ -1,6 +1,5 @@
build/
build-fprime-*/
build-artifacts/
build*/
gtest/
docs-cache/
CMakePresets.json
@ -26,9 +25,7 @@ RemoteSystemsTempFiles
*.stackdump
Dict
*.core
*.swp
**/coverage/
*.gcov
!**/test/ut/output/*.gcov
GTestBase.*
@ -70,6 +67,7 @@ py_dict
build-fprime-automatic*
/Ref/bin
/RPI/bin
/ci-venv/
/ci-logs*
/ci-Framework-logs*

View File

@ -1,52 +0,0 @@
# This file is used to generate the navigation structure for the documentation website
# The website uses MkDocs Material and this file is used by the mkdocs-awesome-nav plugin
# https://github.com/lukasgeiter/mkdocs-awesome-nav
#
# Paths prefixed with ../ are not publicly available
nav:
- Home: '../'
- Overview: '../overview'
- Getting Started:
- 'Getting Started': docs/getting-started/index.md
- 'Installation and Troubleshooting': docs/getting-started/installing-fprime.md
- Documentation:
- docs/index.md
- Tutorials:
- Tutorials Index: docs/tutorials/index.md
- 'Hello World': tutorials-hello-world/docs/hello-world.md
- 'LED Blinker': tutorials-led-blinker/docs/led-blinker.md
- 'MathComponent': tutorials-math-component/docs/math-component.md
- 'Cross-Compilation Setup': docs/tutorials/cross-compilation.md
- 'Arduino LED Blinker': tutorials-arduino-led-blinker/docs/arduino-led-blinker.md
- User Manual:
- User Manual Index: docs/user-manual/index.md
- Overview: docs/user-manual/overview/
- Framework: docs/user-manual/framework/
- FPP: 'https://nasa.github.io/fpp/fpp-users-guide.html'
- GDS: docs/user-manual/gds
- Design Patterns: docs/user-manual/design-patterns/
- Build System: docs/user-manual/build-system/
- Security: docs/user-manual/security/
- How To: docs/how-to
- Reference:
- docs/reference/index.md
- APIs:
- C++: docs/reference/api/cpp/html/
- CMake: docs/reference/api/cmake/
- Component SDDs:
- Svc:
- "Svc/**/docs/sdd.md"
- Fw:
- DataStructures:
- "Fw/DataStructures/docs/**.md"
- "Fw/**/docs/sdd.md"
- Drv:
- "Drv/**/docs/sdd.md"
- Specifications:
- FPP Language Spec: 'https://nasa.github.io/fpp/fpp-spec.html'
- GDS Plugins: docs/reference/gds-plugins/
- "docs/reference/*.md"
- Support: '../support'
- Events: '../events'
- News: "../news"

View File

@ -1,84 +0,0 @@
# F´ Generative AI Usage Guidelines
We're excited about the potential of generative AI to help make [F´](https://github.com/nasa/fprime) development more productive, enjoyable, and accessible! Whether you're using AI to write code, improve documentation, or learn about complex systems, we welcome the thoughtful use of these powerful tools in your F´ contributions.
This guide shares our community's approach to using generative AI effectively and responsibly. You'll find practical tips, best practices, and simple guidelines to help you get the most out of AI tools while maintaining the quality standards that make F´ great.
## Our Position on Generative AI
F´ embraces technological advancement and innovation. Generative AI tools can assist with:
- Code generation and refactoring
- Documentation creation and improvement
- Test case development
- Debugging assistance
- Design pattern suggestions
- Learning and understanding our codebases
However, the use of generative AI must align with our commitment to high technical standards, quality, and the collaborative nature of open source development.
## Disclosure
To maintain transparency and enable effective code review, contributors **must disclose all generative AI usage**.
This includes contributions in the forms of **Pull Requests**, **Issues** or **Discussions**.
### Pull Request Submissions for Contributors
1. **Fill-In the "AI Used (y/n)" table entry** in the pull request template disclosing whether Gen AI was used in the pull request
2. **Provide details in the "AI Usage" section** describing how generative AI was utilized
### What to Disclose
Include information about:
- **Type of assistance**: Code generation, documentation, debugging, testing, refactoring, etc.
- **Scope of usage**: Which files, functions, or sections were AI-assisted
- **Tool(s) used**: Name of the AI system(s) employed (e.g., GitHub Copilot, ChatGPT, etc.)
- **Level of modification**: Whether AI-generated content was used as-is, modified, or used as inspiration
### What AI Cannot Replace
- **Domain expertise** in flight software and embedded systems
- **Understanding of F Prime architecture** and design patterns
- **Critical thinking** about system requirements and constraints
- **Human judgment** on safety-critical decisions
- **Community collaboration** and peer review processes
## Best Practices
### Providing Guidelines to AI Tools
When working with generative AI, provide clear rules and context to improve code quality and consistency. For Example:
- **Reference F´ Style Guidelines**: Include the [F´ Style Guidelines](https://github.com/nasa/fprime/wiki/F%C2%B4-Style-Guidelines) in your prompts
- **Enforce coding standards**: Instruct AI to avoid "magic numbers" and use descriptive variable names or comments
- **Provide project context**: Share relevant F´ architectural patterns and component structures
### Quality and Responsibility
- **Review all AI-generated code** thoroughly before submission
- **Verify necessity and relevance** - Remove verbose or unnecessary AI-generated content
- **Be concise** - Edit AI output to be clear and to-the-point
- **Ensure compliance** with F Prime coding standards and style guidelines
- **Verify correctness** and test all AI-assisted implementations
- **Maintain authorship responsibility** - you are accountable for all submitted code regardless of its origin
### Security
- **Be cautious with external dependencies** suggested by AI tools
- **Validate security implications** of AI-generated code, especially for flight software
### Code Review Considerations
- **Provide context** to reviewers about AI usage to enable informed evaluation
- **Be prepared to explain** AI-generated logic and design decisions
- **Accept feedback gracefully** - AI-generated code is not exempt from revision requests
- **Document complex AI-assisted algorithms** clearly for future maintainers
## Getting Help
If you have questions about appropriate AI usage or need guidance on disclosure:
- Open a [Discussion](https://github.com/nasa/fprime/discussions) for community input
- Contact the Community Managers for specific guidance

View File

@ -0,0 +1,2 @@
# Add autocoder subdirectories
add_fprime_subdirectory("${CMAKE_CURRENT_LIST_DIR}/Python/")

View File

@ -0,0 +1,9 @@
# Publish this directory as an INTERNAL cache variable so other parts of the
# build can locate the Python autocoder tree.
set(PYTHON_AUTOCODER_DIR "${CMAKE_CURRENT_LIST_DIR}" CACHE INTERNAL "Directory for Python autocoder")
# Core autocoder sources are always part of the build.
add_fprime_subdirectory("${CMAKE_CURRENT_LIST_DIR}/src/")
# Templates and tests build for UT configurations only.
# NOTE(review): __FPRIME_NO_UT_GEN__ is defined elsewhere in the fprime build
# system and appears to suppress unit-test generation — confirm its semantics there.
if (BUILD_TESTING AND NOT __FPRIME_NO_UT_GEN__)
add_fprime_subdirectory("${CMAKE_CURRENT_LIST_DIR}/templates/")
add_fprime_subdirectory("${CMAKE_CURRENT_LIST_DIR}/test/")
endif()

155
Autocoders/Python/README.md Normal file
View File

@ -0,0 +1,155 @@
# Autocoders Python Source
## Autocoder tools
### codegen.py
Takes component, port type, serializable, topology, or enum XML files as input and produces FPrime Autocoded Ac.cpp/hpp files. Codegen is also set up to generate *Impl_[hpp,cpp] template implementation template files with -t/--template, unit test component files with -u/--unit-test, logger files through -l/--loggers and -L/--logger-output-file, dependency files through -d/--dependency-file, dictionary files through -g/--default_dict, -x/--xml_topology_dict, -T/--default_topology_dict, -a/--ampcs_dict, and -A/--ampcs_topology_dict, HTML docs through -H/--html_docs, Markdown docs through -m/--md_docs, and reports on component interfaces through -r/--gen_report.
### cosmosgen.py
Takes a topology XML file as input and generates all the configuration files necessary to add a deployment to Ball Aerospace's COSMOS suite of tools for sending and receiving commands and telemetry. The output directory defaults to $BUILD_ROOT/COSMOS. You can also delete a deployment from the COSMOS directory by using "-r DEPLOYMENT_NAME". Full documentation can be found in Autocoders/Python/src/fprime_ac/utils/cosmos/.
### JSONDictionaryGen.py
Takes a topology XML file as input and produces a JSON dictionary with naming convention "Deployment" + Dictionary.json. The JSON dictionary is generated within the directory where JSONDictionaryGen was run, but this can be overridden by inputting a new output directory in -p/--path.
### testgen.py
Testgen generates a test component in the same way that codegen does. It takes a component XML file as input and generates Tester.cpp/hpp, TesterBase.cpp/hpp, GTestBase.cpp/hpp, and a TestMain.cpp. TestMain.cpp and Tester.cpp/hpp won't be overwritten if they already exist. Rather, one should use command line option -m/--maincpp to overwrite TestMain.cpp and -f/--force_tester to overwrite Tester.cpp and Tester.hpp. Testgen supports absolute paths to inputted files, so you are able to call testgen on any file and the test component will be generated in the directory that testgen was called from.
### gds_dictgen.py
GDS Dictgen is a tool for generating GDS XML Dictionaries that are named *TopologyAppDictionary.xml. The tool takes topology XML as input, and the generated dictionary contains all enums, serializables, commands, events, channels, and parameters present within the topology. GDS Dictgen supports absolute paths to inputted files, so you are able to call GDS dictgen on any file and the dictionary will be generated in the directory that GDS dictgen was called from.
### pymod_dictgen.py
Pymod Dictgen is a tool for generating python modules for the GDS. The tool takes topology XML as input, and it generates command, event, channel, and parameter python modules within their own respective directories. The output path can be changed with command line option -o/--dict_dir. Pymod dictgen supports absolute paths to inputted files, so you are able to call pymod dictgen on any file and the directories will be generated in the directory that pymod dictgen was called from.
### implgen.py
Implgen is a tool that takes in component XML and generates ComponentImpl.cpp/hpp files. It is formatted in a very similar way to testgen.py. It generates the Impl files within whatever directory it was run in, and it has its own pytest in Autocoders/Python/test/implgen. Implgen also allows for absolute paths.
### tlmLayout.py
Takes as input a telemetry layout in tab-delimited format and generates .hpp files of the packet layout(s) and a table that can be used to form the packet(s).
## Autocoders Python directory structure
### bin/
Directory containing all 7 autocoder tools and a few scripts to invoke them.
### doc/
Directory containing telemetry packet / layout documentation. It also contains a changelog and a readme for the autocoder, but both are outdated.
### schema/
Directory containing all autocoder RelaxNG and Schematron XML schemas. Schemas are found within the default directory. The sample_XML_files directory contains sample XML files for the schemas that are run using the testSchemas.py test. These sample XML files were made for the RelaxNG schemas, but they have not been updated for the schematron schemas, so they may not work with them.
### src/
Directory containing all python code for the autocoder tools. Codegen uses the visitor pattern to generate its output files, and the generators for the visitor pattern are found in generators while the visitors are found in generators/visitors. Generators/templates contains all the cheetah templates required to run the autocoder. In a fresh repo, these should be built by making F Prime, by making the autocoder unit tests, or by manually running cheetah compile within the templates directory. The parsers directory contains all the custom autocoder XML parser modules that the autocoder uses to create models from raw F Prime XML. Lastly, utils/cosmos contains the modules used by the cosmosgen tool. There is a readme file within this directory that explains what all of this code does.
#### src/generators/
| File | Description |
| ------------------------- | ---------------------------------------------------------------------------- |
| AbstractGenerator.py | Defines the interface for each portion or the code to be generated |
| ChannelBody.py | Main entry point of channel class body |
| ChannelHeader.py | Main entry point of channel class header |
| CommandBody.py | Main entry point of command class body |
| CommandHeader.py | Main entry point of command class header |
| DictBody.py | Main entry point of dict class body |
| DictHeader.py | Main entry point of dict class header |
| DictStart.py | Main entry point of start of dict class code |
| EventBody.py | Main entry point of event class body |
| EventHeader.py | Main entry point of event class header |
| FinishSource.py | Main entry point of end of file code |
| formatters.py | Contains various routines to output formatted strings for code generation |
| GenFactory.py | Factory for instancing the interface and connecting appropriate visitors |
| HtmlDocPage.py | Main entry point for generation of html documentation for grnd interfaces |
| HtmlStartPage.py | Main entry point for generation of html documentation for grnd interfaces |
| Includes1.py | Main entry point for generation of a first set of include statements |
| Includes2.py | Main entry point for generation of a second set of include statements |
| InitFiles.py | Main entry point for generation of initializing code |
| InstanceDictBody.py | Main entry point for generation of instance dict body |
| InstanceDictHeader.py | Main entry point for generation of instance dict header |
| InstanceDictStart.py | Main entry point for generation of start of instance dict class code |
| MdDocPage.py | Main entry point of markdown documentation for ground interfaces |
| MdStartPage.py | Main entry point for start of code for markdown documentation |
| Namespace.py | Main entry point for generation of initialization code |
| Private.py | Main entry point for generation of private code within a class |
| Protected.py | Main entry point for generation of protected code within a class |
| Public.py | Main entry point for generation of public code within a class |
| StartChannel.py | Main entry point for start of code for channel class |
| StartCommand.py | Main entry point for start of code for command class |
| StartEvent.py | Main entry point for start of code for event class |
| StartSource.py | Main entry point for start of code |
#### src/models/
| File | Description |
| --- | ----------- |
| Arg.py | Argument meta-model that is instanced as an association to port instances |
| Channel.py | Channel meta-model - list of channels |
| Command.py | Command meta-model - list of commands |
| CompFactory.py | Factory class for instancing the component and building port and arg config |
| Component.py | Component meta-model and main interface for code generation queries |
| Event.py | Event meta-model - list of events |
| InternalInterface.py | Internal interface meta-model - list of internal interfaces |
| ModelParser.py | Contains various routines for parsing the meta-model object and returning lists, dicts, etc. for visitors to utilize in mapping to code |
| Parameter.py | Parameter meta-model - list of parameters |
| Port.py | Port meta-model contained within a component class |
| PortFactory.py | Factory class for instancing the port interface type and building up port and arg config |
| Serialize.py | Serializable meta-model |
| TopoFactory.py | Factory class for instancing topology meta-model |
| Topology.py | Topology meta-model and main interface for code generation queries |
#### src/parsers/
| File | Description |
| --- | ----------- |
| AbstractParser.py | Defines the shared interfaces for parsing, validation and getter methods for component, port, and topology xml |
| XmlComponentParser.py | Parses XML component description files |
| XmlEnumParser.py | Parses XML enum description files |
| XmlParser.py | Parent class of the rest of the XmlParser classes |
| XmlPortsParser.py | Parses XML port description files |
| XmlSerializeParser.py | Parses XML serialize description files |
| XmlTopologyParser.py | Parses XML topology description files |
#### src/utils/
| File | Description |
| --- | ----------- |
| ac_cli_helpers.py | Contains helper methods to parse arguments and to run the autocoder CLI and pass any arguments it needs |
| AddSysPath.py | Contains a function that adds a directory to the Python sys.path value - copied from Python cookbook |
| CaltechHeader.py | Contains the caltech header that should be used in autocoder classes |
| ConfigManager.py | Simple configuration class patterned after the Keck observation sequencer GUI and the Tahoe CalVal pipeline |
| DictTypeConverter.py | Contains methods to convert types to dictionary types and to replace special characters in format strings |
| DiffAndRename.py | Mainly used for difference and rename routines |
| DumpObj.py | Contains methods to print nicely formatted overviews of objects |
| EnumDictCheck.py | A structure used to report enumeration size errors |
| EnumGenerator.py | Generator to produce serializable enums |
| Logger.py | Sets up the logging for all other scripts based on the Python logging module - not a standalone file |
| ParseC.py | Contains a set of Python functions that parse C code |
| pyparsing.py | Python parser methods created externally by Paul T. McGuire |
| TopDictGenerator.py | Generator that produces a topology dictionary from parsed XML |
| TypesList.py | Contains types_list and port_types_list which contain types allowed in commands/telemetry/parameters/events |
| VersionFile.py | Check if file exists, and creates a backup copy using an appended versioning string - copied directly from the language cookbook |
| XmlParser.py | General XML parser class based on the Python xml2obj recipe |
### templates/
Directory containing a sample unit test for testing components. There are sample F Prime XML files, a sample Impl.cpp file, and the ut itself.
### test/
Directory containing all unit tests invoked by the cmake system. A list of working unit tests is present within CMakeLists.txt. The directories testgen, dictgen, schematron, and enum_xml contain pytests which should be run using pytest rather than cmake. Testgen and enum_xml additionally contain unit tests, but they are set up within cmake to be built as utilities rather than unit tests, so they will not be invoked when running all unit tests in cmake. Dependencies and file i/o within these unit tests prevent them from being successfully run outside of their pytests, so this should be kept this way.
### CMakeLists.txt
Cmake file that is present in all parts of directory tree to be built by cmake. If cmake was invoked with type testing, templates and test get built. The src directory is always built as well.
### Requirements
The Autocoder's requirements are covered by installing the F´ software package. This is covered by the installation process.
## Schematron
In Autocoders/Python/schema/default there are various schematron .rng files mixed with the normal F Prime RelaxNG .rng schema files. The schematron files validate xml in the same way as the RelaxNG schemas (within the XmlParser classes), but these schematron files will only generate command line errors, not exceptions like the schema files will. Here is a short description of each file:
| Schematron File | Description |
| ------------------------- | ---------------------------------------------------------------------------- |
| top_uniqueness_schematron.rng | Compares base_id attributes of all instance xml elements by checking that the values within their base_id_window don't overlap |
| active_comp_schematron.rng | Checks that all active component xml elements have a child port element that is of type async_input |
| enum_value_schematron.rng | Checks that if one enum item xml element has an attribute "value" then all enum item xml elements have an attribute "value". It also checks uniqueness of these values |
| comp_uniqueness_schematron.rng | Checks that all ID's and opcodes for their respective elements within a component xml element are unique - this is the same as the bottom four schematron files but this file only runs on component xml tags |
| command_op_schematron.rng | Checks uniqueness of command opcodes in command xml elements |
| event_id_schematron.rng | Checks uniqueness of event id's in event xml elements |
| channel_id_schematron.rng | Checks uniqueness of channel id's in channel xml elements |
| parameter_id_schematron.rng | Checks uniqueness of parameter id's in parameter xml elements |
### Schematron Syntax
The root element of a schematron file is a schema tag with attribute xmlns set to "http://purl.oclc.org/dsdl/schematron". You are able to define any number of patterns with any number of rules within these files, where a pattern is a set of rules and a rule defines a set of asserts that all run on an xml element where the element's name is defined in the rule's context attribute. Schematron asserts use XPath syntax. It is helpful to find XPath cheat sheets online and have them open when defining a schematron assert.

2
Autocoders/Python/bin/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
isf_dict
ampcs_dict

1327
Autocoders/Python/bin/codegen.py Executable file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,748 @@
#!/usr/bin/env python3
# ===============================================================================
# NAME: TlmLayout.py
#
# DESCRIPTION: Takes as input a telemetry layout in tab-delimited format and
# generates .hpp of the packet layout(s) and a table that can be
# used to form the packet(s)
#
# USAGE:
#
# AUTHOR: Len Day
# EMAIL: len.day@jpl.nasa.gov
# DATE CREATED : 09/28/15
#
# Copyright 2015, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
#
# Python standard modules
#
import argparse
import copy
import csv
from Cheetah.Template import Template
#
# Python extension modules and custom interfaces
#
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
tlm_input_line_num = 0
tlm_max_packet_bytes = 0
tlm_packet_list = []
tlm_duration = None
tlm_period = None
tlm_packet_id_schedule_list = []
tlm_packet_max_ids_list = []
tlm_packet_to_chan_list = []
# ===============================================================================
#
class HeaderItem:
def __init__(self):
self.m_type = ""
self.m_id = 0
self.m_bit_start = 0
self.m_bits = 0
self.m_comment = ""
class Item:
def __init__(self):
self.m_is_reserve = False
self.m_is_constant = False
self.m_name = ""
self.m_id = 0
self.m_data_type = ""
self.m_constant_value = 0
self.m_bit_start = 0
self.m_bits = 0
self.m_comment = ""
# The information for a telemetry packet is accumulated in a Packet class
# instance, then the packet_complete method is called when it is complete
class Packet:
def init(self):
self.m_name = ""
self.m_id = 0
self.m_id_comment = ""
self.m_bit_index = 0
self.m_bytes = 0
self.m_num_fixed_fields = 0
self.m_num_variable_fields = 0
self.m_max_field_bits = 0
self.num_header_general_fields = 0
self.m_header_list = []
self.m_item_list = []
# Additional packet information needed
# for scheduling and multiplexing to an address
#
self.m_chan = None
#
self.m_freq = None
self.m_offset = None
def __init__(self):
self.init()
# errors encountered parsing the file use err_msg to output the
# error and exit.
def err_msg(self, msg):
global tlm_input_line_num
print(msg, "at input line ", tlm_input_line_num)
exit(1)
def duration(self, line):
global tlm_duration
tlm_duration = float(line[1])
def run_period(self, line):
global tlm_period
tlm_period = float(line[1])
def frequency(self, line):
"""
Frequency of packet is always in Hz. so no units given.
"""
self.m_freq = float(line[1])
def offset(self, line):
"""
Offset of packet is essentially it's phase within duration.
If no units detected just count cycle slots.
If "s" for seconds is detected then compute cycle slots here.
NOTE: One and only one offset per ID can currently be specified.
"""
if "s" in line[1].lower():
self.m_offset = int(
(float(line[1].split()[0].strip()) / (1.0 / tlm_period)) + 0.5
)
else:
self.m_offset = int(line[1])
def channel(self, line):
"""
Channel number. This is a channel number assigned to
the packet of type ID. It is typically used for mapping
of packet ID's types to 1553 sub-addresses.
"""
self.m_chan = int(line[1])
# Comment records are ignored
def comment(self, line):
pass # Ignored
# Process and save a packet record. This completed processing of the
# previous packet (if any)
def packet(self, line):
self.packet_complete()
self.init()
# Process and save a header record
def header(self, line):
if len(line) < 2:
self.err_msg("Missing parameter for header")
he = HeaderItem()
he.m_bit_start = self.m_bit_index
kw = line[1].lower()
if kw == "id":
if len(line) < 5:
self.err_msg("Missing parameter for header ID")
he.m_type = "HEADER_FIELD_ID"
s = line[4].strip()
if not s.isdigit():
self.err_msg("Illegal value for number of bits: '" + s + "'")
he.m_bits = int(s)
self.m_bit_index += he.m_bits
elif kw == "time":
he.m_type = "HEADER_FIELD_TIME"
he.m_bits = 48
self.m_bit_index += he.m_bits
elif kw == "sequence":
if len(line) < 6:
self.err_msg("Missing parameter for header sequence")
he.m_type = "HEADER_FIELD_SEQUENCE"
s = line[4].strip()
if not s.isdigit():
self.err_msg("Illegal value for number of bits: '" + s + "'")
he.m_bits = int(s)
self.m_bit_index += he.m_bits
elif kw == "field":
if len(line) < 4:
self.err_msg("Missing parameter for header field")
he.m_type = "HEADER_FIELD_FIELD"
s = line[2].strip()
if not s.isdigit():
self.err_msg("Illegal value for ID: '" + s + "'")
he.m_id = int(s)
s = line[4].strip()
if not s.isdigit():
self.err_msg("Illegal value for number of bits: '" + s + "'")
he.m_bits = int(s)
self.m_bit_index += he.m_bits
self.num_header_general_fields += 1
else:
self.err_msg("Illegal keyword for header: '" + line[1] + "'")
if len(line) > 5:
he.m_comment = line[5]
self.m_header_list.append(he)
# Process and save the packet identifier record
def identifier(self, line):
if len(line) < 3:
self.err_msg("Missing parameter(s) for identifier")
self.m_name = line[1].strip()
if not self.m_name:
self.err_msg("Name cannot be blank")
if len(self.m_name.split()) > 1:
self.err_msg("Name must not contain spaces")
if self.m_name.find("/") >= 0:
self.err_msg("Name must not contain '/'")
if self.m_name.find("\\") >= 0:
self.err_msg("Name must not contain '\\'")
s = line[2].strip()
if not s.isdigit():
self.err_msg("Illegal value for identifier: '" + s + "'")
self.m_id = int(s)
if self.m_id < 0:
self.err_msg("Negative value for identifier: '" + line[2] + "'")
if len(line) > 5:
self.m_id_comment = line[5]
# Process and save an item record
def item(self, line):
self.m_num_variable_fields += 1
it = Item()
it.m_bit_start = self.m_bit_index
if len(line) < 5:
self.err_msg("Missing parameter(s) for item")
it.m_name = line[1].strip()
if not it.m_name:
self.err_msg("Name cannot be blank")
if len(it.m_name.split()) > 1:
self.err_msg("Name must not contain spaces")
s = line[2].strip()
if not s.isdigit():
self.err_msg("Illegal value for identifier: '" + s + "'")
it.m_id = int(s)
it.m_data_type = line[3].strip()
s = line[4].strip()
if not s.isdigit():
self.err_msg("Illegal value for number of bits: '" + s + "'")
it.m_bits = int(s)
self.m_bit_index += it.m_bits
if len(line) > 5:
it.m_comment = line[5]
self.m_item_list.append(it)
# Process and save a reserve record
def reserve(self, line):
it = Item()
it.m_bit_start = self.m_bit_index
if len(line) < 5:
self.err_msg("Missing parameter for reserve")
it.m_is_reserve = True
s = line[4].strip()
if not s.isdigit():
self.err_msg("Illegal value for number of bits: '" + s + "'")
it.m_bits = int(s)
if it.m_bits < 1:
self.err_msg("Illegal value for number of bits: '" + s + "'")
self.m_bit_index += it.m_bits
if len(line) > 5:
it.m_comment = line[5]
self.m_item_list.append(it)
# Process and save an align record
def align(self, line):
it = Item()
it.m_bit_start = self.m_bit_index
if len(line) < 2:
self.err_msg("Missing parameter for align")
it.m_is_reserve = True
s = line[1].strip()
if not s.isdigit():
self.err_msg("Illegal value for number of bits: '" + s + "'")
bits = int(s)
if bits not in (8, 16, 32, 64):
self.err_msg("Illegal value for number of bits: '" + s + "'")
if self.m_bit_index % bits:
bits = bits - (self.m_bit_index % bits)
else:
return # Already aligned
it.m_bits = bits
self.m_bit_index += it.m_bits
if len(line) > 5:
it.m_comment = line[5]
self.m_item_list.append(it)
# Process and save a constant record
def constant(self, line):
self.m_num_fixed_fields += 1
it = Item()
it.m_bit_start = self.m_bit_index
if len(line) < 5:
self.err_msg("Missing parameter for constant")
it.m_is_constant = True
it.m_name = line[1].strip()
if not it.m_name:
self.err_msg("Name cannot be blank")
if len(it.m_name.split()) > 1:
self.err_msg("Name must not contain spaces")
it.m_data_type = line[2].strip()
it.m_data_type = it.m_data_type.lower()
if it.m_data_type not in ("integer", "float", "text"):
self.err_msg("Invalid date type: '" + it.m_data_type + "'")
it.m_constant_value = line[3]
if it.m_data_type == "integer":
it.m_constant_value = it.m_constant_value.strip()
if not it.m_constant_value.isdigit():
self.err_msg("Invalid numeric value: '" + it.m_constant_value + "'")
it.m_constant_value = int(it.m_constant_value)
elif it.m_data_type == "float":
it.m_constant_value = it.m_constant_value.strip()
try:
f = float(it.m_constant_value)
except ValueError:
self.err_msg(
"Invalid floating point value: '" + it.m_constant_value + "'"
)
it.m_constant_value = f
s = line[4].strip()
if not s.isdigit():
self.err_msg("Illegal value for number of bits: '" + s + "'")
bits = int(s)
if bits < 1:
self.err_msg("Illegal value for number of bits: '" + s + "'")
it.m_bits = bits
self.m_bit_index += it.m_bits
if len(line) > 5:
it.m_comment = line[5]
self.m_item_list.append(it)
# packet_complete() is called when a packet definition is complete to save
# the packet
def packet_complete(self):
global tlm_input_line_num
global tlm_packet_list
global tlm_max_packet_bytes
global tlm_duration
global tlm_period
global verbose
if not self.m_header_list and not self.m_item_list:
return
self.m_bytes = (self.m_bit_index + 7) / 8
if self.m_bytes > tlm_max_packet_bytes:
tlm_max_packet_bytes = self.m_bytes
for field in self.m_header_list:
if field.m_bits > self.m_max_field_bits:
self.m_max_field_bits = field.m_bits
for item in self.m_item_list:
if not item.m_is_reserve and not item.m_is_constant:
if item.m_bits > self.m_max_field_bits:
self.m_max_field_bits = item.m_bits
if verbose:
if tlm_duration is not None:
print("Duration (in seconds only): %f" % tlm_duration)
if tlm_period is not None:
print("Run or Sample Period (in hz. only): %f" % tlm_period)
if self.m_freq is not None:
print("Packet frequency (Hz.): " + str(self.m_freq))
if self.m_offset is not None:
print("Packet offset: " + str(self.m_offset))
print(
"packet size in bits: "
+ str(self.m_bit_index)
+ " ("
+ str(self.m_bit_index / 8)
+ " bytes)"
)
print(
"Number of fixed-value fields:"
+ str(self.m_num_fixed_fields)
+ ", variable fields: "
+ str(self.m_num_variable_fields)
)
print("name: ", self.m_name)
print("packet ID: ", self.m_id, ", comment: ", self.m_id_comment)
print(
"Number of items in packet header list: ",
len(self.m_header_list),
)
print(
"Number of general items in packet header list: ",
self.num_header_general_fields,
)
print(
"Maximum field size in bits: ", self.m_max_field_bits
)
for field in self.m_header_list:
print("\tType: ", field.m_type)
print("\tID: ", field.m_id)
print("\tStart bit: ", field.m_bit_start)
print("\tSize in bits: ", field.m_bits)
print("\tComment: ", field.m_comment)
print("")
print("Number of items in packet item list: ", len(self.m_item_list))
for i, item in enumerate(self.m_item_list, start=1):
print("Item # ", i)
print("\tis_reserve: ", item.m_is_reserve)
print("\tis_constant: ", item.m_is_constant)
print("\tname: ", item.m_name)
print("\tid: ", item.m_id)
print("\tdata type: ", item.m_data_type)
print("\tconstant value: ", item.m_constant_value)
print("\tstart bit: ", item.m_bit_start)
print("\tbits: ", item.m_bits)
print("\tcomment: ", item.m_comment)
print()
if self.m_name == "":
self.err_msg("Preceding packet has no name")
p = copy.deepcopy(self)
tlm_packet_list.append(p)
# The CsvLine class parses a single line and saves the info in the
# packet member. When a packet record is found the previously defined
# packet is saved.
class CsvLine:
def __init__(self):
global tlm_duration
self.packet = Packet()
self.keywords = {
"duration": self.packet.duration,
"runperiod": self.packet.run_period,
"frequency": self.packet.frequency,
"offset": self.packet.offset,
"channel": self.packet.channel,
"comment": self.packet.comment,
"packet": self.packet.packet,
"header": self.packet.header,
"identifier": self.packet.identifier,
"item": self.packet.item,
"reserve": self.packet.reserve,
"align": self.packet.align,
"constant": self.packet.constant,
}
# process() is called with a single line already tokenized as a list
def process(self, line):
global tlm_input_line_num
if not line:
return
nonblank = False
for i in range(len(line)):
s = line[i].strip()
if len(s):
nonblank = True
break
if not nonblank:
return
kw = line[0].lower()
if kw in self.keywords:
self.keywords[kw](line)
else:
print("Invalid keyword '" + line[0] + "' at line ", tlm_input_line_num)
exit(1)
def finish(self):
self.packet.packet_complete()
# CsvFile reads the input file and processes each line
class CsvFile:
def __init__(self):
self.line = CsvLine()
def process(self, name):
global tlm_input_line_num
try:
m_fp = open(name)
except OSError:
print(f"Error opening {name}")
exit()
m_reader = csv.reader(m_fp, dialect="excel")
for line in m_reader:
tlm_input_line_num += 1
self.line.process(line)
self.line.finish()
m_fp.close()
def sched_cycle_ids_max(max_cycle):
    """
    Return a list of the maximum times an ID is assigned to a cycle slot.

    Returns an empty list when no schedule is defined (no run period, or no
    cycle count).
    """
    # Guard BEFORE allocating: output_cpp passes max_cycle=None when duration
    # or period is undefined, and the original built range(max_cycle) first,
    # raising TypeError before its tlm_period check could return [].
    if tlm_period is None or max_cycle is None:
        return []
    cycle_max_list = [0] * max_cycle
    for p in tlm_packet_list:
        if (p.m_freq is None) or (p.m_offset is None):
            # Packets without both a frequency and an offset are unscheduled.
            continue
        # The packet repeats every cycle_offset slots, phase-shifted by m_offset.
        cycle_offset = int(((1.0 / p.m_freq) / (1.0 / tlm_period)) + 0.5)
        offset = p.m_offset
        for i in range(max_cycle):
            if i % cycle_offset == 0:
                if (i + offset) < max_cycle:
                    cycle_max_list[i + offset] += 1
    return cycle_max_list
def sched_cycle_ids(max_cycle):
    """
    Return a list of cycle slots where each slot is a list of IDs assigned in that slot.
    Note the list returned by sched_cycle_ids_max is the number of IDs per cycle slot.

    Returns an empty list when no schedule is defined.
    """
    # Guard BEFORE allocating: the original built range(max_cycle) first, so a
    # None max_cycle (undefined duration/period) raised TypeError before the
    # tlm_period check could return [].
    if tlm_period is None or max_cycle is None:
        return []
    cycle_id_list = [list() for _ in range(max_cycle)]
    for p in tlm_packet_list:
        if (p.m_freq is None) or (p.m_offset is None):
            continue
        # The packet repeats every cycle_offset slots, phase-shifted by m_offset.
        cycle_offset = int(((1.0 / p.m_freq) / (1.0 / tlm_period)) + 0.5)
        offset = p.m_offset
        for i in range(max_cycle):
            if i % cycle_offset == 0:
                if (i + offset) < max_cycle:
                    cycle_id_list[i + offset].append(p.m_id)
    # Empty slots get a -1 sentinel so the generated table has no gaps.
    for slot in cycle_id_list:
        if len(slot) == 0:
            slot.append(-1)
    return cycle_id_list
def sched_id_arr_size(cycle_id_list):
    """
    Return total size for the scheduled ID array.

    Each slot contributes its length; an empty slot still contributes one
    entry (it holds a sentinel in the generated table).
    """
    return sum(max(len(slot), 1) for slot in cycle_id_list)
def id_to_channel_map():
    """
    Return an association list of ID to channel number assignments.
    NOTE: the channel number is intended to be 1553 sub-address.

    NOTE(review): this is an unimplemented stub — the body is only this
    docstring, so it currently returns None and is not called by the
    visible code in this file.
    """
def output_cpp(output_file, template_file):
    """Render the packet layout C++ source via the Cheetah template.

    Fills template attributes from the module-level packet/schedule state
    and writes the rendered text to output_file.
    """
    global tlm_duration
    global tlm_period
    global tlm_packet_list
    global tlm_max_packet_bytes
    t = Template(file=template_file)
    t.tlm_packet_list = tlm_packet_list
    # Generate schedule code only if both duration and period are defined.
    if (tlm_duration is not None) and (tlm_period is not None):
        t.tlm_max_cycles = int(tlm_duration * tlm_period)
    else:
        t.tlm_max_cycles = None
    t.tlm_cycle_max_list = sched_cycle_ids_max(t.tlm_max_cycles)
    t.tlm_cycle_id_list = sched_cycle_ids(t.tlm_max_cycles)
    # Create ID to channel mapping
    t.tlm_cycle_id_arr_size = sched_id_arr_size(t.tlm_cycle_id_list)
    t.tlm_max_packet_bytes = tlm_max_packet_bytes
    t.tlm_max_num_chan = sum(pkt.m_chan is not None for pkt in tlm_packet_list)
    t.tlm_max_num_freq = sum(pkt.m_freq is not None for pkt in tlm_packet_list)
    # -1/-1.0 signal "not specified" to the template.
    t.tlm_duration = tlm_duration if tlm_duration is not None else -1.0
    t.tlm_period = tlm_period if tlm_period is not None else -1
    # Context manager ensures the output file is flushed and closed even on
    # error (the original left the handle open).
    with open(output_file, "w") as f:
        print(t, file=f)
def output_html():
    # NOTE(review): unimplemented stub — declares the global and does nothing.
    global tlm_packet_list
def output_dict():
    # NOTE(review): unimplemented stub — declares the global and does nothing.
    global tlm_packet_list
def output(cpp_output_file, template_file):
    # Emit all output products; only the C++ generation is implemented,
    # the html/dict calls are stubs.
    output_cpp(cpp_output_file, template_file)
    output_html()
    output_dict()
def main():
    """Parse command-line arguments, process all input CSV files, and
    generate the output products."""
    global verbose
    arg_parser = argparse.ArgumentParser(description="Process a telemetry definition file")
    arg_parser.add_argument(
        "-v", "--verbose", action="store_true", help="Enable verbose mode"
    )
    arg_parser.add_argument(
        "--cpp",
        action="store",
        required=True,
        help="Required. Fully-qualified output cpp file name",
    )
    arg_parser.add_argument(
        "--template",
        action="store",
        required=True,
        help="Required. Fully-qualified file name of the cheetah template used by this script (normally in Autocoders/Python/src/fprime_ac/utils/TlmPacketDefBase.tmpl)",
    )
    arg_parser.add_argument(
        "input_file", nargs="+", help="One or more input tab-delimited csv file names"
    )
    args = arg_parser.parse_args()
    # Packet.packet_complete() reads this module-level flag.
    verbose = args.verbose
    csv_file = CsvFile()
    for name in args.input_file:
        csv_file.process(name)
    output(args.cpp, args.template)
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,646 @@
#!/usr/bin/env python
# ===============================================================================
# NAME: tlm_packet_gen.py
#
# DESCRIPTION: A tool for generating telemetry packet data structures for use with Svc/TlmPacketizer.
# Logic borrowed from gds_dictgen.py
#
# AUTHOR: Tim Canham
# EMAIL: timothy.canham@jpl.nasa.gov
# DATE CREATED: September 8, 2019
#
# Copyright 2019, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
# ===============================================================================
import logging
import os
import sys
from optparse import OptionParser
from Cheetah.Template import Template
# Meta-model for Component only generation
from fprime_ac.models import TopoFactory
# Parsers to read the XML
from fprime_ac.parsers import (
XmlArrayParser,
XmlEnumParser,
XmlSerializeParser,
XmlTopologyParser,
)
from fprime_ac.utils.buildroot import (
get_nearest_build_root,
search_for_file,
set_build_roots,
)
from lxml import etree
# Cheetah template for the generated header (<name>Ac.hpp): declares the
# packet list and the ignore-channel packet in the configured namespace.
# "\#" escapes a literal '#' for Cheetah.
header_file_template = """
\#ifndef ${packet_list_name}_header_h
\#define ${packet_list_name}_header_h
\#include <Svc/TlmPacketizer/TlmPacketizerTypes.hpp>
namespace ${packet_list_namespace} {
// set of packets to send
extern const Svc::TlmPacketizerPacketList ${packet_list_name}Pkts;
// set of channels to ignore
extern const Svc::TlmPacketizerPacket ${packet_list_name}Ignore;
}
\#endif // ${packet_list_name}_header_h
"""
# Cheetah template for the generated implementation (<name>Ac.cpp): one
# channel-entry table per packet, the packet list, and the ignore table.
# The static_assert rejects packets too large for a ComBuffer at compile time.
# NOTE(review): FpConfig.hpp is included twice in the generated output —
# harmless but redundant.
impl_file_template = """
\#include <Svc/TlmPacketizer/TlmPacketizerTypes.hpp>
\#include <${output_header}>
\#include <FpConfig.hpp>
\#include <FpConfig.hpp>
\#include <Fw/Time/Time.hpp>
// Verify packets not too large for ComBuffer
// if this macro gives a compile error, that means the packets are too large
void check_func(void) {
static_assert((${max_size} <= (FW_COM_BUFFER_MAX_SIZE - Fw::Time::SERIALIZED_SIZE - sizeof(FwTlmPacketizeIdType) - sizeof(FwPacketDescriptorType))), "PacketsTooBig");
}
namespace ${packet_list_namespace} {
#for $packet,$id,$level,$channel_list in $packet_list
static const Svc::TlmPacketizerChannelEntry ${packet}List[] = {
#for $channel_id,$channel_size,$channel_name in $channel_list:
{$channel_id, $channel_size}, // $channel_name
#end for
};
static const Svc::TlmPacketizerPacket ${packet} = { ${packet}List, $id, $level, FW_NUM_ARRAY_ELEMENTS(${packet}List) };
#end for
const Svc::TlmPacketizerPacketList ${packet_list_name}Pkts = {
{
#for $packet,$id,$level,$channel_list in $packet_list
&${packet},
#end for
},
$len($packet_list)
};
static const Svc::TlmPacketizerChannelEntry ignoreList[] = {
#for $channel_id,$channel_name in $ignore_list:
{$channel_id, 0}, // $channel_name
#end for
};
const Svc::TlmPacketizerPacket ${packet_list_name}Ignore = { ignoreList, 0, 0, FW_NUM_ARRAY_ELEMENTS(ignoreList) };
} // end namespace ${packet_list_namespace}
"""
# Global logger init. below.
PRINT = logging.getLogger("output")
DEBUG = logging.getLogger("debug")
# Directory where per-packet channel-listing text files (GDS views) are written.
PACKET_VIEW_DIR = "./Packet-Views"
class TlmPacketParseValueError(ValueError):
    """Raised for semantic errors in the packet definition XML (bad tags,
    duplicate IDs, oversized or missing channels)."""

    pass
class TlmPacketParseIOError(ValueError):
    """Raised for missing input/import files.

    NOTE(review): despite the name, this subclasses ValueError rather than
    IOError/OSError — callers must catch ValueError.
    """

    pass
class TlmPacketParser(object):
    """Generates Svc/TlmPacketizer packet tables from a packet-definition XML.

    Parses the referenced topology XML to learn every channel's ID and
    serialized size, then renders a header/implementation pair describing
    each packet's channel list plus an ignore list.
    """

    def __init__(self, verbose=False, dependency=None):
        self.verbose = verbose
        self.dependency = dependency  # optional path for a make dependency file
        self.size_dict = {}  # type name -> serialized size in bytes

    def add_type_size(self, type, size):
        # Register the serialized size of a named type (enum/array/serializable).
        # Note: parameter name shadows the builtin `type`.
        PRINT.debug("Type: %s size: %d" % (type, size))
        self.size_dict[type] = size

    def get_type_size(self, type_name, size):
        """Return the serialized byte size of a primitive type, or None if
        type_name is not a recognized primitive."""
        # switch based on type
        if type_name == "string":
            return int(size) + 2  # plus 2 to store the string length
        elif type_name == "I8":
            return 1
        elif type_name == "I16":
            return 2
        elif type_name == "I32":
            return 4
        elif type_name == "I64":
            return 8
        elif type_name == "U8":
            return 1
        elif type_name == "U16":
            return 2
        elif type_name == "U32":
            return 4
        elif type_name == "U64":
            return 8
        elif type_name == "F32":
            return 4
        elif type_name == "F64":
            return 8
        elif type_name == "bool":
            return 1
        else:
            return None

    def generate_channel_size_dict(self, the_parsed_topology_xml, xml_filename):
        """
        Generates GDS XML dictionary from parsed topology XML

        Returns a dict mapping "component.channel" -> (global channel id,
        serialized size in bytes) for every channel in the topology.
        """
        if self.verbose:
            print(f"Topology xml type description file: {xml_filename}")
        model = TopoFactory.TopoFactory.getInstance()
        topology_model = model.create(the_parsed_topology_xml, generate_list_file=False)
        # uses the topology model to process the items
        # create list of used parsed component xmls
        parsed_xml_dict = {}
        # deployment = the_parsed_topology_xml.get_deployment()
        for comp in the_parsed_topology_xml.get_instances():
            if comp.get_type() in topology_model.get_base_id_dict():
                parsed_xml_dict[comp.get_type()] = comp.get_comp_xml()
            else:
                PRINT.info(
                    "Components with type {} aren't in the topology model.".format(
                        comp.get_type()
                    )
                )
        xml_list = []
        for parsed_xml_type in parsed_xml_dict:
            if parsed_xml_dict[parsed_xml_type] is None:
                print(
                    "ERROR: XML of type {} is being used, but has not been parsed correctly. Check if file exists or add xml file with the 'import_component_type' tag to the Topology file.".format(
                        parsed_xml_type
                    )
                )
                raise Exception()
            xml_list.append(parsed_xml_dict[parsed_xml_type])
        topology_model.set_instance_xml_list(xml_list)
        ch_size_dict = {}
        for comp in the_parsed_topology_xml.get_instances():
            comp_name = comp.get_name()
            # Channel IDs are offset by the component's base id.
            comp_id = int(comp.get_base_id(), 0)
            comp_type = comp.get_type()
            if self.verbose:
                PRINT.debug("Processing %s" % comp_name)
            # check for included XML types
            self.process_enum_files(parsed_xml_dict[comp_type].get_enum_type_files())
            self.process_array_files(parsed_xml_dict[comp_type].get_array_type_files())
            self.process_serializable_files(
                parsed_xml_dict[comp_type].get_serializable_type_files()
            )
            # check for channels
            if parsed_xml_dict[comp_type].get_channels() is not None:
                for chan in parsed_xml_dict[comp_type].get_channels():
                    channel_name = f"{comp_name}.{chan.get_name()}"
                    if self.verbose:
                        print("Processing Channel %s" % channel_name)
                    chan_type = chan.get_type()
                    # A tuple-typed channel is an enumeration (4 bytes).
                    if type(chan_type) == type(tuple()):
                        chan_size = 4
                    # if channel type is string
                    # elif chan_type == "string":
                    #    chan_size = int(chan.get_size()) + 2 # FIXME: buffer size storage size magic number - needs to be turned into a constant
                    # if channel is serializable
                    elif chan_type in self.size_dict:
                        chan_size = self.size_dict[chan_type]
                    else:
                        chan_size = self.get_type_size(chan_type, chan.get_size())
                    if chan_size is None:
                        print(
                            'Component %s channel %s type "%s" not found!'
                            % (comp_name, channel_name, chan_type)
                        )
                        sys.exit(-1)
                    chan_id = int(chan.get_ids()[0], 0) + comp_id
                    if self.verbose:
                        print("Channel %s size %d" % (channel_name, chan_size))
                    ch_size_dict[channel_name] = (chan_id, chan_size)
        return ch_size_dict

    def gen_packet_file(self, xml_filename):
        """Parse a <packets> XML file and emit <name>Ac.hpp / <name>Ac.cpp
        plus per-packet GDS view text files under PACKET_VIEW_DIR.

        Raises TlmPacketParseIOError for missing files and
        TlmPacketParseValueError for semantic errors (duplicate IDs,
        oversized packets, unknown or unpacketized channels).
        """
        view_path = PACKET_VIEW_DIR
        if not os.path.exists(view_path):
            os.mkdir(view_path)
        # Make sure files
        if not os.path.isfile(xml_filename):
            raise TlmPacketParseIOError("File %s does not exist!" % xml_filename)
        # NOTE(review): fd is never explicitly closed — relies on GC.
        fd = open(xml_filename, "r")
        xml_parser = etree.XMLParser(remove_comments=True)
        element_tree = etree.parse(fd, parser=xml_parser)
        channel_size_dict = None
        ht = Template(header_file_template)
        it = Template(impl_file_template)
        if element_tree.getroot().tag == "packets":
            list_name = element_tree.getroot().attrib["name"]
            list_namespace = element_tree.getroot().attrib["namespace"]
            max_size = int(element_tree.getroot().attrib["size"])
            # fill in template fields for header
            ht.packet_list_name = list_name
            ht.packet_list_namespace = list_namespace
            # fill in template fields for implementation file
            it.packet_list_name = list_name
            it.packet_list_namespace = list_namespace
            it.max_size = max_size
            packet_list_container = []
            packetized_channel_list = []
            it.ignore_list = []
            id_list = []  # check for duplicates
            ignore_name_list = []
            size_dict = {}  # packet level -> accumulated byte size
            ht.num_packets = 0
            total_packet_size = 0
            levels = []
            view_path = PACKET_VIEW_DIR
            # find the topology import
            for entry in element_tree.getroot():
                # read in topology file
                if entry.tag == "import_topology":
                    top_file = search_for_file("Topology", entry.text)
                    if top_file is None:
                        raise TlmPacketParseIOError(
                            "import file %s not found" % entry.text
                        )
                    the_parsed_topology_xml = XmlTopologyParser.XmlTopologyParser(
                        top_file
                    )
                    deployment = the_parsed_topology_xml.get_deployment()
                    if self.verbose:
                        print("Found assembly or deployment named: %s\n" % deployment)
                    channel_size_dict = self.generate_channel_size_dict(
                        the_parsed_topology_xml, xml_filename
                    )
                elif entry.tag == "packet":
                    if channel_size_dict is None:
                        raise TlmPacketParseValueError(
                            "%s: Topology import must be before packet definitions"
                            % xml_filename
                        )
                    packet_size = 0
                    packet_name = entry.attrib["name"]
                    # Open a text file for a GDS view
                    vfd = open("%s/%s.txt" % (view_path, packet_name), "w")
                    packet_id = entry.attrib["id"]
                    packet_level = entry.attrib["level"]
                    print("Packetizing %s (%s)" % (packet_name, packet_id))
                    if packet_id in id_list:
                        raise TlmPacketParseValueError(
                            "Duplicate packet id %s" % packet_id
                        )
                    else:
                        id_list.append(packet_id)
                    channel_list = []
                    for channel in entry:
                        channel_name = channel.attrib["name"]
                        # TKC 11/20/2024 - In order to work with the ground system,
                        # we have to strip off the leading module.  This is a
                        # band-aid until FPP packets are done
                        name_parts = channel_name.split(".")
                        if len(name_parts) != 3:
                            raise TlmPacketParseValueError(
                                'Channel %s must be of the format "module.component.channel_name"'
                                % channel_name
                            )
                        (module, component, channel) = name_parts
                        channel_name = "%s.%s" % (component, channel)
                        if not channel_name in channel_size_dict:
                            raise TlmPacketParseValueError(
                                "Channel %s does not exist" % channel_name
                            )
                        (channel_id, channel_size) = channel_size_dict[channel_name]
                        packet_size += channel_size
                        if self.verbose:
                            print(
                                " -Channel %s ID %d size %d"
                                % (channel_name, channel_id, channel_size)
                            )
                        channel_list.append((channel_id, channel_size, channel_name))
                        packetized_channel_list.append(channel_name)
                        vfd.write("%s\n" % channel_name)
                    packet_list_container.append(
                        (packet_name, packet_id, packet_level, channel_list)
                    )
                    ht.num_packets += 1
                    packet_size += (
                        11 + 2 + 4
                    )  # raw packet size + time tag + packet id + packet descriptor
                    if packet_size > max_size:
                        raise TlmPacketParseValueError(
                            "Packet %s is too large. Size: %d max: %d"
                            % (packet_name, packet_size, max_size)
                        )
                    print("Packet %s size %d/%d" % (packet_name, packet_size, max_size))
                    total_packet_size += packet_size
                    # Accumulate per-level byte totals for the summary report.
                    if packet_level in size_dict:
                        size_dict[packet_level] = size_dict[packet_level] + packet_size
                    else:
                        size_dict[packet_level] = packet_size
                    if not packet_level in levels:
                        levels.append(packet_level)
                    vfd.close()
                elif entry.tag == "ignore":
                    if channel_size_dict is None:
                        raise TlmPacketParseValueError(
                            "%s: Topology import must be before packet definitions"
                            % xml_filename
                        )
                    for channel in entry:
                        channel_name = channel.attrib["name"]
                        # TKC 11/20/2024 - In order to work with the ground system,
                        # we have to strip off the leading module.  This is a
                        # band-aid until FPP packets are done
                        name_parts = channel_name.split(".")
                        if len(name_parts) != 3:
                            raise TlmPacketParseValueError(
                                'Channel %s must be of the format "module.component.channel_name"'
                                % channel_name
                            )
                        (module, component, channel) = name_parts
                        channel_name = "%s.%s" % (component, channel)
                        if not channel_name in channel_size_dict:
                            raise TlmPacketParseValueError(
                                "Channel %s does not exist" % channel_name
                            )
                        (channel_id, channel_size) = channel_size_dict[channel_name]
                        it.ignore_list.append((channel_id, channel_name))
                        if self.verbose:
                            print(
                                "Channel %s (%d) ignored" % (channel_name, channel_id)
                            )
                        ignore_name_list.append(channel_name)
                else:
                    raise TlmPacketParseValueError("Invalid packet tag %s" % entry.tag)
                if self.verbose:
                    print("Entry: %s" % entry.tag)
        else:
            raise TlmPacketParseValueError(
                "Invalid xml type %s" % element_tree.getroot().tag
            )
        output_file_base = os.path.splitext(os.path.basename(xml_filename))[0]
        nearest_build_root = get_nearest_build_root(xml_filename)
        file_dir = os.path.relpath(os.path.dirname(xml_filename), nearest_build_root)
        # Every topology channel must be either packetized or ignored.
        missing_channels = False
        for channel in channel_size_dict:
            if (
                not channel in packetized_channel_list
                and not channel in ignore_name_list
            ):
                (channel_id, channel_size) = channel_size_dict[channel]
                print(
                    "Channel %s (%d) not packetized or ignored." % (channel, channel_id)
                )
                missing_channels = True
        if missing_channels:
            raise TlmPacketParseValueError("Channels missing from packets")
        header = "%sAc.hpp" % output_file_base
        source = "%sAc.cpp" % output_file_base
        print("Generating %s and %s" % (header, source))
        levels.sort()
        for level in levels:
            print(
                "Level: %s Bytes: %d bits: %d"
                % (level, size_dict[level], size_dict[level] * 8)
            )
        print(
            "Number of packets: %d\nTotal packet bytes: %d bits: %d"
            % (ht.num_packets, total_packet_size, total_packet_size * 8)
        )
        it.packet_list = packet_list_container
        it.output_header = "%s/%sAc.hpp" % (file_dir, output_file_base)
        open(header, "w").write(str(ht))
        open(source, "w").write(str(it))
        target_directory = os.getcwd().replace("\\", os.sep)
        header_target = target_directory + os.sep + header
        source_target = target_directory + os.sep + source
        # write dependency file
        if self.dependency is not None:
            # NOTE(review): top_file is only bound inside the import_topology
            # branch; reaching here without one would raise earlier via the
            # channel_size_dict checks, but a direct NameError is possible if
            # that invariant changes — confirm.
            dependency_file_txt = "\n%s %s: %s\n" % (
                source_target,
                header_target,
                top_file,
            )
            open(self.dependency, "w").write(dependency_file_txt)

    def process_serializable_files(self, serializable_file_list):
        """Register the serialized size of every serializable type (and its
        recursively included enums/arrays/serializables) in size_dict."""
        for serializable_file in serializable_file_list:
            serializable_file = search_for_file("Serializable", serializable_file)
            serializable_model = XmlSerializeParser.XmlSerializeParser(
                serializable_file
            )
            # process XML includes
            self.process_enum_files(serializable_model.get_include_enums())
            self.process_array_files(serializable_model.get_include_arrays())
            self.process_serializable_files(serializable_model.get_includes())
            serializable_type = (
                serializable_model.get_namespace()
                + "::"
                + serializable_model.get_name()
            )
            serializable_size = 0
            for (
                member_name,
                member_type,
                member_array_size,
                member_size,
                member_format_specifier,
                member_comment,
                _,
            ) in serializable_model.get_members():
                # if enumeration
                if type(member_type) == type(tuple()):
                    type_size = 4  # Fixme: can we put this in a constant somewhere?
                elif (
                    member_type in self.size_dict.keys()
                ):  # See if it is a registered type
                    type_size = self.size_dict[member_type]
                else:
                    type_size = self.get_type_size(member_type, member_size)
                if type_size is None:
                    print(
                        "Illegal type %s in serializable %s"
                        % (member_type, serializable_type)
                    )
                    sys.exit(-1)
                if member_array_size != None:
                    serializable_size += int(member_array_size) * type_size
                else:
                    serializable_size += type_size
            self.add_type_size(serializable_type, serializable_size)
            if self.verbose:
                print(
                    "Serializable %s size %d" % (serializable_type, serializable_size)
                )

    def process_enum_files(self, enum_file_list):
        """Register every enum type in size_dict (enums serialize to 4 bytes)."""
        for enum_file in enum_file_list:
            enum_file = search_for_file("Enumeration", enum_file)
            enum_model = XmlEnumParser.XmlEnumParser(enum_file)
            enum_type = enum_model.get_namespace() + "::" + enum_model.get_name()
            self.add_type_size(
                enum_type, 4
            )  # Fixme: can we put this in a constant somewhere?

    def process_array_files(self, array_file_list):
        """Register every array type's total serialized size (element size
        times element count), recursing into included type files first."""
        for array_file in array_file_list:
            array_file = search_for_file("Array", array_file)
            array_model = XmlArrayParser.XmlArrayParser(array_file)
            # process any XML includes
            self.process_enum_files(array_model.get_include_enum_files())
            self.process_array_files(array_model.get_include_array_files())
            self.process_serializable_files(array_model.get_includes())
            array_type = array_model.get_namespace() + "::" + array_model.get_name()
            array_size = int(array_model.get_size())
            elem_type = array_model.get_type()
            elem_type_size = None
            if type(elem_type) == type(tuple()):
                elem_type_size = 4  # Fixme: can we put this in a constant somewhere?
            elif elem_type in self.size_dict.keys():  # See if it is a registered type
                elem_type_size = self.size_dict[elem_type]
            else:
                elem_type_size = self.get_type_size(elem_type, 1)  # Fixme: strings?
            if elem_type_size is None:
                print("Illegal type %s in array %s" % (elem_type, array_type))
                sys.exit(-1)
            self.add_type_size(array_type, elem_type_size * array_size)
def pinit():
    """
    Build and return the command-line option parser for this tool.

    Options:
      -b / --build_root       overwrite the BUILD_ROOT environment variable
      -v / --verbose          enable verbose runtime output (default: off)
      -d / --dependency-file  emit a make dependency file
    """
    parser = OptionParser("usage: %prog [options] [xml_topology_filename]")
    parser.add_option(
        "-b",
        "--build_root",
        type="string",
        dest="build_root_overwrite",
        default=None,
        help="Overwrite environment variable BUILD_ROOT",
    )
    parser.add_option(
        "-v",
        "--verbose",
        action="store_true",
        dest="verbose_flag",
        default=False,
        help="Enable verbose mode showing more runtime detail (def: False)",
    )
    parser.add_option(
        "-d",
        "--dependency-file",
        type="string",
        dest="dependency_file",
        default=None,
        help="Generate dependency file for make",
    )
    return parser
def main():
    """
    Entry point: parse command-line options and generate the packet file.

    Expects exactly one positional argument, the topology XML filename.
    Returns without action on a usage error; exits with -1 on packet XML
    parsing or file errors, and 0 on success.
    """
    global VERBOSE  # prevent local creation of variable
    global BUILD_ROOT  # environmental variable if set
    opts, positional = pinit().parse_args()
    # opts.verbose_flag = True
    #
    # Exactly one topology XML filename is required
    #
    if len(positional) == 0:
        print(f"Usage: {sys.argv[0]} [options] xml_filename")
        return
    if len(positional) > 1:
        print("ERROR: Too many filenames, should only have one")
        return
    xml_filename = os.path.abspath(positional[0])
    print("Processing packet file %s" % xml_filename)
    set_build_roots(os.environ.get("BUILD_ROOT"))
    packet_parser = TlmPacketParser(opts.verbose_flag, opts.dependency_file)
    try:
        packet_parser.gen_packet_file(xml_filename)
    except TlmPacketParseValueError as e:
        print("Packet XML parsing error: %s" % e)
        sys.exit(-1)
    except TlmPacketParseIOError as e:
        print("Packet XML file error: %s" % e)
        sys.exit(-1)
    sys.exit(0)
# Script entry point: run the packet generator when executed directly.
if __name__ == "__main__":
    main()

View File

@ -0,0 +1,14 @@
#!/usr/bin/bash
# Wrapper that configures PYTHONPATH/BUILD_ROOT and runs tlm_packet_gen.py.
# Quote "$0"/"$1" so paths containing spaces do not word-split.
script_dir=$(dirname "$0")
# $0 = this script
# $1 = packet file
# $2 = deployment build cache (ex. Ref/build-fprime-automatic-native)
export PYTHONPATH="$script_dir/../src"
export BUILD_ROOT="$script_dir/../../../:$2:$2/F-Prime"
echo "BUILD_ROOT=$BUILD_ROOT"
# get python from the path
python3 "$script_dir/tlm_packet_gen.py" "$1"

View File

@ -0,0 +1,20 @@
####
# pathmaker.py:
#
# The purpose of this file is to setup the F prime path automatically as part of a run of F prime. This should prevent
# the user from needing to do the same thing, and allow for a pure-Python way of running F prime tools.
####
import os
import sys
def setup_fprime_autocoder_path():
    """
    Append the F prime autocoder package directories to sys.path.

    Useful for all AC scripts: makes the Fw Python package and the autocoder
    src package importable without manual PYTHONPATH setup.
    """
    here = os.path.dirname(__file__)
    fw_package = os.path.join(here, "..", "..", "..", "Fw", "Python", "src")
    ac_package = os.path.join(here, "..", "..", "src")
    # Same order as before: Fw package first, then the autocoder package
    sys.path.extend([fw_package, ac_package])

View File

@ -0,0 +1,8 @@
ChangeLog File for Autocoders
=============================
8 August 2013
This file is manually maintained and intended to capture changes
with the Autocoders module over time.
8 August 2013
Promoted changes for new design pattern generation upstream to ISF_Development.

View File

@ -0,0 +1,27 @@
codegen.py Autocoder Tool to support F1 code generation
=======================================================
(Last updated 8 August 2013)
The codegen.py code generation produces four types of ISF codes.
These are: Component, Ports, Serializable, and Topology construction.
Each type of code has its own unique custom XML syntax. To date
there is no schema for the XML so users should use the examples
under the test folder for guidance.
codegen.py requires two external Python packages to function:
lxml and cheetah.
To install lxml go to http://lxml.de/installation.html for download and
install instructions. You might like to use the pip tool for installing
and a version is already in accurev and located at Autocoders/Python/utils/pip-1.2.1.
The other package you will need is the cheetah template language although I
think the yacgen.py will actually run without it. Really Cheetah is just
used to generate the template Python. I do not do the conversion at run-time
but precompile all templates and version control both templates and generated
python. Anyway, documentation on Cheetah is located at
http://www.cheetahtemplate.org/index.html and one can download the
software from this site as well. Cheetah is also in accurev and is
located at Autocoders/Python/utils/Cheetah-2.4.4.

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,3 @@
AMPCS schemas have been removed from the open source release.
If a JPL project would like to re-add them, see a member of the
F´ (F Prime) team.

View File

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<schema
xmlns="http://purl.oclc.org/dsdl/schematron">
<pattern id="ASYNC_PORT">
<rule context="component">
<assert test="(//component/@kind = 'active' and (count(//component/ports/port/@kind[. = 'async_input']) + count(//component/commands/command/@kind[. = 'async']) + count(//component/internal_interfaces/internal_interface) ) >= 1) or not (//component/@kind = 'active')">
Active components should have at least 1 port of kind async_input or internal_interface.
</assert>
</rule>
</pattern>
</schema>

View File

@ -0,0 +1,110 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<include href="common_elements.rng"/>
<include href="common_types.rng"/>
<start>
<ref name="array_root_define"/>
</start>
<define name="array_root_define">
<element name="array">
            <a:documentation>Array root tag.</a:documentation>
<!-- Required Attributes -->
<attribute name="name">
<a:documentation>Name of the array.</a:documentation>
<text/>
</attribute>
<!-- Optional attributes -->
<optional>
<attribute name="namespace">
<a:documentation>Namespace of the serializable object.</a:documentation>
<text/>
</attribute>
</optional>
<!-- Elements -->
<interleave>
<optional>
<!-- Optional Comments -->
<ref name="comment_define"/>
</optional>
<zeroOrMore>
<element name="include_header">
<a:documentation>Defines the header file of the interface.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<element name="import_serializable_type">
<a:documentation>Imports serializable types.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<element name="import_enum_type">
<a:documentation>Imports enum types.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<element name="import_array_type">
<a:documentation>Import array XML files.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<element name="format">
<a:documentation>Used to format data into readable content on the ground system software.
</a:documentation>
<text/>
</element>
<element name="type">
<optional>
<attribute name="size">
<a:documentation>Optional string size in addition to required array size.</a:documentation>
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="typeid">
<a:documentation>ID for the object. If not declared, autocoder generates one. Must be
unique across all array files.
</a:documentation>
<ref name="id_define"/>
</attribute>
</optional>
<a:documentation>Type of the array.</a:documentation>
<text/>
</element>
<element name="size">
<a:documentation>Size of the array.</a:documentation>
<data type="integer"/>
</element>
<element name="default">
<oneOrMore>
<element name="value">
<text/>
</element>
</oneOrMore>
</element>
</interleave>
</element>
</define>
</grammar>

View File

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<schema
xmlns="http://purl.oclc.org/dsdl/schematron">
<pattern id="DEFAULT_VALUES">
<rule context="array">
<assert test="count(current()/default/value) = current()/size">Size greater than number of default values.
</assert>
</rule>
</pattern>
</schema>

View File

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<schema
xmlns="http://purl.oclc.org/dsdl/schematron">
<pattern id="UNIQUE_ID_CHAN">
<rule context="channel">
<assert test="count(//channel/@id[. = current()/@id]) = 1">Channel ID's should be unique.</assert>
</rule>
</pattern>
</schema>

View File

@ -0,0 +1,112 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<include href="common_elements.rng"/>
<include href="common_types.rng"/>
<start>
<ref name="channel_root_define"/>
</start>
<define name="channel_root_define">
<element name="telemetry">
<optional>
<attribute name="telemetry_base">
<a:documentation>Base at which IDs start from.</a:documentation>
<ref name="base_code_define"/>
</attribute>
</optional>
<oneOrMore>
<ref name="channel_definition"/>
</oneOrMore>
</element>
</define>
<define name="channel_definition">
<element name="channel">
<a:documentation>Definition for a telemetry channel.</a:documentation>
<!-- Required Attributes -->
<attribute name="id">
<a:documentation>ID for the telemetry channel.</a:documentation>
<ref name="id_define"/>
</attribute>
<attribute name="name">
<a:documentation>Name of the telemetry channel.</a:documentation>
<text/>
</attribute>
<!-- Optional Attributes -->
<optional>
<attribute name="update">
<a:documentation>Defines when the channel updates.</a:documentation>
<ref name="channel_update_define"/>
</attribute>
</optional>
<optional>
<attribute name="abbrev">
<a:documentation>Required for AMPCS dictionary generation.</a:documentation>
<text/>
</attribute>
</optional>
<optional>
<attribute name="format_string">
<a:documentation>Used to format data into readable content on the ground system software.
</a:documentation>
<text/>
</attribute>
</optional>
<optional>
<attribute name="high_yellow">
<data type="decimal"/>
</attribute>
</optional>
<optional>
<attribute name="high_red">
<data type="decimal"/>
</attribute>
</optional>
<optional>
<attribute name="high_orange">
<data type="decimal"/>
</attribute>
</optional>
<optional>
<attribute name="low_yellow">
<data type="decimal"/>
</attribute>
</optional>
<optional>
<attribute name="low_red">
<data type="decimal"/>
</attribute>
</optional>
<optional>
<attribute name="low_orange">
<data type="decimal"/>
</attribute>
</optional>
<interleave>
<ref name="type_size_choice_define"/>
<optional>
<ref name="comment_define"/>
</optional>
</interleave>
</element>
</define>
</grammar>

View File

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<schema
xmlns="http://purl.oclc.org/dsdl/schematron">
<pattern id="UNIQUE_OPCODE_CMD">
<rule context="command">
<assert test="count(//command/@opcode[. = current()/@opcode]) = 1">Command opcodes should be unique.
</assert>
</rule>
</pattern>
</schema>

View File

@ -0,0 +1,82 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<include href="common_elements.rng"/>
<include href="common_types.rng"/>
<start>
<ref name="command_root_define"/>
</start>
<define name="command_root_define">
<element name="commands">
<optional>
<attribute name="opcode_base">
<a:documentation>Base at which the opcodes start from.</a:documentation>
<ref name="base_code_define"/>
</attribute>
</optional>
<oneOrMore>
<element name="command">
<!-- Required Attributes -->
<attribute name="kind">
<a:documentation>Command kind.</a:documentation>
<ref name="command_kind_define"/>
</attribute>
<attribute name="opcode">
<a:documentation>Command opcode.</a:documentation>
<ref name="id_define"/>
</attribute>
<attribute name="mnemonic">
<a:documentation>Command mnemonic.</a:documentation>
<text/>
</attribute>
<!-- Optional Attributes -->
<optional>
<attribute name="priority">
<a:documentation>Priority of the command.</a:documentation>
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="full">
<a:documentation>Describes what to do with incoming items if full.</a:documentation>
<ref name="full_items_define"/>
</attribute>
</optional>
<a:documentation>Defines optional comments and arguments within commands.</a:documentation>
<interleave>
<zeroOrMore>
<ref name="comment_define"/>
</zeroOrMore>
<optional>
<ref name="args_define"/>
</optional>
</interleave>
</element>
</oneOrMore>
</element>
</define>
<define name="args_define">
<element name="args">
<a:documentation>Command arguments.</a:documentation>
<zeroOrMore>
<ref name="external_arg_define"/>
</zeroOrMore>
</element>
</define>
</grammar>

View File

@ -0,0 +1,370 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar xmlns="http://relaxng.org/ns/structure/1.0" xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<!-- Definitions for items that can only have specific values -->
<define name="full_items_define">
<a:documentation>Valid values for the full tag.</a:documentation>
<choice>
<value>drop</value>
<value>assert</value>
<value>block</value>
</choice>
</define>
<define name="pass_by_define">
<a:documentation>Defines how the variable is being passed.</a:documentation>
<choice>
<value>reference</value>
<value>pointer</value>
</choice>
</define>
<define name="component_role_define">
<a:documentation>Choice for component roles.</a:documentation>
<choice>
<value>LogEvent</value>
<value>LogTextEvent</value>
<value>TimeGet</value>
<value>ParamSet</value>
<value>ParamGet</value>
<value>Telemetry</value>
<value>CmdRegistration</value>
<value>Cmd</value>
<value>CmdResponse</value>
</choice>
</define>
<define name="channel_update_define">
<a:documentation>Choice between always and on_change. This is used in the channel 'update' tag.
</a:documentation>
<choice>
<value>always</value>
<value>on_change</value>
</choice>
</define>
<define name="severity_define">
<a:documentation>Set of valid severity values. This is used for an event 'severity' tag.</a:documentation>
<choice>
<value>COMMAND</value>
<value>ACTIVITY_LO</value>
<value>ACTIVITY_HI</value>
<value>WARNING_LO</value>
<value>WARNING_HI</value>
<value>DIAGNOSTIC</value>
<value>FATAL</value>
</choice>
</define>
<define name="command_kind_define">
<a:documentation>Choice between different command kinds.</a:documentation>
<choice>
<value>async</value>
<value>sync</value>
<value>guarded</value>
</choice>
</define>
<define name="component_types_define">
<a:documentation>Choice between active, passive, or queued.</a:documentation>
<choice>
<value>active</value>
<value>passive</value>
<value>queued</value>
</choice>
</define>
<define name="port_types_define">
<a:documentation>Choice between different port types.</a:documentation>
<choice>
<value>input</value>
<value>sync_input</value>
<value>guarded_input</value>
<value>async_input</value>
<value>model_input</value>
<value>output</value>
</choice>
</define>
<!-- Data types that require regular expressions -->
<define name="id_or_system_var_define">
<a:documentation>Data types for items that can either be numbers or references to system variables that have
numbers.
</a:documentation>
<choice>
<ref name="system_var_define"/>
<ref name="id_define"/>
</choice>
</define>
<define name="base_code_define">
<a:documentation>Made for base codes, ie 0x100,0x200</a:documentation>
<choice>
<ref name="system_var_define"/>
<data type="string">
<param name="pattern">(((0?x[0-9A-Fa-f]+)|\d+)(,?))+</param>
</data>
</choice>
</define>
<define name="system_var_define">
<a:documentation>Data type for items that are system variables.</a:documentation>
<data type="string">
<param name="pattern">$[\w|_|\-]+</param>
</data>
</define>
<define name="id_define">
<a:documentation>Defines an ID data type. Acceptable values formats include "10" , "0xA" , "xA".
</a:documentation>
<data type="string">
<param name="pattern">((0?x[0-9A-Fa-f]+)|\d+)</param>
</data>
</define>
<!-- Repeated element defines -->
<define name="comment_define">
<element name="comment">
<a:documentation>Simple comment tag with no attributes.</a:documentation>
<text/>
</element>
</define>
<define name="enum_define">
<element name="enum">
<attribute name="name">
<a:documentation>Enum Name.</a:documentation>
<text/>
</attribute>
<oneOrMore>
<element name='item'>
<attribute name="name">
<a:documentation>Name of the enum item.</a:documentation>
<text/>
</attribute>
<optional>
<attribute name="value">
<a:documentation>The value being sent through the enum item.</a:documentation>
<text/>
</attribute>
</optional>
<optional>
<attribute name="comment">
<a:documentation>Comment about the enum item.</a:documentation>
<text/>
</attribute>
</optional>
</element>
</oneOrMore>
</element>
</define>
<define name="arg_define">
<a:documentation>Arguments with the pass by attribute.</a:documentation>
<element name="arg">
<!-- Required Attributes -->
<attribute name="name">
<a:documentation>Name of the argument.</a:documentation>
<text/>
</attribute>
<optional>
<attribute name="pass_by">
<a:documentation>Defines how the arguments are passed.</a:documentation>
<ref name="pass_by_define"/>
</attribute>
</optional>
<optional>
<attribute name="comment">
<a:documentation>Comments about the argument.</a:documentation>
<data type="string"/>
</attribute>
</optional>
<!-- The interleave is used to mix together Attribute typed paired with an enum element if type = ENUM and a comment block -->
<interleave>
<ref name="type_size_choice_define"/>
<optional>
<ref name="comment_define"/>
</optional>
</interleave>
</element>
</define>
<define name="return_define">
<a:documentation>Return with the pass by attribute</a:documentation>
<element name="return">
<!-- Required Attributes -->
<optional>
<attribute name="name">
<a:documentation>Name of the argument.</a:documentation>
<text/>
</attribute>
</optional>
<optional>
<attribute name="pass_by">
<a:documentation>Defines how the arguments are passed.</a:documentation>
<ref name="pass_by_define"/>
</attribute>
</optional>
<optional>
<attribute name="comment">
<a:documentation>Comments about the argument.</a:documentation>
<data type="string"/>
</attribute>
</optional>
<!-- The interleave is used to mix together Attribute typed paired with an enum element if type = ENUM and a comment block -->
<interleave>
<ref name="type_size_choice_define"/>
<optional>
<ref name="comment_define"/>
</optional>
</interleave>
</element>
</define>
<define name="external_arg_define">
<a:documentation>Arguments without the pass by attributes. Used for events, commands, and telemetry.
</a:documentation>
<element name="arg">
<!-- Required Attributes -->
<attribute name="name">
<a:documentation>Name of the argument.</a:documentation>
<text/>
</attribute>
<!-- Optional Attributes -->
<optional>
<attribute name="comment">
<a:documentation>Comments about the argument.</a:documentation>
<data type="string"/>
</attribute>
</optional>
<!-- The interleave is used to mix together Attribute typed paired with an enum element if type = ENUM and a comment block -->
<interleave>
<ref name="type_size_choice_define"/>
<optional>
<ref name="comment_define"/>
</optional>
</interleave>
</element>
</define>
<define name="type_size_choice_define">
<!-- This is a common choice within many elements. -->
<choice>
<!-- Choice one: Type = string or buffer and size is also an attribute -->
<group>
<choice>
<!-- Choice between 'type' and 'data_type'. This is to preserve compatibility with the telemetry channels -->
<attribute name="type">
<a:documentation>The type of data being passed through the argument.</a:documentation>
<choice>
<value>string</value>
<value>buffer</value>
</choice>
</attribute>
<attribute name="data_type">
<a:documentation>The type of data being passed through the argument.</a:documentation>
<choice>
<value>string</value>
<value>buffer</value>
</choice>
</attribute>
</choice>
<attribute name="size">
<a:documentation>The size of the argument.</a:documentation>
<data type="nonNegativeInteger"/>
</attribute>
<optional>
<attribute name="array_size">
<a:documentation>The length of the array.</a:documentation>
<data type="nonNegativeInteger"/>
</attribute>
</optional>
</group>
<!--Choice two: Type = ENUM and Enum element inside block -->
<group>
<choice>
<!-- Choice between 'type' and 'data_type'. This is to preserve compatibility with the telemetry channels -->
<attribute name="type">
<a:documentation>The type of data being passed through the argument.</a:documentation>
<value>ENUM</value>
</attribute>
<attribute name="data_type">
<a:documentation>The type of data being passed through the argument.</a:documentation>
<value>ENUM</value>
</attribute>
</choice>
<ref name="enum_define"/>
</group>
<!-- Choice three: Type != ENUM and Type != string and nothing inside block -->
<group>
<choice>
<!-- Choice between 'type' and 'data_type'. This is to preserve compatibility with the telemetry channels -->
<attribute name="type">
<a:documentation>The type of data being passed through the argument.</a:documentation>
<!-- Any string BUT ENUM -->
<data type="string">
<except>
<choice>
<value>ENUM</value>
<value>string</value>
</choice>
</except>
</data>
</attribute>
<attribute name="data_type">
<a:documentation>The type of data being passed through the argument.</a:documentation>
<!-- Any string BUT ENUM -->
<data type="string">
<except>
<choice>
<value>ENUM</value>
<value>string</value>
</choice>
</except>
</data>
</attribute>
</choice>
<optional>
<attribute name="array_size">
<a:documentation>The length of the array.</a:documentation>
<data type="nonNegativeInteger"/>
</attribute>
</optional>
</group>
</choice>
</define>
</grammar>

View File

@ -0,0 +1,189 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<define name="positive_integer_define">
<a:documentation>Positive, non-zero, whole numbers.</a:documentation>
<data type="integer">
<param name="minInclusive">1</param>
</data>
</define>
<!-- Raw C++ Type Defines -->
<define name="int8_t_define">
<a:documentation>Signed 8 bit integer.</a:documentation>
<data type="int">
<param name="minInclusive">-128</param>
<param name="maxInclusive">127</param>
</data>
</define>
<define name="uint8_t_define">
<a:documentation>Unsigned 8 bit integer</a:documentation>
<data type="unsignedByte">
<param name="minInclusive">0</param>
<param name="maxInclusive">255</param>
</data>
</define>
<define name="int16_t_define">
<a:documentation>Signed 16 bit integer.</a:documentation>
<data type="int">
<param name="minInclusive">-32768</param>
<param name="maxInclusive">32767</param>
</data>
</define>
<define name="uint16_t_define">
<a:documentation>Unsigned 16 bit integer</a:documentation>
<data type="int">
<param name="minInclusive">0</param>
<param name="maxInclusive">65535</param>
</data>
</define>
<define name="int32_t_define">
<a:documentation>Signed 32 bit integer.</a:documentation>
<data type="integer">
<param name="minInclusive">-2147483648</param>
<param name="maxInclusive">2147483647</param>
</data>
</define>
<define name="uint32_t_define">
<a:documentation>Unsigned 32 bit integer</a:documentation>
<data type="integer">
<param name="minInclusive">0</param>
<param name="maxInclusive">4294967295</param>
</data>
</define>
<define name="int64_t_define">
<a:documentation>Signed 64 bit integer.</a:documentation>
<data type="integer">
<param name="minInclusive">-9223372036854775808</param>
<param name="maxInclusive">9223372036854775807</param>
</data>
</define>
<define name="uint64_t_define">
<a:documentation>Unsigned 64 bit integer</a:documentation>
<data type="integer">
<param name="minInclusive">0</param>
<param name="maxInclusive">18446744073709551615</param>
</data>
</define>
<!-- User C++ Type defines (Found from BasicTypes.hpp) -->
<define name="not_user_cpp_type_define">
<a:documentation>Ensures data is not of the names of any other user defined C++ name.</a:documentation>
<data type="string">
<except>
<choice>
<value>int32_t</value>
<value>uint32_t</value>
<value>I8</value>
<value>U8</value>
<value>I16</value>
<value>U16</value>
<value>I32</value>
<value>U32</value>
<value>I64</value>
<value>U64</value>
<value>F32</value>
<value>F64</value>
<value>NATIVE_INT_TYPE</value>
<value>NATIVE_UINT_TYPE</value>
<value>string</value>
</choice>
</except>
</data>
</define>
<define name="NATIVE_INT_TYPE_define">
<a:documentation>native integer type declaration</a:documentation>
<ref name="int32_t_define">
</ref>
</define>
<define name="NATIVE_UINT_TYPE_define">
<a:documentation>native unsigned integer type declaration</a:documentation>
<ref name="uint32_t_define">
</ref>
</define>
<define name="I8_define">
<a:documentation>8-bit signed integer</a:documentation>
<ref name="int8_t_define">
</ref>
</define>
<define name="U8_define">
<a:documentation>8-bit unsigned integer</a:documentation>
<ref name="uint8_t_define">
</ref>
</define>
<define name="BYTE_define">
<a:documentation>byte type</a:documentation>
<ref name="U8_define">
</ref>
</define>
<define name="I16_define">
<ref name="int16_t_define">
<a:documentation>16-bit signed integer</a:documentation>
</ref>
</define>
<define name="U16_define">
<a:documentation>16-bit unsigned integer</a:documentation>
<ref name="uint16_t_define">
</ref>
</define>
<define name="I32_define">
<a:documentation>32-bit signed integer</a:documentation>
<ref name="int32_t_define">
</ref>
</define>
<define name="U32_define">
        <a:documentation>32-bit unsigned integer</a:documentation>
<ref name="uint32_t_define">
</ref>
</define>
<define name="I64_define">
        <a:documentation>64-bit signed integer</a:documentation>
<ref name="int64_t_define">
</ref>
</define>
<define name="U64_define">
<a:documentation>64-bit unsigned integer</a:documentation>
<ref name="uint64_t_define">
</ref>
</define>
<define name="F32_define">
<a:documentation>32 bit float</a:documentation>
<!--
Use text instead of "float" here
"Float" does not accept the C++ syntax for F32 literal values, e.g., 1.0f
<data type="float">
</data>
-->
<text/>
</define>
<define name="F64_define">
<a:documentation>64 bit float</a:documentation>
<data type="double">
</data>
</define>
</grammar>

View File

@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<schema
xmlns="http://purl.oclc.org/dsdl/schematron">
<pattern id="UNIQUE_OP_COMP_CMD">
<rule context="command">
<assert test="count(//command/@opcode[. = current()/@opcode]) = 1">Command opcodes should be unique.
</assert>
</rule>
</pattern>
<pattern id="UNIQUE_ID_COMP_PARAM">
<rule context="parameter">
<assert test="count(//parameter/@id[. = current()/@id]) = 1">Parameter ID's should be unique.</assert>
</rule>
</pattern>
<pattern id="UNIQUE_ID_COMP_EVR">
<rule context="event">
<assert test="count(//event/@id[. = current()/@id]) = 1">Event ID's should be unique.</assert>
</rule>
</pattern>
<pattern id="UNIQUE_ID_COMP_CHAN">
<rule context="channel">
<assert test="count(//channel/@id[. = current()/@id]) = 1">Channel ID's should be unique.</assert>
</rule>
</pattern>
</schema>

View File

@ -0,0 +1,218 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
xmlns:elements="common_elements.rng"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<include href="common_elements.rng"/>
<include href="common_types.rng"/>
<start>
<element name="component">
<a:documentation>Component root tag.</a:documentation>
<ref name="component_define"/>
</element>
</start>
<define name="component_define">
<!-- Required attributes -->
<attribute name="name">
<a:documentation>Name of the component.</a:documentation>
<text/>
</attribute>
<attribute name="kind">
<a:documentation>Choice between active, passive, and queued.</a:documentation>
<ref name="component_types_define"/>
</attribute>
<!-- Optional attributes -->
<optional>
<attribute name="namespace">
<a:documentation>The namespace in which the component is located in.</a:documentation>
<text/>
</attribute>
</optional>
<optional>
<attribute name="modeler">
<data type="boolean"/>
</attribute>
</optional>
<interleave>
<!-- Import external XML files. -->
<zeroOrMore>
<ref name="import_port_define"/>
</zeroOrMore>
<zeroOrMore>
<ref name="import_dict_define"/>
</zeroOrMore>
<zeroOrMore>
<ref name="import_header_define"/>
</zeroOrMore>
<zeroOrMore>
<ref name="import_serializable_define"/>
</zeroOrMore>
<zeroOrMore>
<ref name="import_enum_define"/>
</zeroOrMore>
<zeroOrMore>
<ref name="import_array_define"/>
</zeroOrMore>
<!-- Optional elements -->
<optional>
<ref name="ports_define"/>
</optional>
<optional>
<ref name="comment_define"/>
</optional>
<!-- These elements can be imported from their own XML files -->
<optional>
<externalRef href="channel_schema.rng"/>
</optional>
<optional>
<externalRef href="event_schema.rng"/>
</optional>
<optional>
<externalRef href="command_schema.rng"/>
</optional>
<optional>
<externalRef href="internal_interface_schema.rng"/>
</optional>
<optional>
<externalRef href="parameters_schema.rng"/>
</optional>
</interleave>
</define>
<define name="ports_define">
<element name="ports">
<a:documentation>A collection of ports.</a:documentation>
<oneOrMore>
<ref name="port_define"/>
</oneOrMore>
</element>
</define>
<define name="port_define">
<element name="port">
            <a:documentation>Element that specifies external interfaces that can connect and be connected to.
            </a:documentation>
<!-- Required Attributes -->
<attribute name="name">
<a:documentation>Name of the port.</a:documentation>
<text/>
</attribute>
<attribute name="data_type">
<a:documentation>Type of data that is being accessed/sent from the port.</a:documentation>
<text/>
</attribute>
<attribute name="kind">
<a:documentation>Defines if port is an input or an output port.</a:documentation>
<ref name="port_types_define"/>
</attribute>
<!-- Optional Attributes -->
<optional>
<attribute name="max_number">
<a:documentation>Defines how many connections can be established to this port.</a:documentation>
<ref name="id_or_system_var_define"/>
</attribute>
</optional>
<optional>
<attribute name="role">
<a:documentation>Specifies what role this port plays or what this port is connected to.
</a:documentation>
<ref name="component_role_define"/>
</attribute>
</optional>
<optional>
<attribute name="priority">
<a:documentation>Priority of port.</a:documentation>
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="full">
<a:documentation>Describes what to do with incoming items if full.</a:documentation>
<elements:ref name="full_items_define"></elements:ref>
</attribute>
</optional>
<!-- Optional Element -->
<optional>
<a:documentation>Optional element that can be inside the port tags.</a:documentation>
<ref name="comment_define"/>
</optional>
</element>
</define>
<define name="import_port_define">
<element name="import_port_type">
<a:documentation>Path to port XML definition.</a:documentation>
<data type="anyURI"/>
</element>
</define>
<define name="import_dict_define">
<element name="import_dictionary">
<a:documentation>Path to events,commands,or telemetry XML files.</a:documentation>
<data type="anyURI"/>
</element>
</define>
<define name="import_header_define">
<element name="include_header">
<a:documentation>Path to header file.</a:documentation>
<data type="anyURI"/>
</element>
</define>
<define name="import_serializable_define">
<element name="import_serializable_type">
<a:documentation>Path to serializable types</a:documentation>
<data type="anyURI"/>
</element>
</define>
<define name="import_enum_define">
<element name="import_enum_type">
<a:documentation>Path to enum types</a:documentation>
<data type="anyURI"/>
</element>
</define>
<define name="import_array_define">
<element name="import_array_type">
<a:documentation>Import array XML files.</a:documentation>
<data type="anyURI"/>
</element>
</define>
</grammar>

View File

@ -0,0 +1,3 @@
AMPCS schemas have been removed from the open source release.
If a JPL project would like to re-add them, see a member of the
F´ (F Prime) team.

Binary file not shown.

View File

@ -0,0 +1,86 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<include href="common_elements.rng"/>
<include href="common_types.rng"/>
<start>
<ref name="enum_root_define"/>
</start>
<define name="enum_root_define">
<element name="enum">
<a:documentation>Component root tag.</a:documentation>
<!-- Required Attributes -->
<attribute name="name">
<a:documentation>Name of the enum.</a:documentation>
<text/>
</attribute>
<!-- Optional attributes -->
<optional>
<attribute name="namespace">
<a:documentation>Namespace of the serializable object.</a:documentation>
<text/>
</attribute>
</optional>
<optional>
<attribute name = "default">
<a:documentation>Default value of the enum.</a:documentation>
<text/>
</attribute>
</optional>
<optional>
<attribute name = "serialize_type">
<a:documentation>Serialization type of the enum.</a:documentation>
<text/>
</attribute>
</optional>
<!-- Elements -->
<interleave>
<optional>
<!-- Optional Comments -->
<ref name="comment_define"/>
</optional>
<oneOrMore>
<ref name="item_definition"/>
</oneOrMore>
</interleave>
</element>
</define>
<define name="item_definition">
<element name="item">
<a:documentation>Definition for a telemetry channel.</a:documentation>
<!-- Required Attributes -->
<attribute name="name">
<a:documentation>Name of the item.</a:documentation>
<text/>
</attribute>
<!-- Optional Attributes -->
<optional>
<attribute name="value">
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="comment">
<text/>
</attribute>
</optional>
</element>
</define>
</grammar>

View File

@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Schematron rules applied to enum XML definitions. -->
<schema
        xmlns="http://purl.oclc.org/dsdl/schematron">
    <!-- Either every <item> carries an explicit value attribute, or none does. -->
    <pattern id="VALUE_EXISTS">
        <rule context="item">
            <assert test="count(//item[@value]) = count(//item) or count(//item[@value]) = 0">Either all enum items
                should have a value or none should
            </assert>
        </rule>
    </pattern>
    <!-- No two <item> elements may share the same value attribute. -->
    <pattern id="VALUE_UNIQUE">
        <rule context="item">
            <assert test="count(//item/@value[. = current()/@value]) = 1">Enum item values should be unique</assert>
        </rule>
    </pattern>
</schema>

View File

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Schematron rule applied to event XML definitions. -->
<schema
        xmlns="http://purl.oclc.org/dsdl/schematron">
    <!-- No two <event> elements may share the same id attribute. -->
    <pattern id="UNIQUE_ID_EVR">
        <rule context="event">
            <assert test="count(//event/@id[. = current()/@id]) = 1">Event ID's should be unique.</assert>
        </rule>
    </pattern>
</schema>

View File

@ -0,0 +1,86 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<include href="common_elements.rng"/>
<include href="common_types.rng"/>
<start>
<ref name="event_root_define"/>
</start>
<define name="event_root_define">
<element name="events">
<a:documentation>Defines different events for a component.</a:documentation>
<optional>
<attribute name="event_base">
<a:documentation>Base at which ids start from.</a:documentation>
<ref name="base_code_define"/>
</attribute>
</optional>
<oneOrMore>
<ref name="event_define"/>
</oneOrMore>
</element>
</define>
<define name="event_define">
<element name="event">
<a:documentation>Defines a specific event for a component.</a:documentation>
<!-- Required Attributes -->
<attribute name="name">
<a:documentation>Name of the event.</a:documentation>
<text/>
</attribute>
<attribute name="id">
<a:documentation>Event ID.</a:documentation>
<ref name="id_define"/>
</attribute>
<attribute name="severity">
<a:documentation>Severity of event.</a:documentation>
<ref name="severity_define"/>
</attribute>
<attribute name="format_string">
<a:documentation>Output string that will be shown in ground system software.</a:documentation>
<text/>
</attribute>
<!-- Optional Attributes -->
<optional>
<attribute name="throttle">
<a:documentation>Determines how many of the events are generated before the program stops them.
</a:documentation>
<data type="nonNegativeInteger"/>
</attribute>
</optional>
<interleave>
<optional>
<ref name="comment_define"/>
</optional>
<optional>
<ref name="args_define"/>
</optional>
</interleave>
</element>
</define>
<define name="args_define">
<element name="args">
<zeroOrMore>
<ref name="external_arg_define"/>
</zeroOrMore>
</element>
</define>
</grammar>

View File

@ -0,0 +1,108 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<!-- This is a common file for both interface_schema and internal_interface_schema. These two were originally merged into this file, but had to be split as a workaround to keep the same definitions but not validate component files that had the interface tag in it. -->
<include href="common_elements.rng"/>
<start>
<ref name="interface_root_define"/>
</start>
<define name="interface_root_define">
<a:documentation>Used for interface files.</a:documentation>
<choice>
<!--Choice between interface or port root tag.-->
<element name="interface">
<a:documentation>Allows for one interface.</a:documentation>
<ref name="interface_define"/>
</element>
<element name="port">
<a:documentation>Allows for one interface.</a:documentation>
<ref name="interface_define"/>
</element>
</choice>
</define>
<define name="interface_define">
<interleave>
<!-- Required Attributes -->
<attribute name="name">
<a:documentation>Interface name.</a:documentation>
<text/>
</attribute>
<!-- Optional Attributes -->
<optional>
<attribute name="priority">
<data type="integer"/>
</attribute>
</optional>
<optional>
<attribute name="namespace">
<a:documentation>Namespace in which the interface is in.</a:documentation>
<text/>
</attribute>
</optional>
<!-- Elements -->
<zeroOrMore>
<element name="include_header">
<a:documentation>Defines the header file of the interface.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<element name="import_serializable_type">
<a:documentation>Imports serializable types.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<element name="import_enum_type">
<a:documentation>Imports enum types.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<element name="import_array_type">
<a:documentation>Import array XML files.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<ref name="args_define"/>
</zeroOrMore>
<optional>
<ref name="return_define"/>
</optional>
<optional>
<ref name="comment_define"/>
</optional>
</interleave>
</define>
<define name="args_define">
<element name="args">
<a:documentation>One or more arguments.</a:documentation>
<zeroOrMore>
<ref name="arg_define"/>
</zeroOrMore>
</element>
</define>
</grammar>

View File

@ -0,0 +1,78 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<!-- This is a common file for both interface_schema and internal_interface_schema. These two were originally merged into this file, but had to be split as a workaround to keep the same definitions but not validate component files that had the interface tag in it. -->
<include href="common_elements.rng"/>
<start>
<ref name="internal_interface_root_define"/>
</start>
<define name="internal_interface_root_define">
<a:documentation>Used for interfaces within components.</a:documentation>
<element name="internal_interfaces">
<a:documentation>Allows for multiple interfaces.</a:documentation>
<oneOrMore>
<element name="internal_interface">
<ref name="interface_define"/>
</element>
</oneOrMore>
</element>
</define>
<define name="interface_define">
<interleave>
<!-- Required Attributes -->
<attribute name="name">
<a:documentation>Interface name.</a:documentation>
<text/>
</attribute>
<!-- Optional Attributes -->
<optional>
<a:documentation>Describes what to do if items are full.</a:documentation>
<attribute name="full">
<ref name="full_items_define"/>
</attribute>
</optional>
<optional>
<attribute name="priority">
<data type="integer"/>
</attribute>
</optional>
<!-- Elements -->
<zeroOrMore>
<element name="include_header">
<a:documentation>Defines the header file of the interface.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<ref name="args_define"/>
</zeroOrMore>
<optional>
<ref name="comment_define"/>
</optional>
</interleave>
</define>
<define name="args_define">
<element name="args">
<a:documentation>One or more arguments.</a:documentation>
<zeroOrMore>
<ref name="arg_define"/>
</zeroOrMore>
</element>
</define>
</grammar>

View File

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- Schematron rule applied to parameter XML definitions. -->
<schema
        xmlns="http://purl.oclc.org/dsdl/schematron">
    <pattern id="UNIQUE_ID_PARAM">
        <rule context="parameter">
            <!-- Message fixed: said "Channel ID's" — copy-paste from the channel schematron;
                 the rule actually checks parameter/@id uniqueness. -->
            <assert test="count(//parameter/@id[. = current()/@id]) = 1">Parameter ID's should be unique.</assert>
        </rule>
    </pattern>
</schema>

View File

@ -0,0 +1,265 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<include href="common_elements.rng"/>
<include href="common_types.rng"/>
<start>
<ref name="parameters_root_define"/>
</start>
<define name="parameters_root_define">
<element name="parameters">
<!-- Attributes -->
<optional>
<attribute name="parameter_base">
<ref name="base_code_define"/>
</attribute>
</optional>
<optional>
<attribute name="opcode_base">
<ref name="base_code_define"/>
</attribute>
</optional>
<!-- Elements -->
<oneOrMore>
<ref name="parameter_define"/>
</oneOrMore>
</element>
</define>
<define name="parameter_define">
<element name="parameter">
<a:documentation>Parameter definition.</a:documentation>
<!-- Attributes -->
<attribute name="id">
<a:documentation>ID of the attribute.</a:documentation>
<ref name="id_define"/>
</attribute>
<attribute name="set_opcode">
<a:documentation>Opcode for setting the parameter.</a:documentation>
<ref name="id_define"/>
</attribute>
<attribute name="save_opcode">
<a:documentation>Opcode for saving the parameter.</a:documentation>
<ref name="id_define"/>
</attribute>
<attribute name="name">
<a:documentation>Parameter name</a:documentation>
<text/>
</attribute>
<ref name="data_type_and_default_define"/>
<!-- Elements -->
<optional>
<ref name="comment_define"/>
</optional>
</element>
</define>
<define name="data_type_and_default_define">
<!-- If the data_type attribute isn't one that is explicitly specified, the default case will make it pass. -->
<a:documentation>Makes attribute pair choices to match data type with default value.</a:documentation>
<choice>
<group>
<a:documentation>Default pair.</a:documentation>
<attribute name="data_type">
<ref name="not_user_cpp_type_define"/>
</attribute>
<optional>
<attribute name="default">
<text/>
</attribute>
</optional>
</group>
<group>
<a:documentation>Enum pair.</a:documentation>
<attribute name="data_type">
<value>ENUM</value>
</attribute>
<optional>
<attribute name="default">
</attribute>
</optional>
<optional>
<ref name="enum_define"/>
</optional>
</group>
<group>
<a:documentation>String pair.</a:documentation>
<attribute name="data_type">
<value>string</value>
</attribute>
<optional>
<attribute name="default">
<data type="string"/>
</attribute>
</optional>
<attribute name="size">
<ref name="positive_integer_define"/>
</attribute>
</group>
<group>
<a:documentation>I8 pair.</a:documentation>
<attribute name="data_type">
<value>I8</value>
</attribute>
<optional>
<attribute name="default">
<ref name="I8_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>U8 pair.</a:documentation>
<attribute name="data_type">
<value>U8</value>
</attribute>
<optional>
<attribute name="default">
<ref name="U8_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>I16 pair.</a:documentation>
<attribute name="data_type">
<value>I16</value>
</attribute>
<optional>
<attribute name="default">
<ref name="I16_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>U16 pair.</a:documentation>
<attribute name="data_type">
<value>U16</value>
</attribute>
<optional>
<attribute name="default">
<ref name="U16_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>I32 pair.</a:documentation>
<attribute name="data_type">
<value>I32</value>
</attribute>
<optional>
<attribute name="default">
<ref name="I32_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>U32 pair.</a:documentation>
<attribute name="data_type">
<value>U32</value>
</attribute>
<optional>
<attribute name="default">
<ref name="U32_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>I64 pair.</a:documentation>
<attribute name="data_type">
<value>I64</value>
</attribute>
<optional>
<attribute name="default">
<ref name="I64_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>U64 pair.</a:documentation>
<attribute name="data_type">
<value>U64</value>
</attribute>
<optional>
<attribute name="default">
<ref name="U64_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>F32 pair.</a:documentation>
<attribute name="data_type">
<value>F32</value>
</attribute>
<optional>
<attribute name="default">
<ref name="F32_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>F64 pair.</a:documentation>
<attribute name="data_type">
<value>F64</value>
</attribute>
<optional>
<attribute name="default">
<ref name="F64_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>NATIVE_INT_TYPE pair.</a:documentation>
<attribute name="data_type">
<value>NATIVE_INT_TYPE</value>
</attribute>
<optional>
<attribute name="default">
<ref name="NATIVE_INT_TYPE_define"/>
</attribute>
</optional>
</group>
<group>
<a:documentation>NATIVE_UINT_TYPE pair.</a:documentation>
<attribute name="data_type">
<value>NATIVE_UINT_TYPE</value>
</attribute>
<optional>
<attribute name="default">
<ref name="NATIVE_UINT_TYPE_define"/>
</attribute>
</optional>
</group>
</choice>
</define>
</grammar>

View File

@ -0,0 +1,132 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<include href="common_elements.rng"/>
<start>
<element name="serializable">
<!-- Required attributes -->
<attribute name="name">
<a:documentation>Name of the serializable object.</a:documentation>
<text/>
</attribute>
<!-- Optional attributes -->
<optional>
<attribute name="namespace">
<a:documentation>Namespace of the serializable object.</a:documentation>
<text/>
</attribute>
</optional>
<optional>
<attribute name="typeid">
<a:documentation>ID for the object. If not declared, the autocoder generates one. Must be unique
across all serializable files.
</a:documentation>
<ref name="id_define"/>
</attribute>
</optional>
<interleave>
<!-- Required elements -->
<ref name="members_define"/>
<!-- Optional elements -->
<zeroOrMore>
<element name="import_serializable_type">
<a:documentation>Import more serializable objects.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<element name="include_header">
<a:documentation>Import header files.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<element name="import_enum_type">
<a:documentation>Import enum XML files.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<zeroOrMore>
<element name="import_array_type">
<a:documentation>Import array XML files.</a:documentation>
<data type="anyURI"/>
</element>
</zeroOrMore>
<optional>
<!-- Optional Comments -->
<ref name="comment_define"/>
</optional>
</interleave>
</element>
</start>
<define name="members_define">
<!-- 'members' is just a tag that hold multiple 'member' tags -->
<element name="members">
<a:documentation>A collection of member items to define the serializable object.</a:documentation>
<oneOrMore>
<ref name="member_define"/>
</oneOrMore>
</element>
</define>
<define name="member_define">
<!-- Defines each item that one of these objects can store -->
<element name="member">
<a:documentation>Item define in the serializable object.</a:documentation>
<!-- Required attributes -->
<attribute name="name">
<a:documentation>Name of the member item.</a:documentation>
<text/>
</attribute>
<!-- Optional attributes -->
<optional>
<a:documentation>Optional comment.</a:documentation>
<attribute name="comment">
<text/>
</attribute>
</optional>
<optional>
<a:documentation>Format string</a:documentation>
<attribute name="format">
<text/>
</attribute>
</optional>
<!-- Optional element -->
<interleave>
<optional>
<a:documentation>Optional default value.</a:documentation>
<element name="default">
<text/>
</element>
</optional>
<!-- Type, size, and internal enum define are defined within this ref -->
<ref name="type_size_choice_define"/>
</interleave>
</element>
</define>
</grammar>

View File

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<schema
xmlns="http://purl.oclc.org/dsdl/schematron">
<pattern id="UNIQUE_WINDOW_ID">
<rule context="instance">
<assert test="count(//instance[((./@base_id+./@base_id_window)>current()/@base_id and (current()/@base_id+current()/@base_id_window)>(./@base_id))])=1 or (count(//instance[((./@base_id+./@base_id_window)=current()/@base_id and (current()/@base_id+current()/@base_id_window)=(./@base_id))])=1)">
Range of id's for every instance should be non-overlapping.
</assert>
</rule>
</pattern>
</schema>

View File

@ -0,0 +1,239 @@
<?xml version="1.0" encoding="UTF-8"?>
<grammar
xmlns="http://relaxng.org/ns/structure/1.0"
xmlns:a="http://relaxng.org/ns/compatibility/annotations/1.0"
datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
<include href="common_elements.rng"/>
<include href="common_types.rng"/>
<a:documentation>Test outside START</a:documentation>
<start>
<!-- Specifies that the root tag can either be "deployment" or "assembly". -->
<a:documentation>Test inside START</a:documentation>
<choice>
<element name="deployment">
<ref name="root_definition"/>
</element>
<element name="assembly">
<ref name="root_definition"/>
</element>
</choice>
</start>
<define name="root_definition">
<!-- Actual attributes and elements inside the root tag. -->
<!-- Attributes -->
<optional>
<attribute name="name">
<a:documentation>Name of the project.</a:documentation>
</attribute>
</optional>
<optional>
<attribute name="base_id">
<a:documentation>Specifies the base ID for the project.</a:documentation>
<ref name="id_define"/>
</attribute>
</optional>
<optional>
<choice>
<attribute name="base_id_window">
<a:documentation>Specifies the default ID range to use for the project. If this value is set to
zero, ISFGen will find an appropriate range from the component XML files. Attribute tag
'base_id_range' can also be used.
</a:documentation>
<data type="nonNegativeInteger"/>
</attribute>
<attribute name="base_id_range">
<a:documentation>Specifies the default ID range to use for the project. If this value is set to
zero, ISFGen will find an appropriate range from the component XML files. Attribute tag
'base_id_window' can also be used.
</a:documentation>
<data type="nonNegativeInteger"/>
</attribute>
</choice>
</optional>
<optional>
<attribute name="prepend_instance_name">
<a:documentation>If True, instance names will be prepended onto command,channel,and event generated
dictionaries in single instance scenarios.
</a:documentation>
<data type="boolean"/>
</attribute>
</optional>
<optional>
<attribute name="deployment">
<a:documentation>Name of the deployment.</a:documentation>
</attribute>
</optional>
<!-- Elements_ -->
<interleave>
<optional>
<ref name="comment_define"/>
</optional>
<zeroOrMore>
<a:documentation>Import of component type XML files.</a:documentation>
<ref name="import_definition"/>
</zeroOrMore>
<oneOrMore>
<a:documentation>Declares instances of objects.</a:documentation>
<ref name="instance_definition"/>
</oneOrMore>
<oneOrMore>
<a:documentation>Creates connections between objects and their ports.</a:documentation>
<ref name="connection_definition"/>
</oneOrMore>
</interleave>
</define>
<define name="import_definition">
<!-- Definition for "import_component_type" tags.-->
<element name="import_component_type">
<a:documentation>Value should be a path to a component XML file that is used as an instance.
</a:documentation>
<data type="anyURI"/>
</element>
</define>
<define name="instance_definition">
<!-- Defines attributes for "instance" tags. -->
<element name="instance">
<a:documentation>Instance of component within the model.</a:documentation>
<!-- Needed Attributes -->
<attribute name="namespace">
<a:documentation>Specifies the name space for the instance object.</a:documentation>
</attribute>
<attribute name="name">
<a:documentation>Specifies the name for the instance object.</a:documentation>
</attribute>
<attribute name="type">
<a:documentation>Specifies the type of the instance object.</a:documentation>
</attribute>
<!-- Optional Attributes -->
<optional>
<attribute name="dict_short_name">
<a:documentation>Specifies a name that can be used instead of the "name" attribute in ISFGen
dictionaries.
</a:documentation>
</attribute>
</optional>
<optional>
<attribute name="kind">
<a:documentation>Specifies the kind of component.</a:documentation>
<ref name="component_types_define"/>
</attribute>
</optional>
<optional>
<attribute name="base_id">
<a:documentation>Specifies a base ID for this instance.</a:documentation>
<ref name="id_define"/>
</attribute>
</optional>
<optional>
<choice>
<attribute name="base_id_window">
<a:documentation>Specifies an ID range the instance will occupy, starting at the base ID. Can
alternatively use the "base_id_range" tag.
</a:documentation>
<data type="nonNegativeInteger"/>
</attribute>
<attribute name="base_id_range">
<a:documentation>Specifies an ID range the instance will occupy, starting at the base ID. Can
alternatively use the "base_id_window" tag.
</a:documentation>
<data type="nonNegativeInteger"/>
</attribute>
</choice>
</optional>
</element>
</define>
<define name="connection_definition">
<element name="connection">
<a:documentation>Creates a new connection between source and target objects.</a:documentation>
<!-- Attributes -->
<attribute name="name">
<a:documentation>Specifies a unique connection name. EX: Connection1</a:documentation>
</attribute>
<optional>
<attribute name="type">
<a:documentation>Specifies connection type.</a:documentation>
</attribute>
</optional>
<!-- Elements -->
<interleave>
<optional>
<ref name="comment_define"/>
</optional>
<element name="source">
<a:documentation>Source Connection End.</a:documentation>
<ref name="connection_end_define"/>
</element>
<element name="target">
<a:documentation>Target Connection End</a:documentation>
<ref name="connection_end_define"/>
</element>
</interleave>
</element>
</define>
<define name="connection_end_define">
<!-- Specifies attributes for both target and source components.-->
<a:documentation>Specifies information about a connection end.</a:documentation>
<!-- Required attributes -->
<attribute name="component">
<a:documentation>Specifies the instance name of the connection end's component. Name must match an "name"
attribute from specified "instance" tags.
</a:documentation>
</attribute>
<attribute name="port">
<a:documentation>Specifies the port name on the object that the connection is attached to.</a:documentation>
<text/>
</attribute>
<attribute name="type">
<a:documentation>Specifies the type of the connection end. Generally, this type will match the "target"
type, unless connected to a port of type serial.
</a:documentation>
<text/>
</attribute>
<!-- Optional attributes -->
<optional>
<attribute name="num">
<a:documentation>Specifies the multiplicity or index of a port that is being connected to. Generally,
this value will be zero unless multiple indexes of the port exist.
</a:documentation>
<data type="nonNegativeInteger"/>
</attribute>
</optional>
</define>
</grammar>

View File

@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<telemetry telemetry_base="x1">
<channel id="0x0" name="ChanOne" update="always" data_type="I8" high_orange="30">
<comment>Channel One</comment>
</channel>
<channel id = "x1" name = "ChanTwo" update="on_change" data_type="ENUM">
<comment>Channel Two Coordinates</comment>
<enum name="coor">
<item name="x" comment="x coor"></item>
<item name="y"/>
</enum>
</channel>
<channel id="2" name="ChanThree" data_type="string" size = '10'>
<comment>Channel Three</comment>
</channel>
</telemetry>

View File

@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<telemetry telemetry_base="x1">
<channel id="0x0" name="ChanOne" update="always" data_type="I8" high_orange="med">
<comment>Channel One</comment>
</channel>
<channel id = "x1" name = "ChanTwo" update="on_change" data_type="ENUM">
<comment>Channel Two Coordinates</comment>
<enum name="coor">
<item name="x" comment="x coor"></item>
<item name="y"/>
</enum>
</channel>
<channel id="2" name="ChanThree" data_type="string">
<comment>Channel Three</comment>
</channel>
</telemetry>

View File

@ -0,0 +1,15 @@
<?xml version="1.0" encoding="UTF-8"?>
<telemetry telemetry_base="x1">
<channel id="0x0" name="ChanOne" update="always" data_type="I8" high_orange="30">
</channel>
<channel id = "x1" name = "ChanTwo" update="on_change" data_type="ENUM">
<comment>Channel Two Coordinates</comment>
<enum name="coor">
<item name="x" comment="x coor"></item>
<item name="y"/>
</enum>
</channel>
<channel id="2" name="ChanThree" data_type="string">
<comment>Channel Three</comment>
</channel>
</telemetry>

View File

@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<telemetry telemetry_base="x1">
<channel id="0x0" name="ChanOne" update="always" high_orange="30">
<comment>Channel One</comment>
</channel>
<channel id = "x1" name = "ChanTwo" update="on_change" data_type="ENUM">
<comment>Channel Two Coordinates</comment>
<enum name="coor">
<item name="x" comment="x coor"></item>
<item name="y"/>
</enum>
</channel>
<channel id="2" name="ChanThree" data_type="string">
<comment>Channel Three</comment>
</channel>
</telemetry>

View File

@ -0,0 +1,12 @@
<?xml version="1.0" encoding="UTF-8"?>
<telemetry telemetry_base="x1">
<channel id="0x0" name="ChanOne" update="always" data_type="I8" high_orange="30">
<comment>Channel One</comment>
</channel>
<channel id = "x1" name = "ChanTwo" update="on_change" data_type="ENUM">
<comment>Channel Two Coordinates</comment>
</channel>
<channel id="2" name="ChanThree" data_type="string">
<comment>Channel Three</comment>
</channel>
</telemetry>

View File

@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<commands opcode_base="$OPCODEBASE">
<command kind="async" opcode="0" mnemonic="Flight Command Zero">
<comment>Flight Command Zero executes very simple command.</comment>
</command>
<command kind = "sync" opcode="1" mnemonic="Flight Command One">
<comment>Flight Command One executes complex command.</comment>
<args>
<arg name="Throttle" type="I8" size="8" comment="Throttle Value">
<comment>Controls the throttle value.</comment>
</arg>
<arg name="Rotation" type = "ENUM">
<enum name="RotationValues">
<item name="Yaw" value="-1"></item>
<item name="Pitch" value = "0" comment = "Around y."/>
<item name = "Role" comment = "Around z."></item>
</enum>
</arg>
<arg name="Display" type="string" size="15">
<comment>Message to pop up onto onboard flight 8 bit display.</comment>
</arg>
</args>
</command>
<command kind="async" opcode="2" mnemonic="Flight Command Two"></command>
</commands>

View File

@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<commands opcode_base="$OPCODEBASE">
<command kind="async" opcode="0" mnemonic="Flight Command Zero" size = "10">
<comment>Flight Command Zero executes very simple command.</comment>
</command>
<command kind = "sync" opcode="1" mnemonic="Flight Command One">
<comment>Flight Command One executes complex command.</comment>
<args>
<arg name="Throttle" type="I8" size="8" pass_by="reference" comment="Throttle Value">
<comment>Controls the throttle value.</comment>
</arg>
<arg name="Rotation" type = "ENUM">
<enum name="RotationValues">
<item name="Yaw" value="-1"></item>
<item name="Pitch" value = "0" comment = "Around y."/>
<item name = "Role" comment = "Around z."></item>
</enum>
</arg>
<arg name="Display" type="string" size="15">
<comment>Message to pop up onto onboard flight 8 bit display.</comment>
</arg>
</args>
</command>
<command kind="async-sync" opcode="2" mnemonic="Flight Command Two"></command>
</commands>

View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<commands opcode_base="$OPCODEBASE">
<command kind="async" opcode="0" mnemonic="Flight Command Zero" size = "10">
<comment>Flight Command Zero executes very simple command.</comment>
</command>
<command kind = "sync" opcode="1" mnemonic="Flight Command One">
<comment>Flight Command One executes complex command.</comment>
<args>
<arg name="Throttle" type="I8" size="8" pass_by="reference" comment="Throttle Value">
<comment>Controls the throttle value.</comment>
</arg>
<arg name="Rotation" type = "ENUM">
<comment>Enum is missing here!</comment>
</arg>
<arg name="Display" type="string" size="15">
<comment>Message to pop up onto onboard flight 8 bit display.</comment>
</arg>
</args>
</command>
<command kind="async" opcode="2" mnemonic="Flight Command Two"></command>
</commands>

View File

@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<commands opcode_base="$OPCODEBASE">
<command kind="async" opcode="0" mnemonic="Flight Command Zero" size = "-10">
<comment>Flight Command Zero executes very simple command.</comment>
</command>
<command kind = "sync" opcode="1" mnemonic="Flight Command One">
<comment>Flight Command One executes complex command.</comment>
<args>
<arg name="Throttle" type="I8" size="8" pass_by="reference" comment="Throttle Value">
<comment>Controls the throttle value.</comment>
</arg>
<arg name="Rotation" type = "ENUM">
<enum name="RotationValues">
<item name="Yaw" value="-1"></item>
<item name="Pitch" value = "0" comment = "Around y."/>
<item name = "Role" comment = "Around z."></item>
</enum>
</arg>
<arg name="Display" type="string" size="15">
<comment>Message to pop up onto onboard flight 8 bit display.</comment>
</arg>
</args>
</command>
<command kind="async" opcode="2" mnemonic="Flight Command Two"></command>
</commands>

View File

@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<commands opcode_base="$OPCODEBASE">
<command kind="async" opcode="0" mnemonic="Flight Command Zero" size = "10">
<comment>Flight Command Zero executes very simple command.</comment>
</command>
<command kind = "sync" opcode="1" mnemonic="Flight Command One">
<comment>Flight Command One executes complex command.</comment>
<args>
<arg name="Throttle" type="I8" size="8" pass_by="reference" comment="Throttle Value">
<comment>Controls the throttle value.</comment>
</arg>
<arg name="Rotation" type = "ENUM">
<enum name="RotationValues">
<item name="Yaw" value="-1"></item>
<item name="Pitch" value = "0" comment = "Around y."/>
<item name = "Role" comment = "Around z."></item>
</enum>
</arg>
<arg name="Display" type="string">
<comment>Message to pop up onto onboard flight 8 bit display.</comment>
</arg>
</args>
</command>
<command kind="async" opcode="2" mnemonic="Flight Command Two"></command>
</commands>

Some files were not shown because too many files have changed in this diff Show More