Compare commits

..

1 Commits

Author SHA1 Message Date
Cizz22 aa106a5a85 WIP: tm using relibility 5 months ago

22
.env

@ -1,5 +1,5 @@
ENV=development
LOG_LEVEL=INFO
LOG_LEVEL=ERROR
PORT=3021
HOST=0.0.0.0
@ -9,15 +9,15 @@ DATABASE_CREDENTIAL_USER=postgres
DATABASE_CREDENTIAL_PASSWORD=postgres
DATABASE_NAME=digital_twin
COLLECTOR_HOSTNAME=192.168.1.82
COLLECTOR_PORT=1111
COLLECTOR_CREDENTIAL_USER=digital_twin
COLLECTOR_CREDENTIAL_PASSWORD=Pr0jec7@D!g!tTwiN
COLLECTOR_NAME=digital_twin
# COLLECTOR_HOSTNAME=192.168.1.82
# COLLECTOR_PORT=1111
# COLLECTOR_CREDENTIAL_USER=digital_twin
# COLLECTOR_CREDENTIAL_PASSWORD=Pr0jec7@D!g!tTwiN
# COLLECTOR_NAME=digital_twin
# COLLECTOR_HOSTNAME=192.168.1.86
# COLLECTOR_PORT=5432
# COLLECTOR_CREDENTIAL_USER=postgres
# COLLECTOR_CREDENTIAL_PASSWORD=postgres
# COLLECTOR_NAME=digital_twin
COLLECTOR_HOSTNAME=192.168.1.86
COLLECTOR_PORT=5432
COLLECTOR_CREDENTIAL_USER=postgres
COLLECTOR_CREDENTIAL_PASSWORD=postgres
COLLECTOR_NAME=digital_twin

@ -26,6 +26,8 @@ ENV POETRY_VIRTUALENVS_IN_PROJECT=1 \
COPY --from=builder /app/.venv /app/.venv
# Copy application files
COPY . /app/
# Delete Tests for production
RUN rm -rf /app/tests/
# Add custom configuration to root's .bashrc including password protection
RUN echo "# Custom configurations added by Dockerfile" >> /root/.bashrc && \

127
Jenkinsfile vendored

@ -1,96 +1,107 @@
pipeline {
agent any
environment {
// Replace with your Docker Hub username/organization
DOCKER_HUB_USERNAME = 'aimodocker'
// This creates DOCKER_AUTH_USR and DOCKER_AUTH_PSW
DOCKER_AUTH = credentials('aimodocker')
// Use credentials for Docker Hub
DOCKER_CREDENTIALS = credentials('aimodocker')
// Replace with your image name
IMAGE_NAME = 'oh-service'
SERVICE_NAME = 'ahm-app'
SECURITY_PREFIX = 'security'
// Replace with your docker compose service name
SERVICE_NAME = 'oh-app'
// Variable for Git commit hash
GIT_COMMIT_HASH = ''
// Initialize variables to be updated in script blocks
GIT_COMMIT_HASH = ""
IMAGE_TAG = ""
SECONDARY_TAG = ""
// Replace with the SSH credentials for development server
// SSH_CREDENTIALS = credentials('backend-server-digitaltwin')
// SSH_CREDENTIALS_USR = 'aimo'
// SSH_SERVER_IP = '192.168.1.82'
}
stages {
stage('Checkout & Setup') {
stage('Checkout') {
steps {
script {
// Checkout and get git commit hash
checkout scm
GIT_COMMIT_HASH = sh(script: 'git rev-parse --short HEAD', returnStdout: true).trim()
// Use env.BRANCH_NAME or logic to handle detached HEAD if necessary
def branch = env.BRANCH_NAME ?: 'unknown'
echo "Current Branch: ${branch}"
if (branch == 'main') {
IMAGE_TAG = GIT_COMMIT_HASH
SECONDARY_TAG = 'latest'
} else if (branch == 'oh_security') {
IMAGE_TAG = "${SECURITY_PREFIX}-${GIT_COMMIT_HASH}"
SECONDARY_TAG = "${SECURITY_PREFIX}-latest"
} else {
IMAGE_TAG = "temp-${GIT_COMMIT_HASH}"
SECONDARY_TAG = "" // Ensure it's empty for other branches
}
echo "Primary Tag: ${IMAGE_TAG}"
def commitHash = sh(script: 'git rev-parse --short HEAD', returnStdout: true).trim()
GIT_COMMIT_HASH = commitHash
echo "Git commit hash: ${GIT_COMMIT_HASH}"
}
}
}
stage('Docker Login') {
steps {
// Fixed variable names based on the 'DOCKER_AUTH' environment key
sh "echo ${DOCKER_AUTH_PSW} | docker login -u ${DOCKER_AUTH_USR} --password-stdin"
sh '''
echo ${DOCKER_CREDENTIALS_PSW} | docker login -u ${DOCKER_CREDENTIALS_USR} --password-stdin
'''
}
}
stage('Build & Tag') {
stage('Build Docker Image') {
steps {
script {
def fullImageName = "${DOCKER_HUB_USERNAME}/${IMAGE_NAME}"
sh "docker build -t ${fullImageName}:${IMAGE_TAG} ."
if (SECONDARY_TAG) {
sh "docker tag ${fullImageName}:${IMAGE_TAG} ${fullImageName}:${SECONDARY_TAG}"
}
// Build with commit hash tag
sh """
docker build -t ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:latest .
docker tag ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:latest ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:${GIT_COMMIT_HASH}
"""
}
}
}
stage('Push to Docker Hub') {
steps {
script {
def fullImageName = "${DOCKER_HUB_USERNAME}/${IMAGE_NAME}"
sh "docker push ${fullImageName}:${IMAGE_TAG}"
if (SECONDARY_TAG) {
sh "docker push ${fullImageName}:${SECONDARY_TAG}"
}
}
sh """
# Push both tags
docker push ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:${GIT_COMMIT_HASH}
docker push ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:latest
"""
}
}
// stage('Deploy') {
// steps {
// script {
// sshagent(credentials: ['backend-server-digitaltwin']) {
// sh """
// ssh -o StrictHostKeyChecking=no -p 12558 aimo@0.tcp.ap.ngrok.io '
// cd ~/digital-twin/Docker
// sudo docker compose pull ${SERVICE_NAME}
// sudo docker compose up -d ${SERVICE_NAME}
// '
// """
// }
// }
// }
// }
}
post {
always {
// Clean up
sh 'docker logout'
// Clean up local images
script {
sh 'docker logout'
def fullImageName = "${DOCKER_HUB_USERNAME}/${IMAGE_NAME}"
// Clean up images to save agent disk space
sh "docker rmi ${fullImageName}:${IMAGE_TAG} || true"
if (SECONDARY_TAG) {
sh "docker rmi ${fullImageName}:${SECONDARY_TAG} || true"
try {
sh """
# Push both tags
docker rmi ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:${GIT_COMMIT_HASH}
docker rmi ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:latest
"""
} catch (err) {
echo "Failed to clean up images: ${err}"
}
}
}
success {
echo "Successfully processed ${env.BRANCH_NAME}."
echo "Successfully built, pushed, and deployed Docker image with tags: latest and ${GIT_COMMIT_HASH}"
}
failure {
echo 'Failed to build/push/deploy Docker image!'
}
}
}

@ -1,44 +0,0 @@
# Unit Testing Guide - be-optimumoh
This document provides instructions on how to set up and run unit tests for the **be-optimumoh** project.
## 1. Preparation
### Install Dependencies
Ensure you have all dependencies installed. This project uses `poetry`.
```bash
# Install dependencies
poetry install
```
## 2. Configuration
### Pytest Configuration
Ensure the `pytest.ini` file in the root directory points to the `unit` test folder:
```ini
[pytest]
testpaths = tests/unit
python_files = test_*.py
asyncio_mode = auto
```
## 3. Running Tests
### Run Unit Tests
To run all unit tests in the project:
```bash
poetry run pytest tests/unit
```
### Run Specific Unit Test File
```bash
poetry run pytest tests/unit/test_specific_feature.py
```
## 4. Best Practices
- **Isolation**: Unit tests should be isolated from external services. Use mocking for APIs and databases.
- **Async Testing**: With `asyncio_mode = auto` (as configured above), `async def` test functions are collected and run automatically; the explicit `@pytest.mark.asyncio` marker is only required when `asyncio_mode` is set to `strict`.

224
poetry.lock generated

@ -1,17 +1,5 @@
# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.
[[package]]
name = "absl-py"
version = "2.3.1"
description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "absl_py-2.3.1-py3-none-any.whl", hash = "sha256:eeecf07f0c2a93ace0772c92e596ace6d3d3996c042b2128459aaae2a76de11d"},
{file = "absl_py-2.3.1.tar.gz", hash = "sha256:a97820526f7fbfd2ec1bce83f3f25e3a14840dac0d8e02a0b71cd75db3f77fc9"},
]
[[package]]
name = "aiohappyeyeballs"
version = "2.6.1"
@ -148,25 +136,6 @@ files = [
frozenlist = ">=1.1.0"
typing-extensions = {version = ">=4.2", markers = "python_version < \"3.13\""}
[[package]]
name = "aiosqlite"
version = "0.20.0"
description = "asyncio bridge to the standard sqlite3 module"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "aiosqlite-0.20.0-py3-none-any.whl", hash = "sha256:36a1deaca0cac40ebe32aac9977a6e2bbc7f5189f23f4a54d5908986729e5bd6"},
{file = "aiosqlite-0.20.0.tar.gz", hash = "sha256:6d35c8c256637f4672f843c31021464090805bf925385ac39473fb16eaaca3d7"},
]
[package.dependencies]
typing_extensions = ">=4.0"
[package.extras]
dev = ["attribution (==1.7.0)", "black (==24.2.0)", "coverage[toml] (==7.4.1)", "flake8 (==7.0.0)", "flake8-bugbear (==24.2.6)", "flit (==3.9.0)", "mypy (==1.8.0)", "ufmt (==2.3.0)", "usort (==1.0.8.post1)"]
docs = ["sphinx (==7.2.6)", "sphinx-mdinclude (==0.5.3)"]
[[package]]
name = "annotated-types"
version = "0.7.0"
@ -1061,18 +1030,6 @@ files = [
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "immutabledict"
version = "4.2.1"
description = "Immutable wrapper around dictionaries (a fork of frozendict)"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373"},
{file = "immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc"},
]
[[package]]
name = "importlib-resources"
version = "6.4.5"
@ -1463,63 +1420,6 @@ rsa = ["cryptography (>=3.0.0)"]
signals = ["blinker (>=1.4.0)"]
signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
[[package]]
name = "ortools"
version = "9.14.6206"
description = "Google OR-Tools python libraries and modules"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "ortools-9.14.6206-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:6e2364edd1577cd094e7c7121ec5fb0aa462a69a78ce29cdc40fa45943ff0091"},
{file = "ortools-9.14.6206-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164b726b4d358ae68a018a52ff1999c0646d6f861b33676c2c83e2ddb60cfa13"},
{file = "ortools-9.14.6206-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ebb0e210969cc3246fe78dadf9038936a3a18edc8156e23a394e2bbcec962431"},
{file = "ortools-9.14.6206-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:174de2f04c106c7dcc5989560f2c0e065e78fba0ad0d1fd029897582f4823c3a"},
{file = "ortools-9.14.6206-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e6d994ebcf9cbdda1e20a75662967124e7e6ffd707c7f60b2db1a11f2104d384"},
{file = "ortools-9.14.6206-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5763472f8b05072c96c36c4eafadd9f6ffcdab38a81d8f0142fc408ad52a4342"},
{file = "ortools-9.14.6206-cp310-cp310-win_amd64.whl", hash = "sha256:6711516f837f06836ff9fda66fe4337b88c214f2ba6a921b84d3b05876f1fa8c"},
{file = "ortools-9.14.6206-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:8bcd8481846090585a4fac82800683555841685c49fa24578ad1e48a37918568"},
{file = "ortools-9.14.6206-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5af2bbf2fff7d922ba036e27d7ff378abecb24749380c86a77fa6208d5ba35cd"},
{file = "ortools-9.14.6206-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a6ab43490583c4bbf0fff4e51bb1c15675d5651c2e8e12ba974fd08e8c05a48f"},
{file = "ortools-9.14.6206-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9aa2c0c50a765c6a060960dcb0207bd6aeb6341f5adacb3d33e613b7e7409428"},
{file = "ortools-9.14.6206-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:64ec63fd92125499e9ca6b72700406dda161eefdfef92f04c35c5150391f89a4"},
{file = "ortools-9.14.6206-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8651008f05257471f45a919ade5027afa12ab6f7a4fdf0a8bcc18c92032f8571"},
{file = "ortools-9.14.6206-cp311-cp311-win_amd64.whl", hash = "sha256:ca60877830a631545234e83e7f6bd55830334a4d0c2b51f1669b1f2698d58b84"},
{file = "ortools-9.14.6206-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:e38c8c4a184820cbfdb812a8d484f6506cf16993ce2a95c88bc1c9d23b17c63e"},
{file = "ortools-9.14.6206-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db685073cbed9f8bfaa744f5e883f3dea57c93179b0abe1788276fd3b074fa61"},
{file = "ortools-9.14.6206-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4bfb8bffb29991834cf4bde7048ca8ee8caed73e8dd21e5ec7de99a33bbfea0"},
{file = "ortools-9.14.6206-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eb464a698837e7f90ca5f9b3d748b6ddf553198a70032bc77824d1cd88695d2b"},
{file = "ortools-9.14.6206-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8f33deaeb7c3dda8ca1d29c5b9aa9c3a4f2ca9ecf34f12a1f809bb2995f41274"},
{file = "ortools-9.14.6206-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:086e7c2dc4f23efffb20a5e20f618c7d6adb99b2d94f684cab482387da3bc434"},
{file = "ortools-9.14.6206-cp312-cp312-win_amd64.whl", hash = "sha256:17c13b0bfde17ac57789ad35243edf1318ecd5db23cf949b75ab62480599f188"},
{file = "ortools-9.14.6206-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:8d0df7eef8ba53ad235e29018389259bad2e667d9594b9c2a412ed6a5756bd4e"},
{file = "ortools-9.14.6206-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:57dfe10844ce8331634d4723040fe249263fd490407346efc314c0bc656849b5"},
{file = "ortools-9.14.6206-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5c0c2c00a6e5d5c462e76fdda7dbd40d0f9139f1df4211d34b36906696248020"},
{file = "ortools-9.14.6206-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:38044cf39952d93cbcc02f6acdbe0a9bd3628fbf17f0d7eb0374060fa028c22e"},
{file = "ortools-9.14.6206-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:98564de773d709e1e49cb3c32f6917589c314f047786d88bd5f324c0eb7be96e"},
{file = "ortools-9.14.6206-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:80528b0ac72dc3de00cbeef2ce028517a476450b5877b1cda1b8ecb9fa98505e"},
{file = "ortools-9.14.6206-cp313-cp313-win_amd64.whl", hash = "sha256:47b1b15dcb085d32c61621b790259193aefa9e4577abadf233d47fbe7d0b81ef"},
{file = "ortools-9.14.6206-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d26a0f9ed97ef9d3384a9069923585f5f974c3fde555a41f4d6381fbe7840bc4"},
{file = "ortools-9.14.6206-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d40d8141667d47405f296a9f687058c566d7816586e9a672b59e9fcec8493133"},
{file = "ortools-9.14.6206-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:aefea81ed81aa937873efc520381785ed65380e52917f492ab566f46bbb5660d"},
{file = "ortools-9.14.6206-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f044bb277db3ab6a1b958728fe1cf14ca87c3800d67d7b321d876b48269340f6"},
{file = "ortools-9.14.6206-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:070dc7cebfa0df066acb6b9a6d02339351be8f91b2352b782ee7f40412207e20"},
{file = "ortools-9.14.6206-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5eb558a03b4ada501ecdea7b89f0d3bdf2cc6752e1728759ccf27923f592a8c2"},
{file = "ortools-9.14.6206-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:646329fa74a5c48c591b7fabfd26743f6d2de4e632b3b96ec596c47bfe19177a"},
{file = "ortools-9.14.6206-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa5161924f35b8244295acd0fab2a8171bb08ef8d5cfaf1913a21274475704cc"},
{file = "ortools-9.14.6206-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e253526a026ae194aed544a0d065163f52a0c9cb606a1061c62df546877d5452"},
{file = "ortools-9.14.6206-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:dcb496ef633d884036770783f43bf8a47ff253ecdd8a8f5b95f00276ec241bfd"},
{file = "ortools-9.14.6206-cp39-cp39-win_amd64.whl", hash = "sha256:2733f635675de631fdc7b1611878ec9ee2f48a26434b7b3c07d0a0f535b92e03"},
]
[package.dependencies]
absl-py = ">=2.0.0"
immutabledict = ">=3.0.0"
numpy = ">=1.13.3"
pandas = ">=2.0.0"
protobuf = ">=6.31.1,<6.32"
typing-extensions = ">=4.12"
[[package]]
name = "packaging"
version = "24.2"
@ -1762,21 +1662,23 @@ testing = ["google-api-core (>=1.31.5)"]
[[package]]
name = "protobuf"
version = "6.31.1"
version = "5.29.0"
description = ""
optional = false
python-versions = ">=3.9"
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "protobuf-6.31.1-cp310-abi3-win32.whl", hash = "sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9"},
{file = "protobuf-6.31.1-cp310-abi3-win_amd64.whl", hash = "sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447"},
{file = "protobuf-6.31.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402"},
{file = "protobuf-6.31.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39"},
{file = "protobuf-6.31.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6"},
{file = "protobuf-6.31.1-cp39-cp39-win32.whl", hash = "sha256:0414e3aa5a5f3ff423828e1e6a6e907d6c65c1d5b7e6e975793d5590bdeecc16"},
{file = "protobuf-6.31.1-cp39-cp39-win_amd64.whl", hash = "sha256:8764cf4587791e7564051b35524b72844f845ad0bb011704c3736cce762d8fe9"},
{file = "protobuf-6.31.1-py3-none-any.whl", hash = "sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e"},
{file = "protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a"},
{file = "protobuf-5.29.0-cp310-abi3-win32.whl", hash = "sha256:ea7fb379b257911c8c020688d455e8f74efd2f734b72dc1ea4b4d7e9fd1326f2"},
{file = "protobuf-5.29.0-cp310-abi3-win_amd64.whl", hash = "sha256:34a90cf30c908f47f40ebea7811f743d360e202b6f10d40c02529ebd84afc069"},
{file = "protobuf-5.29.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:c931c61d0cc143a2e756b1e7f8197a508de5365efd40f83c907a9febf36e6b43"},
{file = "protobuf-5.29.0-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:85286a47caf63b34fa92fdc1fd98b649a8895db595cfa746c5286eeae890a0b1"},
{file = "protobuf-5.29.0-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:0d10091d6d03537c3f902279fcf11e95372bdd36a79556311da0487455791b20"},
{file = "protobuf-5.29.0-cp38-cp38-win32.whl", hash = "sha256:0cd67a1e5c2d88930aa767f702773b2d054e29957432d7c6a18f8be02a07719a"},
{file = "protobuf-5.29.0-cp38-cp38-win_amd64.whl", hash = "sha256:e467f81fdd12ded9655cea3e9b83dc319d93b394ce810b556fb0f421d8613e86"},
{file = "protobuf-5.29.0-cp39-cp39-win32.whl", hash = "sha256:17d128eebbd5d8aee80300aed7a43a48a25170af3337f6f1333d1fac2c6839ac"},
{file = "protobuf-5.29.0-cp39-cp39-win_amd64.whl", hash = "sha256:6c3009e22717c6cc9e6594bb11ef9f15f669b19957ad4087214d69e08a213368"},
{file = "protobuf-5.29.0-py3-none-any.whl", hash = "sha256:88c4af76a73183e21061881360240c0cdd3c39d263b4e8fb570aaf83348d608f"},
{file = "protobuf-5.29.0.tar.gz", hash = "sha256:445a0c02483869ed8513a585d80020d012c6dc60075f96fa0563a724987b1001"},
]
[[package]]
@ -2069,25 +1971,6 @@ pluggy = ">=1.5,<2"
[package.extras]
dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-asyncio"
version = "0.24.0"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b"},
{file = "pytest_asyncio-0.24.0.tar.gz", hash = "sha256:d081d828e576d85f875399194281e92bf8a68d60d72d1a2faf2feddb6c46b276"},
]
[package.dependencies]
pytest = ">=8.2,<9"
[package.extras]
docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@ -2280,6 +2163,85 @@ files = [
[package.dependencies]
pyasn1 = ">=0.1.3"
[[package]]
name = "scipy"
version = "1.16.2"
description = "Fundamental algorithms for scientific computing in Python"
optional = false
python-versions = ">=3.11"
groups = ["main"]
files = [
{file = "scipy-1.16.2-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:6ab88ea43a57da1af33292ebd04b417e8e2eaf9d5aa05700be8d6e1b6501cd92"},
{file = "scipy-1.16.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c95e96c7305c96ede73a7389f46ccd6c659c4da5ef1b2789466baeaed3622b6e"},
{file = "scipy-1.16.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:87eb178db04ece7c698220d523c170125dbffebb7af0345e66c3554f6f60c173"},
{file = "scipy-1.16.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:4e409eac067dcee96a57fbcf424c13f428037827ec7ee3cb671ff525ca4fc34d"},
{file = "scipy-1.16.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e574be127bb760f0dad24ff6e217c80213d153058372362ccb9555a10fc5e8d2"},
{file = "scipy-1.16.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f5db5ba6188d698ba7abab982ad6973265b74bb40a1efe1821b58c87f73892b9"},
{file = "scipy-1.16.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec6e74c4e884104ae006d34110677bfe0098203a3fec2f3faf349f4cb05165e3"},
{file = "scipy-1.16.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:912f46667d2d3834bc3d57361f854226475f695eb08c08a904aadb1c936b6a88"},
{file = "scipy-1.16.2-cp311-cp311-win_amd64.whl", hash = "sha256:91e9e8a37befa5a69e9cacbe0bcb79ae5afb4a0b130fd6db6ee6cc0d491695fa"},
{file = "scipy-1.16.2-cp311-cp311-win_arm64.whl", hash = "sha256:f3bf75a6dcecab62afde4d1f973f1692be013110cad5338007927db8da73249c"},
{file = "scipy-1.16.2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:89d6c100fa5c48472047632e06f0876b3c4931aac1f4291afc81a3644316bb0d"},
{file = "scipy-1.16.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ca748936cd579d3f01928b30a17dc474550b01272d8046e3e1ee593f23620371"},
{file = "scipy-1.16.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:fac4f8ce2ddb40e2e3d0f7ec36d2a1e7f92559a2471e59aec37bd8d9de01fec0"},
{file = "scipy-1.16.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:033570f1dcefd79547a88e18bccacff025c8c647a330381064f561d43b821232"},
{file = "scipy-1.16.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ea3421209bf00c8a5ef2227de496601087d8f638a2363ee09af059bd70976dc1"},
{file = "scipy-1.16.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f66bd07ba6f84cd4a380b41d1bf3c59ea488b590a2ff96744845163309ee8e2f"},
{file = "scipy-1.16.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5e9feab931bd2aea4a23388c962df6468af3d808ddf2d40f94a81c5dc38f32ef"},
{file = "scipy-1.16.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03dfc75e52f72cf23ec2ced468645321407faad8f0fe7b1f5b49264adbc29cb1"},
{file = "scipy-1.16.2-cp312-cp312-win_amd64.whl", hash = "sha256:0ce54e07bbb394b417457409a64fd015be623f36e330ac49306433ffe04bc97e"},
{file = "scipy-1.16.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a8ffaa4ac0df81a0b94577b18ee079f13fecdb924df3328fc44a7dc5ac46851"},
{file = "scipy-1.16.2-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:84f7bf944b43e20b8a894f5fe593976926744f6c185bacfcbdfbb62736b5cc70"},
{file = "scipy-1.16.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:5c39026d12edc826a1ef2ad35ad1e6d7f087f934bb868fc43fa3049c8b8508f9"},
{file = "scipy-1.16.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e52729ffd45b68777c5319560014d6fd251294200625d9d70fd8626516fc49f5"},
{file = "scipy-1.16.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:024dd4a118cccec09ca3209b7e8e614931a6ffb804b2a601839499cb88bdf925"},
{file = "scipy-1.16.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7a5dc7ee9c33019973a470556081b0fd3c9f4c44019191039f9769183141a4d9"},
{file = "scipy-1.16.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c2275ff105e508942f99d4e3bc56b6ef5e4b3c0af970386ca56b777608ce95b7"},
{file = "scipy-1.16.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:af80196eaa84f033e48444d2e0786ec47d328ba00c71e4299b602235ffef9acb"},
{file = "scipy-1.16.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9fb1eb735fe3d6ed1f89918224e3385fbf6f9e23757cacc35f9c78d3b712dd6e"},
{file = "scipy-1.16.2-cp313-cp313-win_amd64.whl", hash = "sha256:fda714cf45ba43c9d3bae8f2585c777f64e3f89a2e073b668b32ede412d8f52c"},
{file = "scipy-1.16.2-cp313-cp313-win_arm64.whl", hash = "sha256:2f5350da923ccfd0b00e07c3e5cfb316c1c0d6c1d864c07a72d092e9f20db104"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:53d8d2ee29b925344c13bda64ab51785f016b1b9617849dac10897f0701b20c1"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:9e05e33657efb4c6a9d23bd8300101536abd99c85cca82da0bffff8d8764d08a"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:7fe65b36036357003b3ef9d37547abeefaa353b237e989c21027b8ed62b12d4f"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:6406d2ac6d40b861cccf57f49592f9779071655e9f75cd4f977fa0bdd09cb2e4"},
{file = "scipy-1.16.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ff4dc42bd321991fbf611c23fc35912d690f731c9914bf3af8f417e64aca0f21"},
{file = "scipy-1.16.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:654324826654d4d9133e10675325708fb954bc84dae6e9ad0a52e75c6b1a01d7"},
{file = "scipy-1.16.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:63870a84cd15c44e65220eaed2dac0e8f8b26bbb991456a033c1d9abfe8a94f8"},
{file = "scipy-1.16.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:fa01f0f6a3050fa6a9771a95d5faccc8e2f5a92b4a2e5440a0fa7264a2398472"},
{file = "scipy-1.16.2-cp313-cp313t-win_amd64.whl", hash = "sha256:116296e89fba96f76353a8579820c2512f6e55835d3fad7780fece04367de351"},
{file = "scipy-1.16.2-cp313-cp313t-win_arm64.whl", hash = "sha256:98e22834650be81d42982360382b43b17f7ba95e0e6993e2a4f5b9ad9283a94d"},
{file = "scipy-1.16.2-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:567e77755019bb7461513c87f02bb73fb65b11f049aaaa8ca17cfaa5a5c45d77"},
{file = "scipy-1.16.2-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:17d9bb346194e8967296621208fcdfd39b55498ef7d2f376884d5ac47cec1a70"},
{file = "scipy-1.16.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:0a17541827a9b78b777d33b623a6dcfe2ef4a25806204d08ead0768f4e529a88"},
{file = "scipy-1.16.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:d7d4c6ba016ffc0f9568d012f5f1eb77ddd99412aea121e6fa8b4c3b7cbad91f"},
{file = "scipy-1.16.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9702c4c023227785c779cba2e1d6f7635dbb5b2e0936cdd3a4ecb98d78fd41eb"},
{file = "scipy-1.16.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d1cdf0ac28948d225decdefcc45ad7dd91716c29ab56ef32f8e0d50657dffcc7"},
{file = "scipy-1.16.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:70327d6aa572a17c2941cdfb20673f82e536e91850a2e4cb0c5b858b690e1548"},
{file = "scipy-1.16.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5221c0b2a4b58aa7c4ed0387d360fd90ee9086d383bb34d9f2789fafddc8a936"},
{file = "scipy-1.16.2-cp314-cp314-win_amd64.whl", hash = "sha256:f5a85d7b2b708025af08f060a496dd261055b617d776fc05a1a1cc69e09fe9ff"},
{file = "scipy-1.16.2-cp314-cp314-win_arm64.whl", hash = "sha256:2cc73a33305b4b24556957d5857d6253ce1e2dcd67fa0ff46d87d1670b3e1e1d"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:9ea2a3fed83065d77367775d689401a703d0f697420719ee10c0780bcab594d8"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:7280d926f11ca945c3ef92ba960fa924e1465f8d07ce3a9923080363390624c4"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:8afae1756f6a1fe04636407ef7dbece33d826a5d462b74f3d0eb82deabefd831"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:5c66511f29aa8d233388e7416a3f20d5cae7a2744d5cee2ecd38c081f4e861b3"},
{file = "scipy-1.16.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efe6305aeaa0e96b0ccca5ff647a43737d9a092064a3894e46c414db84bc54ac"},
{file = "scipy-1.16.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7f3a337d9ae06a1e8d655ee9d8ecb835ea5ddcdcbd8d23012afa055ab014f374"},
{file = "scipy-1.16.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bab3605795d269067d8ce78a910220262711b753de8913d3deeaedb5dded3bb6"},
{file = "scipy-1.16.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b0348d8ddb55be2a844c518cd8cc8deeeb8aeba707cf834db5758fc89b476a2c"},
{file = "scipy-1.16.2-cp314-cp314t-win_amd64.whl", hash = "sha256:26284797e38b8a75e14ea6631d29bda11e76ceaa6ddb6fdebbfe4c4d90faf2f9"},
{file = "scipy-1.16.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d2a4472c231328d4de38d5f1f68fdd6d28a615138f842580a8a321b5845cf779"},
{file = "scipy-1.16.2.tar.gz", hash = "sha256:af029b153d243a80afb6eabe40b0a07f8e35c9adc269c019f364ad747f826a6b"},
]
[package.dependencies]
numpy = ">=1.25.2,<2.6"
[package.extras]
dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"]
doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "linkify-it-py", "matplotlib (>=3.5)", "myst-nb (>=1.2.0)", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.2.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"]
test = ["Cython", "array-api-strict (>=2.3.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest (>=8.0.0)", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
[[package]]
name = "shellingham"
version = "1.5.4"
@ -3046,4 +3008,4 @@ propcache = ">=0.2.1"
[metadata]
lock-version = "2.1"
python-versions = "^3.11"
content-hash = "256c8104c6eeb5b288dd0cdf02fe7cbad4f75aa93fc71f8d44da8b605d72f886"
content-hash = "91dbae2db3aade422091b46bec01e32cabc4457814e736775fc0d020d785fae5"

@ -12,8 +12,6 @@ fastapi = { extras = ["standard"], version = "^0.115.4" }
sqlalchemy = "^2.0.36"
httpx = "^0.27.2"
pytest = "^8.3.3"
pytest-asyncio = "^0.24.0"
aiosqlite = "^0.20.0"
faker = "^30.8.2"
factory-boy = "^3.3.1"
sqlalchemy-utils = "^0.41.2"
@ -32,7 +30,7 @@ google-api-python-client = "^2.169.0"
google-auth-httplib2 = "^0.2.0"
google-auth-oauthlib = "^1.2.2"
aiohttp = "^3.12.14"
ortools = "^9.14.6206"
scipy = "^1.16.2"
[build-system]

@ -1,6 +0,0 @@
[pytest]
asyncio_mode = auto
testpaths = tests/unit
python_files = test_*.py
filterwarnings =
ignore::pydantic.PydanticDeprecatedSince20

@ -10,7 +10,7 @@ from src.calculation_budget_constrains.router import \
from src.calculation_target_reliability.router import \
router as calculation_target_reliability
from src.calculation_time_constrains.router import \
router as calculation_time_constrains_router, get_calculation
router as calculation_time_constrains_router
# from src.job.router import router as job_router
from src.overhaul.router import router as overhaul_router
@ -32,7 +32,7 @@ from src.equipment_sparepart.router import router as equipment_sparepart_router
# from src.overhaul.router import router as overhaul_router
# from src.overhaul_history.router import router as overhaul_history_router
# from src.overhaul_activity.router import router as scope_equipment_activity_router
from src.overhaul_scope.router import router as ovehaul_schedule_router
# # from src.overhaul_schedule.router import router as ovehaul_schedule_router
# from src.scope_equipment_part.router import router as scope_equipment_part_router
# from src.calculation_target_reliability.router import router as calculation_target_reliability
#
@ -72,7 +72,6 @@ authenticated_api_router.include_router(
overhaul_router, prefix="/overhauls", tags=["overhaul"]
)
# authenticated_api_router.include_router(job_router, prefix="/jobs", tags=["job"])
# # # Overhaul session data
@ -143,9 +142,9 @@ authenticated_api_router.include_router(
# scope_equipment_part_router, prefix="/equipment-parts", tags=["scope_equipment_parts"]
# )
authenticated_api_router.include_router(
ovehaul_schedule_router, prefix="/overhaul-schedules", tags=["overhaul_schedules"]
)
# authenticated_api_router.include_router(
# ovehaul_schedule_router, prefix="/overhaul-schedules", tags=["overhaul_schedules"]
# )
# calculation
calculation_router = APIRouter(prefix="/calculation", tags=["calculations"])
@ -173,10 +172,4 @@ calculation_router.include_router(
authenticated_api_router.include_router(calculation_router)
api_router.include_router(
get_calculation,
prefix="/calculation/time-constraint",
tags=["calculation", "time_constraint"],
)
api_router.include_router(authenticated_api_router)

@ -1,6 +1,5 @@
# app/auth/auth_bearer.py
import json
from typing import Annotated, Optional
import requests
@ -44,22 +43,12 @@ class JWTBearer(HTTPBearer):
)
request.state.user = message
from src.context import set_user_id, set_username, set_role
if hasattr(message, "user_id"):
set_user_id(str(message.user_id))
if hasattr(message, "username"):
set_username(message.username)
elif hasattr(message, "name"):
set_username(message.name)
if hasattr(message, "role"):
set_role(message.role)
return message
else:
raise HTTPException(status_code=403, detail="Invalid authorization code.")
def verify_jwt(self, jwtoken: str, method: str, endpoint: str):
try:
response = requests.get(
f"{config.AUTH_SERVICE_API}/verify-token",
@ -81,155 +70,15 @@ class JWTBearer(HTTPBearer):
async def get_current_user(request: Request) -> UserBase:
return request.state.user
async def get_token(request: Request):
token = request.headers.get("Authorization")
if token:
return token.replace("Bearer ", "") # Menghapus prefix "Bearer "
else:
return request.cookies.get("access_token") # Fallback ke cookie
return "" # Mengembalikan token atau None jika tidak ada
async def internal_key(request: Request):
async def get_token(request: Request):
token = request.headers.get("Authorization")
if not token:
api_key = request.headers.get("X-Internal-Key")
if api_key != config.API_KEY:
raise HTTPException(
status_code=403, detail="Invalid Key."
)
try:
headers = {
'Content-Type': 'application/json'
}
response = requests.post(
f"{config.AUTH_SERVICE_API}/sign-in",
headers=headers,
data=json.dumps({
"username": "ohuser",
"password": "123456789"
})
)
if not response.ok:
print(str(response.json()))
raise Exception("error auth")
user_data = response.json()
return user_data['data']['access_token']
except Exception as e:
raise Exception(str(e))
else:
try:
response = requests.get(
f"{config.AUTH_SERVICE_API}/verify-token",
headers={"Authorization": f"{token}"},
)
if not response.ok:
raise HTTPException(
status_code=403, detail="Invalid token."
)
return token.split(" ")[1]
except Exception as e:
print(f"Token verification error: {str(e)}")
return False, str(e)
import httpx
import logging
from typing import Dict, Any
import src.config as config
log = logging.getLogger(__name__)
AUTH_NOTIFY_ENDPOINT = f"{config.AUTH_SERVICE_API}/admin/notify-limit"
async def notify_admin_on_rate_limit(
endpoint_name: str,
ip_address: str,
method: str = "POST",
cooldown: int = 900,
timeout: int = 5
) -> Dict[str, Any]:
"""
Kirim notifikasi ke admin via be-auth service ketika rate limit terlampaui.
Async version - gunakan di async context.
"""
payload = {
"endpoint_name": endpoint_name,
"ip_address": ip_address,
"method": method,
"cooldown": cooldown,
}
try:
async with httpx.AsyncClient(timeout=timeout) as client:
response = await client.post(AUTH_NOTIFY_ENDPOINT, json=payload)
response.raise_for_status()
result = response.json()
log.info(f"Notifikasi admin sent | Endpoint: {endpoint_name}")
return result
except Exception as e:
log.error(f"Error notifying admin: {str(e)}")
return {"status": False, "message": str(e), "data": payload}
def notify_admin_on_rate_limit_sync(
endpoint_name: str,
ip_address: str,
method: str = "POST",
cooldown: int = 900,
timeout: int = 5
) -> Dict[str, Any]:
"""
Kirim notifikasi ke admin via be-auth service.
Sync version - gunakan di exception handler atau sync context.
RECOMMENDED untuk use case ini.
"""
payload = {
"endpoint_name": endpoint_name,
"ip_address": ip_address,
"method": method,
"cooldown": cooldown,
}
try:
response = httpx.post(AUTH_NOTIFY_ENDPOINT, json=payload, timeout=timeout)
response.raise_for_status()
result = response.json()
log.info(f"Notifikasi admin sent | Endpoint: {endpoint_name}")
return result
if token:
return token.split(" ")[1]
except Exception as e:
log.error(f"Error notifying admin: {str(e)}")
return {"status": False, "message": str(e), "data": payload}
return ""
CurrentUser = Annotated[UserBase, Depends(get_current_user)]
Token = Annotated[str, Depends(get_token)]
InternalKey = Annotated[str, Depends(internal_key)]

@ -1,12 +1,10 @@
from typing import Annotated, Dict, List, Optional
from typing import Dict, List, Optional
from fastapi import APIRouter, HTTPException, status
from fastapi.params import Query
from src.auth.service import Token
from src.calculation_budget_constrains.schema import BudgetContraintQuery
from src.calculation_target_reliability.service import get_simulation_results
from src.config import TC_RBD_ID
from src.database.core import CollectorDbSession, DbSession
from src.models import StandardResponse
@ -21,12 +19,12 @@ async def get_target_reliability(
token: Token,
session_id: str,
collector_db: CollectorDbSession,
params: Annotated[BudgetContraintQuery, Query()],
cost_threshold: float = Query(100),
):
"""Get all scope pagination."""
cost_threshold = params.cost_threshold
results = await get_simulation_results(
simulation_id = TC_RBD_ID,
simulation_id = "default",
token=token
)

@ -32,9 +32,6 @@ class OverhaulRead(OverhaulBase):
systemComponents: Dict[str, Any]
class BudgetContraintQuery(DefultBase):
cost_threshold: float = 100
# {
# "overview": {
# "totalEquipment": 30,

@ -84,7 +84,7 @@ async def get_all_budget_constrains(
for item in result:
cost = item["total_cost"] or 1.0
efficiency = item["contribution_norm"] / cost
item["priority_score"] = item["contribution_norm"]
item["priority_score"] = 0.7 * item["contribution_norm"] + 0.3 * efficiency
# Choose method
if use_optimal:
@ -104,7 +104,7 @@ def calculate_asset_eaf_contributions(plant_result, eq_results):
for asset in eq_results:
node_name = asset.get("aeros_node", {}).get("node_name")
if node_name:
results[node_name] = asset.get("contribution_factor", 0.0)
results[node_name] = asset.get("contribution", 0.0)
return results
@ -150,7 +150,7 @@ def knapsack_selection(equipments: List[dict], budget: float, scale: int = 10_00
for i in range(n):
cost, value = costs[i], values[i]
for w in range(W, cost - 1, -1):
if dp[w - cost] + value >= dp[w]: # <= FIXED HERE
if dp[w - cost] + value > dp[w]:
dp[w] = dp[w - cost] + value
keep[i][w] = True
@ -164,15 +164,5 @@ def knapsack_selection(equipments: List[dict], budget: float, scale: int = 10_00
else:
excluded.append(equipments[i])
# Optional: fill leftover budget with zero-priority items
remaining_budget = budget - sum(eq["total_cost"] for eq in selected)
if remaining_budget > 0:
for eq in excluded[:]:
if eq["total_cost"] <= remaining_budget:
selected.append(eq)
excluded.remove(eq)
remaining_budget -= eq["total_cost"]
return selected, excluded

@ -1,18 +1,14 @@
import asyncio
from typing import Dict, List, Optional
from typing_extensions import Annotated
from temporalio.client import Client
from fastapi import APIRouter, HTTPException, status
from fastapi.params import Query
from src.calculation_target_reliability.utils import wait_for_workflow
from src.config import TEMPORAL_URL, TR_RBD_ID
from src.database.core import DbSession, CollectorDbSession
from src.auth.service import Token
from src.models import StandardResponse
from .service import run_rbd_simulation, get_simulation_results, identify_worst_eaf_contributors
from .schema import OptimizationResult, TargetReliabiltiyQuery
from .schema import OptimizationResult
router = APIRouter()
@ -38,20 +34,12 @@ async def get_target_reliability(
db_session: DbSession,
token: Token,
collector_db: CollectorDbSession,
params: Annotated[TargetReliabiltiyQuery, Query()],
# oh_session_id: Optional[str] = Query(None),
# eaf_input: float = Query(99.8),
# duration: int = Query(17520),
# simulation_id: Optional[str] = Query(None),
# cut_hours = Query(0)
oh_session_id: Optional[str] = Query(None),
eaf_input: float = Query(99.8),
duration: int = Query(8760),
simulation_id: Optional[str] = Query(None)
):
"""Get all scope pagination."""
oh_session_id = params.oh_session_id
eaf_input = params.eaf_input
duration = params.duration
simulation_id = params.simulation_id
cut_hours = params.cut_hours
if not oh_session_id:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
@ -64,57 +52,27 @@ async def get_target_reliability(
# eaf_input=eaf_input,
# oh_duration=duration
# )
if duration != 17520:
if not simulation_id:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Simulation ID is required for non-default duration. Please run simulation first.",
)
else:
try:
temporal_client = await Client.connect(TEMPORAL_URL)
handle = temporal_client.get_workflow_handle(f"simulation-{simulation_id}")
desc = await handle.describe()
status_name = desc.status.name
if status_name in ["RUNNING", "CONTINUED_AS_NEW"]:
raise HTTPException(
status_code=status.HTTP_425_TOO_EARLY,
detail="Simulation is still running.",
)
elif status_name != "COMPLETED":
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"Simulation failed with status: {status_name}",
)
except HTTPException:
raise
except Exception as e:
# Handle connection errors or invalid workflow IDs
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=f"Simulation not found or error checking status: {str(e)}",
)
else:
simulation_id = TR_RBD_ID
# simulation_id = await run_rbd_simulation(
# sim_hours=duration,
# token=token
# )
if not simulation_id:
simulation_id = "default"
results = await get_simulation_results(
simulation_id=simulation_id,
token=token
)
optimize_result = await identify_worst_eaf_contributors(
simulation_result=results,
target_eaf=eaf_input,
db_session=db_session,
oh_session_id=oh_session_id,
collector_db=collector_db,
simulation_id=simulation_id,
duration=duration,
po_duration=1200,
cut_hours=float(cut_hours)
simulation_id=simulation_id
)

@ -39,8 +39,6 @@ class AssetWeight(OverhaulBase):
num_of_failures: int
down_time: float
efficiency: float
improvement_impact:float
birbaum: float
class MaintenanceScenario(OverhaulBase):
location_tag: str
@ -54,22 +52,11 @@ class MaintenanceScenario(OverhaulBase):
class OptimizationResult(OverhaulBase):
current_plant_eaf: float
target_plant_eaf: float
possible_plant_eaf:float
eaf_gap: float
eaf_improvement_text:str
recommended_reduced_outage:Optional[float] = 0
warning_message:Optional[str]
asset_contributions: List[dict]
optimization_success: bool = False
simulation_id: Optional[str] = None
class TargetReliabiltiyQuery(DefultBase):
oh_session_id: Optional[str] = Field(None)
eaf_input: float = Field(99.8)
duration: int = Field(17520)
simulation_id: Optional[str] = Field(None)
cut_hours:int = Field(0)
# {
# "overview": {

@ -1,10 +1,8 @@
import math
from typing import Optional, List
from dataclasses import dataclass
from sqlalchemy import Delete, Select
import httpx
from src.auth.service import CurrentUser
from src.config import RBD_SERVICE_API
from src.contribution_util import calculate_contribution, calculate_contribution_accurate
from src.database.core import DbSession, CollectorDbSession
from datetime import datetime, timedelta
@ -18,20 +16,18 @@ from .schema import AssetWeight,MaintenanceScenario,OptimizationResult
from src.overhaul_activity.service import get_standard_scope_by_session_id
RBD_SERVICE_API = "http://192.168.1.82:8000/rbd"
client = httpx.AsyncClient(timeout=300.0)
async def run_rbd_simulation(*, sim_hours: int, token):
sim_data = {
"SimulationName": f"Simulasi TR OH {sim_hours}",
"SchematicName": "- TJB - Unit 3 -",
"SimSeed": 1,
"SimDuration": sim_hours,
"OverhaulInterval": sim_hours - 1201,
"DurationUnit": "UHour",
"SimNumRun": 1,
"IsDefault": False,
"OverhaulDuration": 1200
"SimulationName": "Simulation OH Reliability Target",
"SchematicName": "- TJB - Unit 3 -",
"SimSeed": 1,
"SimDuration": sim_hours,
"DurationUnit": "UHour",
}
headers = {
@ -78,111 +74,77 @@ async def get_simulation_results(*, simulation_id: str, token: str):
"plant_result": plant_data
}
def calculate_asset_eaf_contributions(plant_result, eq_results, standard_scope, eaf_gap, scheduled_outage):
def calculate_asset_eaf_contributions(plant_result, eq_results, standard_scope, eaf_gap):
"""
Calculate each asset's contribution to plant EAF with realistic, fair improvement allocation.
The total EAF gap is distributed among assets proportionally to their contribution potential.
Automatically skips equipment with no unplanned downtime (only scheduled outages).
Calculate each asset's contribution to plant EAF with realistic improvement potential.
Ranking:
1. Highest contribution (Birnbaum Importance)
2. Tie-breaker: Contribution per downtime (efficiency)
"""
eaf_gap_fraction = eaf_gap / 100.0 if eaf_gap > 1.0 else eaf_gap
total_hours = plant_result.get("total_uptime") + plant_result.get("total_downtime")
plant_operating_fraction = (total_hours - scheduled_outage) / total_hours
REALISTIC_MAX_TECHNICAL = 0.995
REALISTIC_MAX_AVAILABILITY = REALISTIC_MAX_TECHNICAL * plant_operating_fraction
MIN_IMPROVEMENT_PERCENT = 0.0001
MIN_BIRNBAUM_IMPORTANCE = 0.0005
REALISTIC_MAX_AVAILABILITY = 0.995 # 99.5%
MIN_IMPROVEMENT_PERCENT = 0.005 # 0.5%
min_improvement_fraction = MIN_IMPROVEMENT_PERCENT / 100.0
EPSILON = 0.001 # 1 ms or a fraction of an hour for comparison tolerance
results = []
weighted_assets = []
# Step 1: Collect eligible assets and their weights
for asset in eq_results:
node = asset.get("aeros_node")
if not node:
continue
asset_name = node.get("node_name")
num_of_events = asset.get("num_events", 0)
asset_name = asset.get("aeros_node").get("node_name")
if asset_name not in standard_scope:
continue
contribution_factor = asset.get("contribution_factor", 0.0)
birbaum = asset.get("contribution", 0.0)
birnbaum = asset.get("contribution", 0.0)
current_availability = asset.get("availability", 0.0)
downtime = asset.get("total_downtime", 0.0)
# --- NEW: Skip equipment with no failures and near-maximum availability ---
if (
num_of_events < 2 # no unplanned events
or contribution_factor <= 0
):
# This equipment has nothing to improve realistically
# Filter 1: Importance too low
if birnbaum < MIN_BIRNBAUM_IMPORTANCE:
continue
# --- Compute realistic possible improvement ---
if REALISTIC_MAX_AVAILABILITY > current_availability:
max_possible_improvement = REALISTIC_MAX_AVAILABILITY - current_availability
else:
max_possible_improvement = 0.0 # No improvement possible
# Max possible availability improvement
max_possible_improvement = REALISTIC_MAX_AVAILABILITY - current_availability
if max_possible_improvement <= 0:
continue
# Required improvement (limited by plant gap and availability ceiling)
required_impr = min(eaf_gap_fraction, max_possible_improvement) * birnbaum
# Compute weighted importance (Birnbaum × FV)
raw_weight = birbaum
weight = math.sqrt(max(raw_weight, 0.0))
weighted_assets.append((asset, weight, 0))
# Step 2: Compute total weight
total_weight = sum(w for _, w, _ in weighted_assets) or 1.0
# Step 3: Distribute improvement proportionally to weight
for asset, weight, max_possible_improvement in weighted_assets:
node = asset.get("aeros_node")
contribution_factor = asset.get("contribution_factor", 0.0)
birbaum = asset.get("contribution", 0.0)
current_availability = asset.get("availability", 0.0)
downtime = asset.get("total_downtime", 0.0)
required_improvement = eaf_gap_fraction * (weight/total_weight)
required_improvement = min(required_improvement, max_possible_improvement)
required_improvement = max(required_improvement, min_improvement_fraction)
# Filter 2: Improvement too small
if required_impr < min_improvement_fraction:
continue
improvement_impact = required_improvement * contribution_factor
efficiency = birbaum / downtime if downtime > 0 else birbaum
# Contribution efficiency (secondary metric)
efficiency = birnbaum / downtime if downtime > 0 else birnbaum
contribution = AssetWeight(
node=node,
node=asset.get("aeros_node"),
availability=current_availability,
contribution=contribution_factor,
required_improvement=required_improvement,
improvement_impact=improvement_impact,
contribution=birnbaum,
required_improvement=required_impr,
num_of_failures=asset.get("num_events", 0),
down_time=downtime,
efficiency=efficiency,
birbaum=birbaum,
efficiency= efficiency
)
results.append(contribution)
# Step 4: Sort by Birnbaum importance
results.sort(key=lambda x: x.birbaum, reverse=True)
# Sort: 1) contribution (desc), 2) efficiency (desc)
results.sort(key=lambda x: (x.contribution, x.efficiency), reverse=True)
return results
def project_eaf_improvement(asset: AssetWeight, improvement_factor: float = 0.3) -> float:
"""
Project EAF improvement after maintenance
This is a simplified model - you should replace with your actual prediction logic
"""
current_downtime_pct = 100 - asset.eaf
# Assume maintenance reduces downtime by improvement_factor
improved_downtime_pct = current_downtime_pct * (1 - improvement_factor)
projected_eaf = 100 - improved_downtime_pct
return min(projected_eaf, 99.9) # Cap at 99.9%
@ -195,69 +157,24 @@ async def identify_worst_eaf_contributors(
oh_session_id: str,
collector_db: CollectorDbSession,
simulation_id: str,
duration: int,
po_duration: int,
cut_hours: float = 0, # new optional parameter: how many hours of planned outage user wants to cut
):
"""
Identify equipment that contributes most to plant EAF reduction,
evaluate if target EAF is physically achievable, and optionally
calculate the additional improvement if user cuts scheduled outage.
Identify equipment that contributes most to plant EAF reduction
in order to reach a target EAF.
"""
# Extract results
calc_result = simulation_result["calc_result"]
plant_result = simulation_result["plant_result"]
# Ensure list of equipment
eq_results = calc_result if isinstance(calc_result, list) else [calc_result]
# Base parameters
# Current plant EAF and gap
current_plant_eaf = plant_result.get("eaf", 0)
total_hours = duration
scheduled_outage = int(po_duration)
reduced_outage = max(scheduled_outage - cut_hours, 0)
max_eaf_possible = (total_hours - reduced_outage) / total_hours * 100
# Improvement purely from outage reduction (global)
scheduled_eaf_gain = (cut_hours / total_hours) * 100 if cut_hours > 0 else 0.0
# Target feasibility check
warning_message = None
if target_eaf > max_eaf_possible:
impossible_gap = target_eaf - max_eaf_possible
required_scheduled_hours = total_hours * (1 - target_eaf / 100)
required_reduction = reduced_outage - required_scheduled_hours
# Build dynamic phrase for clarity
if cut_hours > 0:
reduction_phrase = f" even after reducing planned outage by {cut_hours}h"
else:
reduction_phrase = ""
warning_message = (
f"⚠️ Target EAF {target_eaf:.2f}% exceeds theoretical maximum {max_eaf_possible:.2f}%"
f"{reduction_phrase}.\n"
f"To achieve it, planned outage must be further reduced by approximately "
f"{required_reduction:.1f} hours (from {reduced_outage:.0f}h → {required_scheduled_hours:.0f}h)."
)
# Cap target EAF to max achievable for calculation
target_eaf = max_eaf_possible
eaf_gap = (target_eaf - current_plant_eaf) / 100.0
if eaf_gap <= 0:
return OptimizationResult(
current_plant_eaf=current_plant_eaf,
target_plant_eaf=target_eaf,
possible_plant_eaf=current_plant_eaf,
eaf_gap=0,
warning_message=warning_message or "Target already achieved or exceeded.",
asset_contributions=[],
optimization_success=True,
simulation_id=simulation_id,
eaf_improvement_text=""
)
# Get standard scope (equipment in OH)
# Get standard scope (equipment allowed for overhaul/optimization)
standard_scope = await get_standard_scope_by_session_id(
db_session=db_session,
overhaul_session_id=oh_session_id,
@ -265,92 +182,43 @@ async def identify_worst_eaf_contributors(
)
standard_scope_location_tags = [tag.location_tag for tag in standard_scope]
# Compute contributions for reliability improvements
# Compute contributions
asset_contributions = calculate_asset_eaf_contributions(
plant_result, eq_results, standard_scope_location_tags, eaf_gap, reduced_outage
plant_result, eq_results, standard_scope_location_tags, eaf_gap=eaf_gap
)
# Greedy improvement allocation
project_eaf_improvement_total = 0.0
project_eaf_improvement = 0.0
selected_eq = []
# Greedy select until gap is closed
for asset in asset_contributions:
if project_eaf_improvement_total >= eaf_gap:
if project_eaf_improvement >= eaf_gap:
break
if (project_eaf_improvement_total + asset.improvement_impact) <= eaf_gap:
if (project_eaf_improvement + asset.required_improvement) <= eaf_gap:
selected_eq.append(asset)
project_eaf_improvement_total += asset.improvement_impact
project_eaf_improvement += asset.required_improvement
else:
# allow overshoot tolerance by skipping large ones, continue with smaller ones
continue
# Total EAF after improvements + optional outage cut
possible_eaf_plant = current_plant_eaf + project_eaf_improvement_total * 100 + scheduled_eaf_gain
possible_eaf_plant = min(possible_eaf_plant, max_eaf_possible)
selected_eq.sort(key=lambda x: x.birbaum, reverse=True)
required_cut_hours = 0
# --- 2. Optimization feasible but cannot reach target (underperformance case) ---
if possible_eaf_plant < target_eaf:
# Calculate shortfall
performance_gap = target_eaf - possible_eaf_plant
# Estimate how many scheduled outage hours must be reduced to close the remaining gap
# Each hour reduced adds (1 / total_hours) * 100 % to plant EAF
required_cut_hours = (performance_gap / 100) * total_hours
reliability_limit_msg = (
f"⚠️ Optimization was unable to reach target EAF {target_eaf:.2f}%.\n"
f"The best achievable EAF based on current reliability is "
f"{possible_eaf_plant:.2f}% (short by {performance_gap:.2f}%)."
)
# Add actionable recommendation
recommendation_msg = (
f"To achieve the target EAF, consider reducing planned outage by approximately "
f"{required_cut_hours:.1f} hours or {int(required_cut_hours/24)} days (from {reduced_outage:.0f}h → {reduced_outage - required_cut_hours:.0f}h)."
)
if warning_message:
warning_message = warning_message + "\n\n" + reliability_limit_msg + "\n" + recommendation_msg
else:
warning_message = reliability_limit_msg + "\n" + recommendation_msg
# --- EAF improvement reporting ---
eaf_improvement_points = (possible_eaf_plant - current_plant_eaf)
# Express as text for user readability
if eaf_improvement_points > 0:
improvement_text = f"{eaf_improvement_points:.6f} percentage points increase"
else:
improvement_text = "No measurable improvement achieved"
# Build result
# Build output with efficiency included
return OptimizationResult(
current_plant_eaf=current_plant_eaf,
target_plant_eaf=target_eaf,
possible_plant_eaf=possible_eaf_plant,
eaf_gap=eaf_gap,
warning_message=warning_message, # numeric
eaf_improvement_text=improvement_text,
recommended_reduced_outage=required_cut_hours,
asset_contributions=[
{
"node": asset.node,
"availability": asset.availability,
"contribution": asset.contribution,
"sensitivy": asset.birbaum,
"required_improvement": asset.required_improvement,
"system_impact": asset.improvement_impact,
"num_of_failures": asset.num_of_failures,
"down_time": asset.down_time,
"efficiency": asset.efficiency,
"efficiency": asset.efficiency,
}
for asset in selected_eq
],
outage_reduction_hours=cut_hours,
optimization_success=(current_plant_eaf + project_eaf_improvement_total * 100 + scheduled_eaf_gain)
>= target_eaf,
optimization_success=(current_plant_eaf + project_eaf_improvement) >= target_eaf,
simulation_id=simulation_id,
)
)

@ -1,10 +1,6 @@
import asyncio
from datetime import datetime, timedelta
import random
from typing import List, Optional
from temporalio.client import Client
from src.config import TEMPORAL_URL, TR_RBD_ID
def generate_down_periods(start_date: datetime, end_date: datetime,
num_periods: Optional[int] = None, min_duration: int = 3,
@ -56,36 +52,3 @@ def generate_down_periods(start_date: datetime, end_date: datetime,
down_periods.append((period_start, period_end))
return sorted(down_periods)
async def wait_for_workflow(simulation_id, max_retries=3):
workflow_id = f"simulation-{simulation_id}" # use returned ID
retries = 0
temporal_client = await Client.connect(TEMPORAL_URL)
while True:
try:
handle = temporal_client.get_workflow_handle(workflow_id=workflow_id)
desc = await handle.describe()
status = desc.status.name
if status not in ["RUNNING", "CONTINUED_AS_NEW"]:
print(f"✅ Workflow {workflow_id} finished with status: {status}")
break
print(f"⏳ Workflow {workflow_id} still {status}, checking again in 10s...")
except Exception as e:
retries += 1
if retries > max_retries:
print(f"⚠️ Workflow {workflow_id} not found after {max_retries} retries, treating as done. Error: {e}")
break
else:
print(f"⚠️ Workflow {workflow_id} not found (retry {retries}/{max_retries}), waiting 10s before retry...")
await asyncio.sleep(10)
continue
retries = 0 # reset retries if describe() worked
await asyncio.sleep(30)
return simulation_id

@ -7,7 +7,6 @@ from sqlalchemy import Select, func, select
from sqlalchemy.orm import joinedload
from src.auth.service import Token
from src.config import TC_RBD_ID
from src.database.core import DbSession
from src.overhaul_scope.service import get_all
from src.standard_scope.model import StandardScope
@ -21,7 +20,8 @@ from .service import (create_calculation_result_service, create_param_and_data,
get_avg_cost_by_asset,
get_calculation_by_reference_and_parameter,
get_calculation_data_by_id, get_calculation_result,
run_simulation_with_spareparts)
get_corrective_cost_time_chart,
get_overhaul_cost_by_time_chart, run_simulation, run_simulation_with_spareparts)
from src.database.core import CollectorDbSession
@ -86,25 +86,22 @@ async def create_calculation(
db_session: DbSession,
collector_db_session: CollectorDbSession,
calculation_time_constrains_in: CalculationTimeConstrainsParametersCreate,
created_by: str,
simulation_id
created_by: str
):
calculation_data = await create_param_and_data(
db_session=db_session,
calculation_param_in=calculation_time_constrains_in,
created_by=created_by,
)
rbd_simulation_id = simulation_id or TC_RBD_ID
# results = await create_calculation_result_service(
# db_session=db_session, calculation=calculation_data, token=token
# )
results = await run_simulation_with_spareparts(
db_session=db_session, calculation=calculation_data, token=token, collector_db_session=collector_db_session, simulation_id=rbd_simulation_id
db_session=db_session, calculation=calculation_data, token=token, collector_db_session=collector_db_session
)
return results
return results["id"]
async def get_or_create_scope_equipment_calculation(

@ -69,10 +69,6 @@ class CalculationData(Base, DefaultMixin, IdentityMixin):
optimum_oh_day = Column(Integer, nullable=True)
max_interval = Column(Integer, nullable=True)
rbd_simulation_id = Column(UUID(as_uuid=True), nullable=True)
optimum_analysis = Column(JSON, nullable=True)
session = relationship("OverhaulScope", lazy="raise")
@ -82,9 +78,7 @@ class CalculationData(Base, DefaultMixin, IdentityMixin):
"CalculationEquipmentResult", lazy="raise", viewonly=True
)
results = relationship("CalculationResult", lazy="raise", viewonly=True)
@classmethod
async def create_with_param(
@ -158,7 +152,6 @@ class CalculationEquipmentResult(Base, DefaultMixin):
optimum_day = Column(Integer, default=1)
is_included = Column(Boolean, default=True)
procurement_details = Column(JSON, nullable=True)
is_initial = Column(Boolean, default=True)
master_equipment = relationship(
"MasterEquipment",

@ -1,12 +1,9 @@
from typing import Annotated, List, Optional, Union
from typing import List, Optional, Union
from fastapi import APIRouter
from fastapi.params import Query
import requests
from src import config
from src.auth.service import CurrentUser, InternalKey, Token
from src.config import DEFAULT_TC_ID
from src.auth.service import CurrentUser, Token
from src.database.core import DbSession
from src.models import StandardResponse
@ -18,17 +15,16 @@ from .schema import (CalculationResultsRead,
CalculationTimeConstrainsParametersCreate,
CalculationTimeConstrainsParametersRead,
CalculationTimeConstrainsParametersRetrive,
CalculationTimeConstrainsRead, CreateCalculationQuery, EquipmentResult, CalculationTimeConstrainsReadNoResult)
CalculationTimeConstrainsRead, EquipmentResult)
from .service import (bulk_update_equipment, get_calculation_result,
get_calculation_result_by_day, get_calculation_by_assetnum, get_all_calculations)
get_calculation_result_by_day, get_calculation_by_assetnum)
from src.database.core import CollectorDbSession
router = APIRouter()
get_calculation = APIRouter()
@router.post(
"", response_model=StandardResponse[Union[dict, CalculationTimeConstrainsRead]]
"", response_model=StandardResponse[Union[str, CalculationTimeConstrainsRead]]
)
async def create_calculation_time_constrains(
token: Token,
@ -36,15 +32,10 @@ async def create_calculation_time_constrains(
collector_db_session: CollectorDbSession,
current_user: CurrentUser,
calculation_time_constrains_in: CalculationTimeConstrainsParametersCreate,
params: Annotated[CreateCalculationQuery, Query()],
# scope_calculation_id: Optional[str] = Query(None),
# with_results: Optional[int] = Query(0),
# simulation_id = Query(None)
scope_calculation_id: Optional[str] = Query(None),
with_results: Optional[int] = Query(0),
):
"""Save calculation time constrains Here"""
scope_calculation_id = params.scope_calculation_id
with_results = params.with_results
simulation_id = params.simulation_id
if scope_calculation_id:
results = await get_or_create_scope_equipment_calculation(
@ -59,32 +50,9 @@ async def create_calculation_time_constrains(
collector_db_session=collector_db_session,
calculation_time_constrains_in=calculation_time_constrains_in,
created_by=current_user.name,
simulation_id=simulation_id
)
return StandardResponse(data=results, message="Data created successfully")
@router.get(
"", response_model=StandardResponse[List[CalculationTimeConstrainsReadNoResult]]
)
async def get_all_simulation_calculations(
db_session: DbSession,
token: Token,
current_user: CurrentUser,
):
"""Get all calculation time constrains Here"""
calculations = await get_all_calculations(
db_session=db_session,
)
return StandardResponse(
data=calculations,
message="Data retrieved successfully",
)
return StandardResponse(data=str(results), message="Data created successfully")
@router.get(
@ -111,20 +79,13 @@ async def get_calculation_parameters(
)
@get_calculation.get(
@router.get(
"/{calculation_id}", response_model=StandardResponse[CalculationTimeConstrainsRead]
)
async def get_calculation_results(db_session: DbSession, calculation_id, token:InternalKey, include_risk_cost:int = Query(1, alias="risk_cost")):
if calculation_id == 'default':
calculation_id = DEFAULT_TC_ID
async def get_calculation_results(db_session: DbSession, calculation_id):
results = await get_calculation_result(
db_session=db_session, calculation_id=calculation_id, token=token, include_risk_cost=include_risk_cost
db_session=db_session, calculation_id=calculation_id
)
# requests.post(f"{config.AUTH_SERVICE_API}/sign-out", headers={
# "Authorization": f"Bearer {token}"
# })
return StandardResponse(
data=results,
@ -167,15 +128,12 @@ async def get_simulation_result(
)
@router.post("/update/{calculation_id}", response_model=StandardResponse[List[str]])
@router.put("/{calculation_id}", response_model=StandardResponse[List[str]])
async def update_selected_equipment(
db_session: DbSession,
calculation_id,
calculation_time_constrains_in: List[CalculationSelectedEquipmentUpdate],
):
if calculation_id == 'default':
calculation_id = "3b9a73a2-bde6-418c-9e2f-19046f501a05"
results = await bulk_update_equipment(
db=db_session,
selected_equipments=calculation_time_constrains_in,

@ -1,4 +1,3 @@
from src.overhaul_scope.schema import ScopeRead
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
@ -105,18 +104,6 @@ class AnalysisMetadata(CalculationTimeConstrainsBase):
total_equipment_analyzed: int
included_in_optimization: int
class CalculationTimeConstrainsReadNoResult(CalculationTimeConstrainsBase):
id: UUID
created_at: datetime
optimum_oh_day: Optional[int]
max_interval: Optional[int]
optimum_analysis: Optional[dict]
session: ScopeRead
# optimum_oh_day: int
# max_interval: int
# optimal_analysis: dict
# analysis_metadata: dict
class CalculationTimeConstrainsRead(CalculationTimeConstrainsBase):
id: UUID
reference: UUID
@ -141,9 +128,3 @@ class CalculationTimeConstrainsSimulationRead(CalculationTimeConstrainsBase):
class CalculationSelectedEquipmentUpdate(CalculationTimeConstrainsBase):
is_included: bool
location_tag: str
name:str
class CreateCalculationQuery(DefultBase):
scope_calculation_id: Optional[str] = Field(None)
with_results: Optional[int] = Field(0)
simulation_id: Optional[UUID] = Field(None)

File diff suppressed because it is too large Load Diff

@ -1,10 +1,11 @@
import datetime
import json
import numpy as np
import pandas as pd
import requests
from src.config import RBD_SERVICE_API
from src.config import REALIBILITY_SERVICE_API
def get_months_between(start_date: datetime.datetime, end_date: datetime.datetime) -> int:
"""
@ -15,42 +16,39 @@ def get_months_between(start_date: datetime.datetime, end_date: datetime.datetim
return months
def create_time_series_data(chart_data, max_hours=None):
# Filter out ON_OH
filtered_data = [d for d in chart_data if d["currentEvent"] != "ON_OH"]
sorted_data = sorted(filtered_data, key=lambda x: x["cumulativeTime"])
def create_time_series_data(chart_data, max_hours=24096):
# Filter out data points with currentEvent = "ON_OH"
filtered_data = [data for data in chart_data if data['currentEvent'] != 'ON_OH']
# Sort filtered data by cumulative time
sorted_data = sorted(filtered_data, key=lambda x: x['cumulativeTime'])
if not sorted_data:
return []
hourly_data = []
current_state_index = 0
current_flow_rate = sorted_data[0]["flowRate"]
current_eq_status = sorted_data[0]["currentEQStatus"]
# Determine maximum bound (either given or from data)
last_time = int(sorted_data[-1]["cumulativeTime"])
if max_hours is None:
max_hours = last_time
for hour in range(0, max_hours + 1): # start from 0
# Advance state if needed
while (current_state_index < len(sorted_data) - 1 and
hour >= sorted_data[current_state_index + 1]["cumulativeTime"]):
current_flow_rate = sorted_data[0]['flowRate']
current_eq_status = sorted_data[0]['currentEQStatus']
for hour in range(1, max_hours + 1):
# Check if we need to advance to the next state
while (current_state_index < len(sorted_data) - 1 and
hour >= int(sorted_data[current_state_index + 1]['cumulativeTime'])):
current_state_index += 1
current_flow_rate = sorted_data[current_state_index]["flowRate"]
current_eq_status = sorted_data[current_state_index]["currentEQStatus"]
current_flow_rate = sorted_data[current_state_index]['flowRate']
current_eq_status = sorted_data[current_state_index]['currentEQStatus']
# Add hourly data point
hourly_data.append({
"cumulativeTime": hour,
"flowRate": current_flow_rate,
"currentEQStatus": current_eq_status
'cumulativeTime': hour,
'flowRate': current_flow_rate,
'currentEQStatus': current_eq_status
})
return hourly_data
def calculate_failures_per_month(hourly_data):
"""
Calculate the cumulative number of failures up to each month from hourly data.
@ -99,110 +97,96 @@ def calculate_failures_per_month(hourly_data):
return result
import pandas as pd
import datetime
import datetime
import pandas as pd
async def plant_simulation_metrics(simulation_id: str, location_tag: str, max_interval, token, last_oh_date, use_location_tag: int = 1):
    """Get failure predictions for equipment from the RBD simulation service.

    Args:
        simulation_id: Simulation run identifier used in the result URL.
        location_tag: Equipment location tag appended to the result URL.
        max_interval: Not used in this function body — kept for interface compatibility.
        token: Bearer token forwarded to the simulation service.
        last_oh_date: Not used in this function body — kept for interface compatibility.
        use_location_tag: Not used in this function body — kept for interface compatibility.

    Returns:
        The "data" payload of the simulation calc-result response.

    Raises:
        Exception: wraps any request, JSON-decode, or missing-"data" failure,
            with the original exception chained as the cause.
    """
    calc_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/{simulation_id}/{location_tag}"
    # NOTE(review): blocking requests.get inside an async function stalls the
    # event loop while waiting; consider an async HTTP client.
    try:
        response = requests.get(
            calc_result_url,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {token}",
            },
            timeout=30
        )
        response.raise_for_status()
        # KeyError added: a 200 response without a "data" key previously
        # escaped as a bare KeyError instead of the wrapped Exception.
        prediction_data = response.json()['data']
    except (requests.RequestException, ValueError, KeyError) as e:
        # Chain the original exception so the root cause is not lost.
        raise Exception(str(e)) from e

    return prediction_data
def analyze_monthly_metrics(timestamp_outs):
"""
Analyze time series data to calculate monthly metrics:
1. Failure count per month
2. Cumulative failure count each month
3. Total out-of-service time per month
4. Average flow rate per month
"""
def analyze_monthly_metrics(timestamp_outs, start_date, max_flow_rate: float = 550):
if not timestamp_outs:
# Check if timestamp_outs is None or empty
if timestamp_outs is None or not timestamp_outs:
# Return empty results with zero values
return {}
# Convert to DataFrame for easier manipulation
df = pd.DataFrame(timestamp_outs)
# Check if DataFrame is empty after creation
if df.empty:
return {}
# Check if required columns exist
required_columns = ['cumulativeTime', 'currentEQStatus', 'flowRate']
if not all(col in df.columns for col in required_columns):
missing_columns = [col for col in required_columns if col not in df.columns]
if missing_columns:
return {}
start_oh = datetime.datetime(start_date.year, start_date.month, start_date.day)
# Actual datetime from cumulative hours
df['datetime'] = df['cumulativeTime'].apply(lambda x: start_oh + datetime.timedelta(hours=x))
# Assuming the simulation starts from a reference date (you can modify this)
# For this example, I'll use January 1, 2024 as the start date
start_date = datetime.datetime(2025, 10, 22)
# Convert cumulative hours to actual datetime
df['datetime'] = df['cumulativeTime'].apply(
lambda x: start_date + datetime.timedelta(hours=x)
)
# Extract month-year for grouping
df['month_year'] = df['datetime'].dt.to_period('M')
# Duration until next timestamp
df['duration_hours'] = df['cumulativeTime'].shift(-1) - df['cumulativeTime']
df['duration_hours'] = df['duration_hours'].fillna(0)
# Failure detection
df['status_change'] = df['currentEQStatus'].shift() != df['currentEQStatus']
df['failure'] = (df['currentEQStatus'] == 'OoS') & df['status_change']
# Cumulative tracking
df['cumulative_failures'] = df['failure'].cumsum()
df['cumulative_oos'] = (df['duration_hours'] * (df['currentEQStatus'] == 'OoS')).cumsum()
# Derating calculation
# Derating = capacity reduction below max but not outage
df['derating'] = (max_flow_rate - df['flowRate']).clip(lower=0)
df['is_derated'] = (df['currentEQStatus'] == 'Svc') & (df['derating'] > 0)
# Equivalent Derated Hours (EFDH) → sum of derating * hours, then normalized by max capacity
df['derated_mwh'] = df['derating'] * df['duration_hours']
df['derated_hours_equivalent'] = df['derated_mwh'] / max_flow_rate
# Calculate time duration for each record (difference between consecutive cumulative times)
df['duration_hours'] = df['cumulativeTime'].diff().fillna(df['cumulativeTime'].iloc[0])
# Initialize results dictionary
monthly_results = {}
for month_period, group in df.groupby('month_year', sort=True):
# Track cumulative failures across all months
cumulative_failures = 0
cummulative_oos = 0
# Group by month-year and ensure chronological order
for month_period, group in df.groupby('month_year'):
month_str = str(month_period)
monthly_results[month_str] = {}
# Failures
monthly_results[month_str]['failures_count'] = int(group['failure'].sum())
monthly_results[month_str]['cumulative_failures'] = int(group['cumulative_failures'].max())
# OOS hours
oos_time = group.loc[group['currentEQStatus'] == 'OoS', 'duration_hours'].sum()
# 1. Count failures per month
# A failure is when currentEQStatus changes from "Svc" to "OoS"
status_changes = group['currentEQStatus'].shift() != group['currentEQStatus']
failures = ((group['currentEQStatus'] == 'OoS') & status_changes).sum()
monthly_results[month_str]['failures_count'] = int(failures)
# 2. Add failures to cumulative count
cumulative_failures += failures
monthly_results[month_str]['cumulative_failures'] = int(cumulative_failures)
# 3. Total out-of-service time per month (in hours)
oos_time = group[group['currentEQStatus'] == 'OoS']['duration_hours'].sum()
monthly_results[month_str]['total_oos_hours'] = float(oos_time)
monthly_results[month_str]['cummulative_oos'] = float(group['cumulative_oos'].max())
# Flow rate (weighted average)
cummulative_oos += oos_time
monthly_results[month_str]['cummulative_oos'] = float(cummulative_oos)
# 4. Average flow rate per month (weighted by duration)
# Calculate weighted average flow rate
total_flow_time = (group['flowRate'] * group['duration_hours']).sum()
total_time = group['duration_hours'].sum()
avg_flow_rate = total_flow_time / total_time if total_time > 0 else 0
monthly_results[month_str]['avg_flow_rate'] = float(avg_flow_rate)
# Extra metrics
# Additional useful metrics
monthly_results[month_str]['total_hours'] = float(total_time)
service_hours = group.loc[group['currentEQStatus'] == 'Svc', 'duration_hours'].sum()
monthly_results[month_str]['service_hours'] = float(service_hours)
monthly_results[month_str]['service_hours'] = float(
group[group['currentEQStatus'] == 'Svc']['duration_hours'].sum()
)
monthly_results[month_str]['availability_percentage'] = float(
(service_hours / total_time * 100) if total_time > 0 else 0
(monthly_results[month_str]['service_hours'] / total_time * 100) if total_time > 0 else 0
)
# Derating metrics
derating_hours = group.loc[group['is_derated'], 'duration_hours'].sum()
derated_mwh = group['derated_mwh'].sum()
equivalent_derated_hours = group['derated_hours_equivalent'].sum()
monthly_results[month_str]['derating_hours'] = float(derating_hours)
monthly_results[month_str]['derated_mwh'] = float(derated_mwh)
monthly_results[month_str]['equivalent_derated_hours'] = float(equivalent_derated_hours)
return monthly_results
def calculate_risk_cost_per_failure(monthly_results, birnbaum_importance, energy_price):
"""
Calculate risk cost per failure for each month based on:
@ -293,10 +277,272 @@ def get_monthly_risk_analysis(timestamp_outs, birnbaum_importance, energy_price)
'risk_cost_array': risk_analysis['risk_cost_per_failure_array']
}
# Usage example:
# birnbaum_importance = 0.85 # Example value
# energy_price = 100 # Example: $100 per unit
#
# results = get_monthly_risk_analysis(timestamp_outs, birnbaum_importance, energy_price)
# risk_cost_array = results['risk_cost_array']
# print("Risk cost per failure each month:", risk_cost_array)
def fetch_reliability(location_tags):
    """Fetch reliability distribution parameters for the given assets.

    Calls the reliability service's batch endpoint and returns the list
    under the response's "data" key (empty list when the key is absent).

    Args:
        location_tags: List of asset location tags to look up.

    Returns:
        list: Raw asset records as returned by the service.

    Raises:
        requests.HTTPError: when the service responds with an error status.
    """
    url = f"{REALIBILITY_SERVICE_API}/asset/batch"
    # NOTE(review): a GET with a JSON body is unusual; some proxies strip GET
    # bodies — confirm the reliability service actually reads it.
    # Timeout added so a stalled service cannot hang the caller forever
    # (matches the 30s timeout used elsewhere in this module).
    resp = requests.get(url, json={"location_tags": location_tags}, timeout=30)
    resp.raise_for_status()
    return resp.json().get("data", [])
import math
from scipy.stats import lognorm, norm
def get_reliability(distribution: str, params: dict, t: float) -> float:
    """Survival probability R(t) of the given distribution at time t.

    Args:
        distribution: Distribution name, case-insensitive — e.g. "weibull_2p",
            "weibull_3p", "exponential", "lognormal", "normal", "nhpp".
        params: Distribution parameters. When a required parameter is
            missing the function falls back to R = 1.0 (never fails).
        t: Time at which to evaluate reliability (same units as params).

    Returns:
        float: Reliability in [0, 1]; 1.0 for unknown distributions or
        incomplete parameters.
    """
    d = (distribution or "").lower()
    if d in ("weibull_2p", "weibull_3p"):
        eta = params.get("eta")
        beta = params.get("beta")
        gamma_ = params.get("gamma", 0)  # location shift (3-parameter form)
        if eta is None or beta is None:
            return 1.0
        if t <= gamma_:
            return 1.0  # before the location shift nothing can fail
        return math.exp(-((t - gamma_) / eta) ** beta)
    if d in ("exponential", "exponential_2p"):
        # NOTE(review): lambda == 0 is falsy and falls through to "Lambda";
        # also the 2-parameter form's location shift is not applied here —
        # confirm whether exponential_2p should subtract gamma.
        lam = params.get("lambda") or params.get("Lambda")
        if lam is None:
            return 1.0
        return math.exp(-lam * t)
    if "lognormal" in d:
        mu = params.get("mu")
        sigma = params.get("sigma")
        gamma_ = params.get("gamma", 0)
        if mu is None or sigma is None:
            return 1.0
        # Shifted lognormal survival via scipy; time is clamped at 0.
        return 1 - lognorm.cdf(max(t - gamma_, 0), s=sigma, scale=math.exp(mu))
    if "normal" in d:
        mu = params.get("mu")
        sigma = params.get("sigma")
        if mu is None or sigma is None:
            return 1.0
        return 1 - norm.cdf(t, loc=mu, scale=sigma)
    if "nhpp" in d:
        # NHPP with power-law (Weibull-shaped) cumulative intensity;
        # survival of the first event. (Removed an unused "lambda" lookup
        # that had no effect on the result.)
        eta = params.get("eta")
        beta = params.get("beta")
        if eta is None or beta is None:
            return 1.0
        if t <= 0:
            return 1.0  # at time 0, survival = 1
        return math.exp(-(t / eta) ** beta)
    return 1.0  # unknown distribution: assume perfectly reliable
import numpy as np
def failures_per_month(distribution, params, mttr, design_flow_rate=100,
                       population=1, months=24, hours_per_month=720,
                       mode="expected", runs=1):
    """
    Calculate monthly failures, cumulative failures, downtime, and avg flowrate.

    Args:
        distribution: Reliability distribution name (see get_reliability).
        params: Distribution parameters dict.
        mttr: Mean time to repair (hours), charged once per failure.
        design_flow_rate: Nominal flowrate; scaled by monthly availability.
        population: Number of identical components.
        months: Horizon length in months.
        hours_per_month: Hours per month bucket (default 720 = 30 days).
        mode: "expected" returns smooth fractional expected values;
              "simulate" returns integer stochastic draws.
        runs: Number of Monte Carlo runs (only used when simulating).

    Returns:
        list[dict]: one dict per month. For mode="expected" or runs == 1
        this is a single trajectory; otherwise a per-month summary across
        runs (failure count taken at the 90th percentile, other metrics
        derived from it so they stay mutually consistent).
    """
    all_runs = []
    for _ in range(runs):
        results = []
        cumulative = 0
        total_oos_hours = 0
        for m in range(1, months + 1):
            t_start = (m - 1) * hours_per_month
            t_end = m * hours_per_month
            r_start = get_reliability(distribution, params, t_start)
            r_end = get_reliability(distribution, params, t_end)
            # Probability of failing within this month's window.
            prob_failure = max(0.0, r_start - r_end)
            if mode == "expected":
                failures = population * prob_failure  # fractional expectation
            else:  # simulate: integer draw per component
                failures = np.random.binomial(population, prob_failure)
            cumulative += failures
            # Downtime = failures x MTTR, clamped so service hours stay >= 0.
            oos_hours = failures * mttr
            total_oos_hours += oos_hours
            service_hours = max(0, hours_per_month - oos_hours)
            availability = service_hours / hours_per_month
            avg_flowrate = design_flow_rate * availability
            results.append({
                "month": m,
                "failures": failures,
                "cumulative_failures": cumulative,
                "oos_hours": oos_hours,
                "total_oos_hours": total_oos_hours,
                "service_hours": service_hours,
                "availability": availability,
                "avg_flowrate": avg_flowrate,
            })
        all_runs.append(results)

    # === OUTPUTS ===
    if mode == "expected" or runs == 1:
        return all_runs[0]  # smooth or single trajectory

    # === Summarize multiple runs ===
    # Only the failure count is aggregated across runs; the remaining
    # metrics are recomputed from it. (Removed dead work: the old loop
    # computed percentiles for every key but used only "failures".)
    # NOTE(review): the old comment said "P50 median" but the code used the
    # 90th percentile; the 90th percentile is preserved — confirm intent.
    summary = []
    total_oos_hours = 0
    cumulative = 0
    for m in range(months):
        failure_samples = [run[m]["failures"] for run in all_runs]
        failures = float(np.percentile(failure_samples, 90))
        cumulative += failures
        oos_hours = failures * mttr
        total_oos_hours += oos_hours
        service_hours = max(0, hours_per_month - oos_hours)
        availability = service_hours / hours_per_month
        avg_flowrate = design_flow_rate * availability
        summary.append({
            # Fixed: summary months were 0-based while trajectories (and
            # simulate_failures) are 1-based.
            "month": m + 1,
            "failures": failures,
            "cumulative_failures": cumulative,
            "oos_hours": oos_hours,
            "total_oos_hours": total_oos_hours,
            "service_hours": service_hours,
            "availability": availability,
            "avg_flowrate": avg_flowrate,
        })
    return summary
import pandas as pd
def get_reliability_data(location_tags, months=24, design_flow_rate=100):
    """Fetch reliability parameters and predict monthly failure metrics.

    Args:
        location_tags: Asset location tags to fetch parameters for.
        months: Prediction horizon in months.
        design_flow_rate: Nominal flowrate passed through to
            failures_per_month. (Fixes a NameError: `design_flow_rate` was
            previously referenced here but never defined in any scope.)

    Returns:
        list[dict]: per-month prediction rows for every asset, each row
        tagged with its asset's "location_tag".
    """
    # 1. Fetch distribution parameters from the reliability service.
    data = fetch_reliability(location_tags)

    all_results = []
    for asset in data:
        distribution = asset.get("distribution")
        params = asset.get("parameters", {})
        mttr = 3  # TODO(review): hard-coded MTTR — confirm units and source from data
        tag = asset.get("location_tag")

        # 2. Predict monthly metrics for this asset.
        results = failures_per_month(distribution, params, mttr,
                                     design_flow_rate, months=months)

        # 3. Store each row with its asset's location_tag.
        for row in results:
            row["location_tag"] = tag
            all_results.append(row)

    return all_results
import numpy as np
import math
def sample_failure_time(distribution, params):
    """Draw one time-to-failure from the reliability distribution.

    Args:
        distribution: Distribution name, case-insensitive.
        params: Distribution parameters dict.

    Returns:
        float: a sampled failure time, or np.inf when the distribution is
        unknown or required parameters are missing (the component then
        never fails within any finite horizon).
    """
    d = (distribution or "").lower()
    # Uniform draw used by the inverse-transform branches (Weibull,
    # exponential); the lognormal/normal branches sample via numpy's own
    # generators and leave this draw unused.
    u = np.random.rand()
    if d in ("weibull_2p", "weibull_3p"):
        eta = params.get("eta")
        beta = params.get("beta")
        gamma_ = params.get("gamma", 0)
        if eta is None or beta is None:
            return np.inf
        # Inverse CDF of the (shifted) Weibull distribution.
        return gamma_ + eta * (-math.log(1 - u)) ** (1 / beta)
    if "exponential" in d:
        # Simplified: the old `or "exponential_2p" in d` clause was redundant
        # because "exponential" is a substring of "exponential_2p".
        lam = params.get("lambda") or params.get("Lambda")
        if lam is None:
            return np.inf
        return -math.log(1 - u) / lam
    if "lognormal" in d:  # checked before "normal" (substring overlap)
        mu = params.get("mu")
        sigma = params.get("sigma")
        gamma_ = params.get("gamma", 0)
        if mu is None or sigma is None:
            return np.inf
        return gamma_ + np.random.lognormal(mean=mu, sigma=sigma)
    if "normal" in d:
        mu = params.get("mu")
        sigma = params.get("sigma")
        if mu is None or sigma is None:
            return np.inf
        # Clamped at 0: a normal TTF draw can be negative.
        return max(0, np.random.normal(mu, sigma))
    return np.inf
def simulate_failures(distribution, params, mttr, design_flow_rate=100,
                      population=1, months=24, hours_per_month=720,
                      runs=1000):
    """
    Simulate failures over a given horizon using a renewal process.

    Each component in the population repeatedly fails with times-to-failure
    drawn from the reliability distribution and is renewed after repair
    (mttr hours). Always stochastic; results are aggregated to the P50
    (median) of each metric across runs.

    Args:
        distribution: Reliability distribution name (see sample_failure_time).
        params: Distribution parameters dict.
        mttr: Mean time to repair (hours), added before each renewal draw.
        design_flow_rate: Nominal flowrate; scaled by monthly availability.
        population: Number of identical components.
        months: Horizon length in months.
        hours_per_month: Hours per month bucket (default 720 = 30 days).
        runs: Number of Monte Carlo runs.

    Returns:
        list[dict]: one dict per month with the median of each metric
        across all runs ("month" is 1-based).
    """
    horizon = months * hours_per_month
    all_runs = []
    for _ in range(runs):
        # Count failures per month bucket for this run.
        failures_by_month = [0] * months
        for _ in range(population):
            # First failure of this component.
            t = sample_failure_time(distribution, params)
            while t < horizon:
                month_idx = int(t // hours_per_month)
                if month_idx < months:
                    failures_by_month[month_idx] += 1
                # Renewal: after repair (mttr), draw a fresh time-to-failure.
                t += mttr + sample_failure_time(distribution, params)

        # Derive per-month metrics for this run.
        results = []
        cumulative = 0
        total_oos_hours = 0
        for m in range(months):
            failures = failures_by_month[m]
            cumulative += failures
            oos_hours = failures * mttr
            total_oos_hours += oos_hours
            service_hours = max(0, hours_per_month - oos_hours)
            availability = service_hours / hours_per_month
            avg_flowrate = design_flow_rate * availability
            results.append({
                "month": m + 1,
                "failures": failures,
                "cumulative_failures": cumulative,
                "oos_hours": oos_hours,
                "total_oos_hours": total_oos_hours,
                "service_hours": service_hours,
                "availability": availability,
                "avg_flowrate": avg_flowrate,
            })
        all_runs.append(results)

    # === Aggregate to P50 (median) across runs ===
    metric_keys = ["failures", "cumulative_failures", "oos_hours",
                   "total_oos_hours", "service_hours", "availability",
                   "avg_flowrate"]
    summary = []
    for m in range(months):
        row = {"month": m + 1}
        for key in metric_keys:
            values = [run[m][key] for run in all_runs]
            row[key] = float(np.percentile(values, 50))
        summary.append(row)
    return summary

@ -45,7 +45,7 @@ def get_config():
config = get_config()
LOG_LEVEL = config("LOG_LEVEL", default="INFO")
LOG_LEVEL = config("LOG_LEVEL", default=logging.WARNING)
ENV = config("ENV", default="local")
PORT = config("PORT", cast=int, default=8000)
HOST = config("HOST", default="localhost")
@ -83,12 +83,3 @@ MAXIMO_API_KEY = config("MAXIMO_API_KEY", default="keys")
AUTH_SERVICE_API = config("AUTH_SERVICE_API", default="http://192.168.1.82:8000/auth")
REALIBILITY_SERVICE_API = config("REALIBILITY_SERVICE_API", default="http://192.168.1.82:8000/reliability")
RBD_SERVICE_API = config("RBD_SERVICE_API", default="http://192.168.1.82:8000/rbd")
TEMPORAL_URL = config("TEMPORAL_URL", default="http://192.168.1.86:7233")
API_KEY = config("API_KEY", default="0KFvcB7zWENyKVjoma9FKZNofVSViEshYr59zEQNGaYjyUP34gCJKDuqHuk9VfvE")
TR_RBD_ID = config("TR_RBD_ID", default="f04f365e-25d8-4036-87c2-ba1bfe1f9229")
TC_RBD_ID = config("TC_RBD_ID", default="f8523cb0-dc3c-4edb-bcf1-eea7b62582f1")
DEFAULT_TC_ID = config("DEFAULT_TC_ID", default="44f483f3-bfe4-4094-a59f-b97a10f2fea6")

@ -1,42 +0,0 @@
from contextvars import ContextVar
from typing import Optional, Final
# Context-key names; also used as the ContextVar debug names below.
REQUEST_ID_CTX_KEY: Final[str] = "request_id"
USER_ID_CTX_KEY: Final[str] = "user_id"
USERNAME_CTX_KEY: Final[str] = "username"
ROLE_CTX_KEY: Final[str] = "role"

# One ContextVar per piece of per-request state; each defaults to None so
# "not set" is distinguishable without raising LookupError.
_request_id_ctx_var: ContextVar[Optional[str]] = ContextVar(REQUEST_ID_CTX_KEY, default=None)
_user_id_ctx_var: ContextVar[Optional[str]] = ContextVar(USER_ID_CTX_KEY, default=None)
_username_ctx_var: ContextVar[Optional[str]] = ContextVar(USERNAME_CTX_KEY, default=None)
_role_ctx_var: ContextVar[Optional[str]] = ContextVar(ROLE_CTX_KEY, default=None)

def get_request_id() -> Optional[str]:
    """Return the request id bound to the current context, or None."""
    return _request_id_ctx_var.get()

def set_request_id(request_id: str):
    """Bind the request id; returns a Token usable with reset_request_id."""
    return _request_id_ctx_var.set(request_id)

def reset_request_id(token):
    """Restore the request id to its value before the matching set call."""
    _request_id_ctx_var.reset(token)

def get_user_id() -> Optional[str]:
    """Return the current user's id, or None when not set."""
    return _user_id_ctx_var.get()

def set_user_id(user_id: str):
    """Bind the current user's id; returns the ContextVar Token."""
    return _user_id_ctx_var.set(user_id)

def get_username() -> Optional[str]:
    """Return the current username, or None when not set."""
    return _username_ctx_var.get()

def set_username(username: str):
    """Bind the current username; returns the ContextVar Token."""
    return _username_ctx_var.set(username)

def get_role() -> Optional[str]:
    """Return the current user's role, or None when not set."""
    return _role_ctx_var.get()

def set_role(role: str):
    """Bind the current user's role; returns the ContextVar Token."""
    return _role_ctx_var.set(role)

@ -256,12 +256,12 @@ def calculate_contribution_accurate(availabilities: Dict[str, float], structure_
key=lambda x: x[1]['birnbaum_importance'],
reverse=True)
# print("\n=== COMPONENT IMPORTANCE ANALYSIS ===")
# print(f"System Availability: {system_info['system_availability']:.6f} ({system_info['system_availability']*100:.4f}%)")
# print(f"System Unavailability: {system_info['system_unavailability']:.6f}")
# print("\nComponent Rankings (by Birnbaum Importance):")
# print(f"{'Component':<20} {'Availability':<12} {'Birnbaum':<12} {'Criticality':<12} {'F-V':<12} {'Contribution%':<12}")
# print("-" * 92)
print("\n=== COMPONENT IMPORTANCE ANALYSIS ===")
print(f"System Availability: {system_info['system_availability']:.6f} ({system_info['system_availability']*100:.4f}%)")
print(f"System Unavailability: {system_info['system_unavailability']:.6f}")
print("\nComponent Rankings (by Birnbaum Importance):")
print(f"{'Component':<20} {'Availability':<12} {'Birnbaum':<12} {'Criticality':<12} {'F-V':<12} {'Contribution%':<12}")
print("-" * 92)
for component, measures in sorted_components:
print(f"{component:<20} {measures['component_availability']:<12.6f} "

@ -1,6 +1,5 @@
# src/database.py
import re
import operator
from contextlib import asynccontextmanager
from typing import Annotated, Any
@ -51,7 +50,7 @@ class Base(DeclarativeBase):
def dict(self):
"""Returns a dict representation of a model."""
if hasattr(self, '__table__'):
return {c.name: operator.attrgetter(c.name)(self) for c in self.__table__.columns}
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
return {}
class CollectorBase(DeclarativeBase):
@ -62,7 +61,7 @@ class CollectorBase(DeclarativeBase):
def dict(self):
"""Returns a dict representation of a model."""
if hasattr(self, '__table__'):
return {c.name: operator.attrgetter(c.name)(self) for c in self.__table__.columns}
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
return {}
@asynccontextmanager

@ -1,22 +0,0 @@
from typing import List, Optional
from pydantic import Field
from src.models import DefultBase
class CommonParams(DefultBase):
    """Common list-endpoint query parameters (pagination, search, sorting).

    Field aliases match the incoming query-string names (camelCase and
    bracketed array parameters).
    """
    # This ensures no extra query params are allowed
    current_user: Optional[str] = Field(None, alias="currentUser")
    # 1-based page number; upper bound keeps it inside a 32-bit signed int.
    page: int = Field(1, gt=0, lt=2147483647)
    # Page size: multiples of 5, capped at 50 ("?itemsPerPage=").
    items_per_page: int = Field(5, gt=0, le=50, multiple_of=5, alias="itemsPerPage")
    # Free-text search string ("?q=").
    query_str: Optional[str] = Field(None, alias="q")
    # Serialized filter spec ("?filter=").
    filter_spec: Optional[str] = Field(None, alias="filter")
    # Sort column names, paired positionally with `descending`.
    sort_by: List[str] = Field(default_factory=list, alias="sortBy[]")
    descending: List[bool] = Field(default_factory=list, alias="descending[]")
    # Field names to exclude from the response.
    exclude: List[str] = Field(default_factory=list, alias="exclude[]")
    # "?all=1" requests the full, unpaginated result set.
    all_params: int = Field(0, alias="all")

    # Property to mirror your original return dict's bool conversion
    @property
    def is_all(self) -> bool:
        # Truthy when the caller passed a non-zero "all" query parameter.
        return bool(self.all_params)

@ -1,5 +1,5 @@
import logging
from typing import Annotated, List, Type, TypeVar
from typing import Annotated, List
from fastapi import Depends, Query
from pydantic.types import Json, constr
@ -7,8 +7,6 @@ from sqlalchemy import Select, desc, func, or_
from sqlalchemy.exc import ProgrammingError
from sqlalchemy_filters import apply_pagination
from src.database.schema import CommonParams
from .core import DbSession
log = logging.getLogger(__name__)
@ -49,21 +47,6 @@ CommonParameters = Annotated[
Depends(common_parameters),
]
T = TypeVar("T", bound=CommonParams)
def get_params_factory(model_type: Type[T]):
async def wrapper(
db_session: DbSession,
params: Annotated[model_type, Query()] # type: ignore
):
res = params.model_dump()
return {
"db_session": db_session,
"all": params.is_all,
**res
}
return wrapper
def search(*, query_str: str, query: Query, model, sort=False):
"""Perform a search based on the query."""

@ -5,7 +5,7 @@ from sqlalchemy import Delete, Select, and_, text
from sqlalchemy.orm import selectinload
from src.auth.service import CurrentUser
from src.database.core import CollectorDbSession, DbSession
from src.database.core import DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate
from .model import ScopeEquipmentPart
@ -16,203 +16,139 @@ from .schema import ScopeEquipmentActivityCreate, ScopeEquipmentActivityUpdate
# result = await db_session.get(ScopeEquipmentActivity, scope_equipment_activity_id)
# return result
from typing import Optional, List, Dict, Any
from sqlalchemy.ext.asyncio import AsyncSession as DbSession
from sqlalchemy.sql import text
import logging
logger = logging.getLogger(__name__)
# async def get_all(
# db_session: CollectorDbSession,
# location_tag: Optional[str] = None,
# start_year: int = 2023,
# end_year: Optional[int] = None,
# parent_wonum: Optional[str] = None
# ) -> List[Dict[str, Any]]:
# """
# Retrieve overhaul spare parts consumption data.
# Handles missing data, null parent WO, and query safety.
# Args:
# db_session: Async SQLAlchemy session
# location_tag: Optional location filter
# start_year: Year to start analysis (default 2023)
# end_year: Optional year to end analysis (default start_year + 1)
# parent_wonum: Parent work order number (required for context)
# Returns:
# List of dictionaries with spare part usage per overhaul WO.
# """
# # --- 1. Basic validation ---
# if not parent_wonum:
# logger.warning("Parent WO number not provided. Returning empty result.")
# return []
# if start_year < 1900 or (end_year and end_year < start_year):
# raise ValueError("Invalid year range provided.")
# if end_year is None:
# end_year = start_year + 1
# # --- 2. Build SQL safely ---
# base_query = """
# WITH filtered_wo AS (
# SELECT wonum, location_tag
# FROM public.wo_max
# WHERE worktype = 'OH'
# AND xx_parent = :parent_wonum
# """
# params = {
# "parent_wonum": parent_wonum,
# }
# if location_tag:
# base_query += " AND location_tag = :location_tag"
# params["location_tag"] = location_tag
# base_query += """
# ),
# filtered_materials AS (
# SELECT wonum, itemnum, itemqty, inv_curbaltotal, inv_avgcost
# FROM public.wo_max_material
# WHERE wonum IN (SELECT wonum FROM filtered_wo)
# )
# SELECT
# fwo.location_tag AS location_tag,
# fm.itemnum,
# spl.description AS sparepart_name,
# COALESCE(SUM(fm.itemqty), 0) AS parts_consumed_in_oh,
# COALESCE(AVG(fm.inv_avgcost), 0) AS avgcost,
# COALESCE(AVG(fm.inv_curbaltotal), 0) AS inv_curbaltotal
# FROM filtered_wo fwo
# INNER JOIN filtered_materials fm ON fwo.wonum = fm.wonum
# LEFT JOIN public.maximo_sparepart_pr_po_line spl ON fm.itemnum = spl.item_num
# GROUP BY fwo.location_tag, fm.itemnum, spl.description
# ORDER BY fwo.location_tag, fm.itemnum;
# """
# # --- 3. Execute query ---
# try:
# result = await db_session.execute(text(base_query), params)
# rows = result.fetchall()
# # Handle "no data found"
# if not rows:
# logger.info(f"No spare part data found for parent WO {parent_wonum}.")
# return []
# # --- 4. Map results cleanly ---
# equipment_parts = []
# for row in rows:
# try:
# equipment_parts.append({
# "location_tag": row.location_tag,
# "itemnum": row.itemnum,
# "sparepart_name": row.sparepart_name or "-",
# "parts_consumed_in_oh": float(row.parts_consumed_in_oh or 0),
# "avgcost": float(row.avgcost or 0),
# "inv_curbaltotal": float(row.inv_curbaltotal or 0)
# })
# except Exception as parse_err:
# logger.error(f"Failed to parse row {row}: {parse_err}")
# continue # Skip malformed rows
# return equipment_parts
# except Exception as e:
# logger.exception(f"Database query failed: {e}")
# raise RuntimeError("Failed to fetch overhaul spare parts data.") from e
from typing import List, Dict, Any, Optional
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.sql import text
def create_dummy_parts(assetnum: str, count: int = 5):
    """
    Create a list of dummy spare-part records with random stock values.

    Args:
        assetnum (str): The base asset number to generate dummy parts for.
        count (int): The number of parts to create. Default is 5.

    Returns:
        List[dict]: dicts with keys "assetnum" (f"{assetnum}_PART_{i}") and
        "stock" (random int in 1..100). (Docstring fixed: this returns plain
        dicts, not ScopeEquipmentPart objects.)
    """
    # Local import: `random` is not imported at module level in this file,
    # so the previous code raised NameError at call time.
    import random

    parts = []
    for i in range(1, count + 1):
        # Generate a unique part asset number derived from the base asset.
        part_assetnum = f"{assetnum}_PART_{i}"
        stock = random.randint(1, 100)  # random stock between 1 and 100
        parts.append({"assetnum": part_assetnum, "stock": stock})
    return parts
from sqlalchemy import text
from typing import Optional, List, Dict, Any
from datetime import datetime
async def get_all(
db_session: AsyncSession,
db_session: DbSession,
location_tag: Optional[str] = None,
start_year: int = 2023,
end_year: Optional[int] = None
) -> List[Dict[str, Any]]:
"""
Get overhaul spare parts consumption data with optimized query.
Get overhaul spare parts consumption data with optimized query
Args:
db_session: SQLAlchemy async database session
location_tag: Optional filter for location (asset_location)
start_year: Starting year (default: 2023)
end_year: Ending year (default: start_year + 1)
db_session: Database session
location_tag: Optional location filter
start_year: Year to start analysis (default: 2023)
end_year: Year to end analysis (default: start_year + 1)
Returns:
List of dictionaries with spare parts consumption data
List of dictionaries containing spare parts consumption data
"""
# Set default end year
# Set end year if not provided
if end_year is None:
end_year = start_year + 1
# Build query dynamically
query_str = """
# Build dynamic query
base_query = """
WITH filtered_wo AS (
SELECT DISTINCT wonum, asset_location, asset_unit
FROM public.wo_maximo ma
WHERE ma.xx_parent IN ('155026', '155027', '155029', '155030')
SELECT wonum, asset_location
FROM public.wo_staging_maximo_2
WHERE worktype = 'OH'
"""
params = {}
# Optional filter for location
# Add location filter to CTE if provided
if location_tag:
query_str += " AND asset_location = :location_tag"
base_query += " AND asset_location = :location_tag"
params["location_tag"] = location_tag
query_str += """
base_query += """
),
filtered_materials AS (
SELECT
mat.wonum,
mat.itemnum,
mat.itemqty,
mat.inv_curbaltotal AS inv_curbaltotal,
mat.inv_avgcost AS inv_avgcost
FROM public.wo_maximo_material AS mat
WHERE mat.wonum IN (SELECT wonum FROM filtered_wo)
filtered_transactions AS (
SELECT wonum, itemnum, curbal
FROM public.maximo_material_use_transactions
WHERE issuetype = 'ISSUE'
AND wonum IN (SELECT wonum FROM filtered_wo)
)
SELECT
fwo.asset_location AS location_tag,
ft.itemnum,
COALESCE(spl.description, 'Unknown') AS sparepart_name,
AVG(ft.itemqty) AS total_parts_used,
COALESCE(AVG(ft.inv_avgcost), 0) AS avg_cost,
COALESCE(AVG(ft.inv_curbaltotal), 0) AS avg_inventory_balance
FROM filtered_wo AS fwo
INNER JOIN filtered_materials AS ft
ON fwo.wonum = ft.wonum
LEFT JOIN public.maximo_sparepart_pr_po_line AS spl
ON ft.itemnum = spl.item_num
spl.description AS sparepart_name,
COUNT(*) AS parts_consumed_in_oh,
MIN(ft.curbal) AS min_remaining_balance,
MAX(mi.curbaltotal) AS inv_curbaltotal
FROM filtered_wo fwo
INNER JOIN filtered_transactions ft ON fwo.wonum = ft.wonum
INNER JOIN public.maximo_inventory mi ON ft.itemnum = mi.itemnum
LEFT JOIN public.maximo_sparepart_pr_po_line spl ON ft.itemnum = spl.item_num
GROUP BY fwo.asset_location, ft.itemnum, spl.description
ORDER BY fwo.asset_location, ft.itemnum;
ORDER BY fwo.asset_location, ft.itemnum
"""
query = text(base_query)
try:
result = await db_session.execute(text(query_str), params)
rows = result.fetchall()
results = await db_session.execute(query, params)
equipment_parts = []
for row in rows:
for row in results:
equipment_parts.append({
"location_tag": row.location_tag,
"itemnum": row.itemnum,
"sparepart_name": row.sparepart_name,
"parts_consumed_in_oh": float(row.total_parts_used or 0),
"avg_cost": float(row.avg_cost or 0),
"inv_curbaltotal": float(row.avg_inventory_balance or 0),
"parts_consumed_in_oh": row.parts_consumed_in_oh,
"min_remaining_balance": float(row.min_remaining_balance),
"inv_curbaltotal": float(row.inv_curbaltotal)
})
return equipment_parts
except Exception as e:
print(f"[get_all] Database query error: {e}")
# Log the error appropriately in your application
print(f"Database query error: {e}")
raise
# async def create(*, db_session: DbSession, scope_equipment_activty_in: ScopeEquipmentActivityCreate):
# activity = ScopeEquipmentActivity(
# **scope_equipment_activty_in.model_dump())
# db_session.add(activity)
# await db_session.commit()
# return activity
# async def update(*, db_session: DbSession, activity: ScopeEquipmentActivity, scope_equipment_activty_in: ScopeEquipmentActivityUpdate):
# """Updates a document."""
# data = scope_equipment_activty_in.model_dump()
# update_data = scope_equipment_activty_in.model_dump(exclude_defaults=True)
# for field in data:
# if field in update_data:
# setattr(activity, field, update_data[field])
# await db_session.commit()
# return activity
# async def delete(*, db_session: DbSession, scope_equipment_activity_id: str):
# """Deletes a document."""
# activity = await db_session.get(ScopeEquipmentActivity, scope_equipment_activity_id)
# await db_session.delete(activity)
# await db_session.commit()

@ -40,7 +40,7 @@ async def create_scope_equipment_jobs(
)
@router.post("/delete/{scope_job_id}", response_model=StandardResponse[None])
@router.delete("/{scope_job_id}", response_model=StandardResponse[None])
async def delete_scope_equipment_job(db_session: DbSession, scope_job_id):
await delete(db_session=db_session, scope_job_id=scope_job_id)

@ -14,11 +14,7 @@ from sqlalchemy.exc import (DataError, DBAPIError, IntegrityError,
SQLAlchemyError)
from src.enums import ResponseStatus
from src.auth.service import notify_admin_on_rate_limit_sync
from starlette.exceptions import HTTPException as StarletteHTTPException
log = logging.getLogger(__name__)
class ErrorDetail(BaseModel):
field: Optional[str] = None
@ -61,7 +57,7 @@ def get_request_context(request: Request):
return {
"endpoint": request.url.path,
"url": str(request.url),
"url": request.url,
"method": request.method,
"remote_addr": get_client_ip(),
}
@ -71,34 +67,32 @@ def handle_sqlalchemy_error(error: SQLAlchemyError):
"""
Handle SQLAlchemy errors and return user-friendly error messages.
"""
try:
original_error = error.orig
except AttributeError:
original_error = None
original_error = getattr(error, "orig", None)
print(original_error)
if isinstance(error, IntegrityError):
if "unique constraint" in str(error).lower():
return "This record already exists.", 422
return "This record already exists.", 409
elif "foreign key constraint" in str(error).lower():
return "Related record not found.", 422
return "Related record not found.", 400
else:
return "Data integrity error.", 422
return "Data integrity error.", 400
elif isinstance(error, DataError) or isinstance(original_error, AsyncPGDataError):
return "Invalid data provided.", 422
return "Invalid data provided.", 400
elif isinstance(error, DBAPIError):
if "unique constraint" in str(error).lower():
return "This record already exists.", 422
return "This record already exists.", 409
elif "foreign key constraint" in str(error).lower():
return "Related record not found.", 422
return "Related record not found.", 400
elif "null value in column" in str(error).lower():
return "Required data missing.", 422
return "Required data missing.", 400
elif "invalid input for query argument" in str(error).lower():
return "Invalid data provided.", 422
return "Invalid data provided.", 400
else:
return "Database error.", 500
else:
# Log the full error for debugging purposes
log.error(f"Unexpected database error: {str(error)}")
logging.error(f"Unexpected database error: {str(error)}")
return "An unexpected database error occurred.", 500
@ -106,100 +100,31 @@ def handle_exception(request: Request, exc: Exception):
"""
Global exception handler for Fastapi application.
"""
import uuid
error_id = str(uuid.uuid1())
request_info = get_request_context(request)
# Store error_id in request.state for middleware/logging
request.state.error_id = error_id
if isinstance(exc, RateLimitExceeded):
# Kirim notifikasi ke admin
notify_admin_on_rate_limit_sync(
endpoint_name=request_info["endpoint"],
ip_address=request_info["remote_addr"],
method=request_info["method"],
)
log.warning(
f"Rate limit exceeded: {str(exc.description) if hasattr(exc, 'description') else str(exc)} | Error ID: {error_id}",
extra={
"error_id": error_id,
"error_category": "rate_limit",
"detail": str(exc.description) if hasattr(exc, "description") else str(exc),
"request": request_info,
},
)
return JSONResponse(
status_code=429,
content={
"data": None,
"message": "Rate limit exceeded",
"status": ResponseStatus.ERROR,
"error_id": error_id
}
)
if isinstance(exc, RequestValidationError):
log.warning(
f"Validation error: {exc.errors()} | Error ID: {error_id}",
extra={
"error_id": error_id,
"error_category": "validation",
"errors": exc.errors(),
"request": request_info,
},
)
return JSONResponse(
status_code=422,
content={
"data": exc.errors(),
"message": "Validation Error",
"status": ResponseStatus.ERROR,
"error_id": error_id
},
)
if isinstance(exc, (HTTPException, StarletteHTTPException)):
# Log as warning for 4xx, error for 5xx
status_code = exc.status_code if hasattr(exc, "status_code") else 500
detail = exc.detail if hasattr(exc, "detail") else str(exc)
log_level = logging.WARNING if 400 <= status_code < 500 else logging.ERROR
log.log(
log_level,
f"HTTP {status_code}: {detail} | Error ID: {error_id}",
extra={
"error_id": error_id,
"error_category": "http",
"status_code": status_code,
"request": request_info,
},
_rate_limit_exceeded_handler(request, exc)
if isinstance(exc, HTTPException):
logging.error(
f"HTTP exception | Code: {exc.status_code} | Error: {exc.detail} | Request: {request_info}",
extra={"error_category": "http"},
)
return JSONResponse(
status_code=status_code,
status_code=exc.status_code,
content={
"data": None,
"message": str(detail),
"message": str(exc.detail),
"status": ResponseStatus.ERROR,
"error_id": error_id
"errors": [ErrorDetail(message=str(exc.detail)).model_dump()],
},
)
if isinstance(exc, SQLAlchemyError):
error_message, status_code = handle_sqlalchemy_error(exc)
# Log integrity errors as warning, others as error
log_level = logging.WARNING if 400 <= status_code < 500 else logging.ERROR
log.log(
log_level,
f"Database error: {error_message} | Error ID: {error_id}",
extra={
"error_id": error_id,
"error_category": "database",
"violation": str(exc).split('\n')[0],
"request": request_info,
},
logging.error(
f"Database Error | Error: {str(error_message)} | Request: {request_info}",
extra={"error_category": "database"},
)
return JSONResponse(
@ -208,27 +133,24 @@ def handle_exception(request: Request, exc: Exception):
"data": None,
"message": error_message,
"status": ResponseStatus.ERROR,
"error_id": error_id
"errors": [ErrorDetail(message=error_message).model_dump()],
},
)
# Log unexpected errors
log.error(
f"Unexpected error: {str(exc)} | Error ID: {error_id}",
extra={
"error_id": error_id,
"error_category": "unexpected",
"request": request_info,
},
logging.error(
f"Unexpected Error | Error: {str(exc)} | Request: {request_info}",
extra={"error_category": "unexpected"},
)
return JSONResponse(
status_code=500,
content={
"data": None,
"message": "An unexpected error occurred",
"message": str(exc),
"status": ResponseStatus.ERROR,
"error_id": error_id
"errors": [
ErrorDetail(message="An unexpected error occurred.").model_dump()
],
},
)

@ -1,27 +1,11 @@
import logging
import json
import datetime
import os
import sys
from typing import Optional
from src.config import LOG_LEVEL
from src.enums import OptimumOHEnum
LOG_FORMAT_DEBUG = "%(levelname)s:%(message)s:%(pathname)s:%(funcName)s:%(lineno)d"
# ANSI Color Codes
RESET = "\033[0m"
COLORS = {
"DEBUG": "\033[36m", # Cyan
"INFO": "\033[32m", # Green
"WARNING": "\033[33m", # Yellow
"WARN": "\033[33m", # Yellow
"ERROR": "\033[31m", # Red
"CRITICAL": "\033[1;31m", # Bold Red
}
class LogLevels(OptimumOHEnum):
info = "INFO"
@ -30,104 +14,32 @@ class LogLevels(OptimumOHEnum):
debug = "DEBUG"
class JSONFormatter(logging.Formatter):
"""
Custom formatter to output logs in JSON format.
"""
def format(self, record):
from src.context import get_request_id, get_user_id, get_username, get_role
request_id = None
user_id = None
username = None
role = None
try:
request_id = get_request_id()
user_id = get_user_id()
username = get_username()
role = get_role()
except Exception:
pass
# Standard fields from requirements
log_record = {
"timestamp": datetime.datetime.fromtimestamp(record.created).strftime("%Y-%m-%d %H:%M:%S"),
"level": record.levelname,
"message": record.getMessage(),
}
# Add Context information if available
if request_id:
log_record["request_id"] = request_id
# Add Error context if available
if hasattr(record, "error_id"):
log_record["error_id"] = record.error_id
elif "error_id" in record.__dict__:
log_record["error_id"] = record.error_id
if user_id:
log_record["user_id"] = user_id
# Add any extra attributes passed to the log call
standard_attrs = {
"args", "asctime", "created", "exc_info", "exc_text", "filename",
"funcName", "levelname", "levelno", "lineno", "module", "msecs",
"message", "msg", "name", "pathname", "process", "processName",
"relativeCreated", "stack_info", "thread", "threadName", "error_id"
}
for key, value in record.__dict__.items():
if key not in standard_attrs and not key.startswith("_"):
log_record[key] = value
log_json = json.dumps(log_record)
# Apply color if the output is a terminal
if sys.stdout.isatty():
level_color = COLORS.get(record.levelname, "")
return f"{level_color}{log_json}{RESET}"
return log_json
def configure_logging():
log_level = str(LOG_LEVEL).upper() # cast to string
log_levels = list(LogLevels)
if log_level not in log_levels:
log_level = LogLevels.error
# we use error as the default log level
logging.basicConfig(level=LogLevels.error)
return
# Get the root logger
root_logger = logging.getLogger()
root_logger.setLevel(log_level)
if log_level == LogLevels.debug:
logging.basicConfig(level=log_level, format=LOG_FORMAT_DEBUG)
return
# Clear existing handlers to avoid duplicate logs
if root_logger.hasHandlers():
root_logger.handlers.clear()
logging.basicConfig(level=log_level)
# Create a stream handler that outputs to stdout
handler = logging.StreamHandler(sys.stdout)
# Use JSONFormatter for all environments, or could be conditional
# For now, let's assume the user wants JSON everywhere as requested
formatter = JSONFormatter()
# If debug mode is specifically requested and we want the old format for debug:
# if log_level == LogLevels.debug:
# formatter = logging.Formatter(LOG_FORMAT_DEBUG)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
# Reconfigure uvicorn loggers to use our JSON formatter
for logger_name in ["uvicorn", "uvicorn.access", "uvicorn.error", "fastapi"]:
logger = logging.getLogger(logger_name)
logger.handlers = []
logger.propagate = True
# sometimes the slack client can be too verbose
logging.getLogger("slack_sdk.web.base_client").setLevel(logging.CRITICAL)
# Silence the chatty uvicorn access logs as we have custom middleware logging
logging.getLogger("uvicorn.access").setLevel(logging.WARNING)
# sometimes the slack client can be too verbose
logging.getLogger("slack_sdk.web.base_client").setLevel(logging.CRITICAL)
def setup_logging(logger):
# Your logging configuration here
logger.setLevel(logging.DEBUG)
# Create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# Create console handler
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

@ -1,6 +1,5 @@
import logging
import time
from src.context import set_request_id, reset_request_id, get_request_id
from contextvars import ContextVar
from os import path
from typing import Final, Optional
@ -28,38 +27,37 @@ from src.database.core import async_session, engine, async_collector_session
from src.enums import ResponseStatus
from src.exceptions import handle_exception
from src.logging import configure_logging
from src.middleware import RequestValidationMiddleware
from src.rate_limiter import limiter
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException as StarletteHTTPException
log = logging.getLogger(__name__)
# we configure the logging level and format
configure_logging()
# we define the exception handlers
exception_handlers = {Exception: handle_exception}
# we create the ASGI for the app
app = FastAPI(
exception_handlers=exception_handlers,
openapi_url="",
title="LCCA API",
description="Welcome to LCCA's API documentation!",
version="0.1.0",
)
# we define the exception handlers
app.add_exception_handler(Exception, handle_exception)
app.add_exception_handler(HTTPException, handle_exception)
app.add_exception_handler(StarletteHTTPException, handle_exception)
app.add_exception_handler(RequestValidationError, handle_exception)
app.add_exception_handler(RateLimitExceeded, handle_exception)
app.state.limiter = limiter
app.add_middleware(GZipMiddleware, minimum_size=1000)
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
app.add_middleware(GZipMiddleware, minimum_size=2000)
# credentials: "include",
REQUEST_ID_CTX_KEY: Final[str] = "request_id"
_request_id_ctx_var: ContextVar[Optional[str]] = ContextVar(
REQUEST_ID_CTX_KEY, default=None
)
def get_request_id() -> Optional[str]:
return _request_id_ctx_var.get()
def security_headers_middleware(app: FastAPI):
is_production = False
@ -122,19 +120,13 @@ def security_headers_middleware(app: FastAPI):
security_headers_middleware(app)
app.add_middleware(RequestValidationMiddleware)
@app.middleware("http")
async def db_session_middleware(request: Request, call_next):
request_id = str(uuid1())
# we create a per-request id such that we can ensure that our session is scoped for a particular request.
# see: https://github.com/tiangolo/fastapi/issues/726
ctx_token = set_request_id(request_id)
ctx_token = _request_id_ctx_var.set(request_id)
try:
session = async_scoped_session(async_session, scopefunc=get_request_id)
@ -143,68 +135,14 @@ async def db_session_middleware(request: Request, call_next):
collector_session = async_scoped_session(async_collector_session, scopefunc=get_request_id)
request.state.collector_db = collector_session()
start_time = time.time()
response = await call_next(request)
process_time = (time.time() - start_time) * 1000
# Skip logging in middleware if it's an error (already logged in handle_exception)
if response.status_code >= 400:
return response
from src.context import get_username, get_role, get_user_id, set_user_id, set_username, set_role
# Pull from context or fallback to request.state.user
username = get_username()
role = get_role()
user_id = get_user_id()
user_obj = getattr(request.state, "user", None)
if user_obj:
# message is UserBase dict/obj in this project
if isinstance(user_obj, dict):
u_id = user_obj.get("user_id")
u_name = user_obj.get("name") or user_obj.get("username")
u_role = user_obj.get("role")
else:
u_id = getattr(user_obj, "user_id", None)
u_name = getattr(user_obj, "name", None) or getattr(user_obj, "username", None)
u_role = getattr(user_obj, "role", None)
if not user_id and u_id:
user_id = str(u_id)
set_user_id(user_id)
if not username and u_name:
username = u_name
set_username(username)
if not role and u_role:
role = u_role
set_role(role)
user_info_str = ""
if user_id:
user_info_str = f" | User ID: {user_id}"
error_id = getattr(request.state, "error_id", None)
log_msg = f"HTTP {request.method} {request.url.path} completed in {round(process_time, 2)}ms{user_info_str}"
if error_id:
log_msg += f" | Error ID: {error_id}"
log.info(
log_msg,
extra={
"method": request.method,
"path": request.url.path,
"status_code": response.status_code,
"duration_ms": round(process_time, 2),
"user_id": user_id,
"error_id": error_id,
},
)
except Exception as e:
raise e from None
finally:
await request.state.db.close()
await request.state.collector_db.close()
reset_request_id(ctx_token)
_request_id_ctx_var.reset(ctx_token)
return response

@ -1,332 +1,79 @@
from datetime import datetime
from typing import Optional, Union
from sqlalchemy import select, func, cast, Numeric, text
from sqlalchemy import select, func, cast, Numeric
from sqlalchemy.orm import Session
from sqlalchemy import and_
from sqlalchemy.sql import not_
from src.maximo.model import WorkOrderData # Assuming this is where your model is
from src.database.core import CollectorDbSession, DbSession
from src.overhaul_scope.model import OverhaulScope
from src.database.core import CollectorDbSession
async def get_cm_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime):
query = text("""WITH part_costs AS (
SELECT
mu.wonum,
SUM(mu.itemqty * COALESCE(inv.avgcost, po.unit_cost, 0)) AS parts_total_cost
FROM maximo_workorder_materials mu
LEFT JOIN maximo_inventory inv
ON mu.itemnum = inv.itemnum
LEFT JOIN (
SELECT item_num, AVG(unit_cost) AS unit_cost
FROM maximo_sparepart_pr_po_line
GROUP BY item_num
) po
ON mu.itemnum = po.item_num
GROUP BY mu.wonum
),
wo_costs AS (
SELECT
w.wonum,
w.asset_location,
(COALESCE(w.mat_cost_max, 0) + COALESCE(pc.parts_total_cost, 0)) AS total_wo_cost
FROM wo_staging_maximo_2 w
LEFT JOIN part_costs pc
ON w.wonum = pc.wonum
WHERE
w.worktype IN ('CM', 'EM', 'PROACTIVE')
AND w.asset_system IN (
'HPB','AH','APC','SCR','CL','DM','CRH','ASH','BAD','DS','WTP',
'MT','SUP','DCS','FF','EG','AI','SPS','EVM','SCW','KLH','CH',
'TUR','LOT','HRH','ESP','CAE','GMC','BFT','LSH','CHB','BSS',
'LOS','LPB','SAC','CP','EHS','RO','GG','MS','CW','SO','ATT',
'AFG','EHB','RP','FO','PC','APE','AF','DMW','BRS','GEN','ABS',
'CHA','TR','H2','BDW','LOM','ACR','AL','FW','COND','CCCW','IA',
'GSS','BOL','SSB','CO','OA','CTH-UPD','AS','DP'
query = select(
WorkOrderData.location,
(func.sum(WorkOrderData.total_cost_max).cast(Numeric) / func.count(WorkOrderData.wonum)).label('avg_cost')
).where(
and_(
# WorkOrderData.wo_start >= last_oh_date,
# WorkOrderData.wo_start <= upcoming_oh_date,
WorkOrderData.worktype.in_(['CM', 'EM', 'PROACTIVE']),
WorkOrderData.system_tag.in_(['HPB', 'AH', 'APC', 'SCR', 'CL', 'DM', 'CRH', 'ASH', 'BAD', 'DS', 'WTP',
'MT', 'SUP', 'DCS', 'FF', 'EG', 'AI', 'SPS', 'EVM', 'SCW', 'KLH', 'CH',
'TUR', 'LOT', 'HRH', 'ESP', 'CAE', 'GMC', 'BFT', 'LSH', 'CHB', 'BSS',
'LOS', 'LPB', 'SAC', 'CP', 'EHS', 'RO', 'GG', 'MS', 'CW', 'SO', 'ATT',
'AFG', 'EHB', 'RP', 'FO', 'PC', 'APE', 'AF', 'DMW', 'BRS', 'GEN', 'ABS',
'CHA', 'TR', 'H2', 'BDW', 'LOM', 'ACR', 'AL', 'FW', 'COND', 'CCCW', 'IA',
'GSS', 'BOL', 'SSB', 'CO', 'OA', 'CTH-UPD', 'AS', 'DP']),
WorkOrderData.reportdate.is_not(None),
WorkOrderData.actstart.is_not(None),
WorkOrderData.actfinish.is_not(None),
WorkOrderData.unit.in_([3, 0]),
WorkOrderData.reportdate >= datetime.strptime('2015-01-01', '%Y-%m-%d'),
not_(WorkOrderData.wonum.like('T%'))
)
AND w.reportdate IS NOT NULL
AND w.actstart IS NOT NULL
AND w.actfinish IS NOT NULL
AND w.asset_unit IN ('3','00')
AND w.reportdate >= '2015-01-01'
AND w.wonum NOT LIKE 'T%'
),
-- find max cost per location
location_max AS (
SELECT asset_location, MAX(total_wo_cost) AS max_cost
FROM wo_costs
WHERE total_wo_cost > 0
GROUP BY asset_location
),
-- filter WO costs to only reasonable range (e.g. >0 and >=10% of max)
filtered_wo AS (
SELECT w.*
FROM wo_costs w
JOIN location_max lm ON w.asset_location = lm.asset_location
WHERE w.total_wo_cost > 0
)
SELECT
asset_location,
SUM(total_wo_cost)::numeric / COUNT(wonum) AS avg_cost
FROM filtered_wo
GROUP BY asset_location
ORDER BY avg_cost DESC;
""")
results = await collector_db.execute(query)
data = []
for row in results:
data.append({
"location_tag": row.asset_location,
"avg_cost": row.avg_cost
})
).group_by(
WorkOrderData.location
).order_by(
func.count(WorkOrderData.wonum).desc()
)
result = await collector_db.execute(query)
data = result.all()
return {
item["location_tag"]: item["avg_cost"] for item in data
data.location: data.avg_cost for data in data
}
# async def get_oh_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime):
# query = text("""
# WITH target_wo AS (
# -- Get work orders under a specific parent(s)
# SELECT
# wonum,
# xx_parent,
# assetnum,
# location_tag AS asset_location,
# actmatcost,
# actservcost,
# reportdate
# FROM public.wo_maxim
# WHERE xx_parent = ANY(:parent_nums)
# ),
# part_costs AS (
# -- Calculate parts cost per WO if actmatcost = 0
# SELECT
# wm.wonum,
# SUM(
# wm.itemqty *
# COALESCE(wm.inv_avgcost, po.unit_cost, 0)
# ) AS parts_total_cost
# FROM public.wo_maxim_material wm
# LEFT JOIN (
# SELECT item_num, AVG(unit_cost) AS unit_cost
# FROM public.maximo_sparepart_pr_po_line
# GROUP BY item_num
# ) po ON wm.itemnum = po.item_num
# WHERE wm.itemnum IS NOT NULL
# GROUP BY wm.wonum
# ),
# wo_costs AS (
# SELECT
# w.wonum,
# w.asset_location,
# CASE
# WHEN COALESCE(w.actmatcost, 0) > 0 THEN COALESCE(w.actmatcost, 0)
# ELSE COALESCE(pc.parts_total_cost, 0)
# END AS material_cost,
# COALESCE(w.actservcost, 0) AS service_cost
# FROM target_wo w
# LEFT JOIN part_costs pc ON w.wonum = pc.wonum
# )
# SELECT
# asset_location,
# ROUND(SUM(material_cost + service_cost)::numeric / COUNT(wonum), 2) AS avg_cost,
# COUNT(wonum) AS total_wo_count
# FROM wo_costs
# GROUP BY asset_location
# ORDER BY total_wo_count DESC;
# """)
# parent_nums = []
# result = await collector_db.execute(query, {"parent_nums": parent_nums})
# data = []
# for row in result:
# data.append({
# "location_tag": row.asset_location,
# "avg_cost": float(row.avg_cost or 0.0),
# "total_wo_count": row.total_wo_count,
# })
# return {item["location_tag"]: item["avg_cost"] for item in data}
async def get_oh_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime):
# query = text("""
# WITH part_costs AS (
# SELECT
# wm.wonum,
# SUM(wm.itemqty * COALESCE(wm.inv_avgcost, po.unit_cost, 0)) AS parts_total_cost
# FROM public.wo_maxim_material wm
# LEFT JOIN (
# SELECT item_num, AVG(unit_cost) AS unit_cost
# FROM public.maximo_sparepart_pr_po_line
# GROUP BY item_num
# ) po ON wm.itemnum = po.item_num
# WHERE wm.itemnum IS NOT NULL
# GROUP BY wm.wonum
# ),
# wo_costs AS (
# SELECT
# w.wonum,
# w.asset_location,
# -- Use mat_cost_max if parts_total_cost = 0
# CASE
# WHEN COALESCE(pc.parts_total_cost, 0) = 0 THEN COALESCE(w.mat_cost_max , 0)
# ELSE COALESCE(pc.parts_total_cost, 0)
# END AS total_wo_cost
# FROM wo_staging_maximo_2 w
# LEFT JOIN part_costs pc
# ON w.wonum = pc.wonum
# WHERE
# w.worktype = 'OH'
# AND w.reportdate IS NOT NULL
# AND w.actstart IS NOT NULL
# AND w.actfinish IS NOT NULL
# AND w.asset_unit IN ('3', '00')
# AND w.wonum NOT LIKE 'T%'
# )
# SELECT
# asset_location,
# AVG(total_wo_cost) AS avg_cost
# FROM wo_costs
# GROUP BY asset_location
# ORDER BY COUNT(wonum) DESC;
# """)
query = text("""
WITH part_costs AS (
SELECT
wm.wonum,
SUM(wm.itemqty * COALESCE(inv.avgcost, po.unit_cost, 0)) AS parts_total_cost
FROM public.maximo_workorder_materials wm
JOIN public.maximo_inventory AS inv on inv.itemnum = wm.itemnum
LEFT JOIN (
SELECT item_num, AVG(unit_cost) AS unit_cost
FROM public.maximo_sparepart_pr_po_line
GROUP BY item_num
) po ON wm.itemnum = po.item_num
WHERE wm.itemnum IS NOT NULL
GROUP BY wm.wonum
),
wo_costs AS (
SELECT
w.wonum,
w.asset_location,
-- Use mat_cost_max if parts_total_cost = 0
CASE
WHEN COALESCE(pc.parts_total_cost, 0) = 0 THEN COALESCE(w.mat_cost_max , 0)
ELSE COALESCE(pc.parts_total_cost, 0)
END AS total_wo_cost
FROM wo_staging_maximo_2 w
LEFT JOIN part_costs pc
ON w.wonum = pc.wonum
WHERE
w.worktype = 'OH'
AND w.reportdate IS NOT NULL
AND w.actstart IS NOT NULL
AND w.actfinish IS NOT NULL
AND w.asset_unit IN ('3', '00')
AND w.wonum NOT LIKE 'T%'
query = select(
WorkOrderData.location,
(func.sum(WorkOrderData.total_cost_max).cast(Numeric) / func.count(WorkOrderData.wonum)).label('avg_cost')
).where(
and_(
# WorkOrderData.wo_start >= last_oh_date,
# WorkOrderData.wo_start <= upcoming_oh_date,
WorkOrderData.worktype.in_(['OH']),
WorkOrderData.system_tag.in_(['HPB', 'AH', 'APC', 'SCR', 'CL', 'DM', 'CRH', 'ASH', 'BAD', 'DS', 'WTP',
'MT', 'SUP', 'DCS', 'FF', 'EG', 'AI', 'SPS', 'EVM', 'SCW', 'KLH', 'CH',
'TUR', 'LOT', 'HRH', 'ESP', 'CAE', 'GMC', 'BFT', 'LSH', 'CHB', 'BSS',
'LOS', 'LPB', 'SAC', 'CP', 'EHS', 'RO', 'GG', 'MS', 'CW', 'SO', 'ATT',
'AFG', 'EHB', 'RP', 'FO', 'PC', 'APE', 'AF', 'DMW', 'BRS', 'GEN', 'ABS',
'CHA', 'TR', 'H2', 'BDW', 'LOM', 'ACR', 'AL', 'FW', 'COND', 'CCCW', 'IA',
'GSS', 'BOL', 'SSB', 'CO', 'OA', 'CTH-UPD', 'AS', 'DP']),
WorkOrderData.reportdate.is_not(None),
WorkOrderData.actstart.is_not(None),
WorkOrderData.actfinish.is_not(None),
WorkOrderData.unit.in_([3, 0]),
WorkOrderData.reportdate >= datetime.strptime('2015-01-01', '%Y-%m-%d'),
not_(WorkOrderData.wonum.like('T%'))
)
).group_by(
WorkOrderData.location
).order_by(
func.count(WorkOrderData.wonum).desc()
)
SELECT
asset_location,
AVG(total_wo_cost) AS avg_cost
FROM wo_costs
GROUP BY asset_location
ORDER BY COUNT(wonum) DESC;
""")
result = await collector_db.execute(query)
data = []
for row in result:
data.append({
"location_tag": row.asset_location,
"avg_cost": row.avg_cost
})
data = result.all()
return {
item["location_tag"]: item["avg_cost"] for item in data
data.location: data.avg_cost for data in data
}
from uuid import UUID
async def get_history_oh_wo(*, db_session: DbSession, collector_db_session: CollectorDbSession, oh_session_id: UUID, parent_wo_num: Optional[Union[str, list]] = None):
    """Aggregate historical overhaul (OH) work orders per asset for one OH session.

    Parameters:
        db_session: application DB session; used only to look up the session's
            parent WO number(s) when ``parent_wo_num`` is not supplied.
        collector_db_session: collector DB session; runs the aggregation query
            against ``public.wo_maximo`` / ``public.ms_equipment_master``.
        oh_session_id: OverhaulScope row id whose ``wo_parent`` supplies the
            parent WO numbers when none are passed explicitly.
        parent_wo_num: optional parent WO number (str) or list of them.

    Returns:
        A list of dicts, one per asset, with the WO/task mapping and material,
        service and actual cost totals, ordered by total_actual_cost DESC.
        Returns ``[]`` when no parent WO number can be resolved.
    """
    ## Get Parent wo num from oh session table
    if not parent_wo_num:
        query = select(OverhaulScope.wo_parent).where(OverhaulScope.id == oh_session_id)
        result = await db_session.execute(query)
        parent_wo_num = result.scalar()

        if not parent_wo_num:
            return []

    # Ensure parent_wo_num is a list and removed duplicates if any
    if isinstance(parent_wo_num, str):
        parent_wo_num = [parent_wo_num]
    else:
        # NOTE(review): set() also drops ordering — acceptable since the list
        # only feeds the ANY(:parent_wo_num) filter below.
        parent_wo_num = list(set(parent_wo_num))

    # Bound parameter :parent_wo_num keeps the query safe from injection.
    # target_wos: children of the requested parent WOs with their costs;
    # wo_tasks:   task descriptions grouped under each of those WOs;
    # final SELECT: per-asset rollup joined to the equipment master.
    sql_query = text("""
    WITH target_wos AS (
        SELECT
            w.wonum,
            w.assetnum,
            COALESCE(w.actmatcost, 0) as actmatcost,
            COALESCE(w.actservcost, 0) as actservcost
        FROM public.wo_maximo w
        WHERE w.xx_parent = ANY(:parent_wo_num)
    ),
    wo_tasks AS (
        SELECT
            t.xx_parent AS parent_wonum,
            JSON_AGG(t.description) AS task_list
        FROM public.wo_maximo t
        JOIN target_wos tw ON t.xx_parent = tw.wonum
        GROUP BY t.xx_parent
    )
    SELECT
        w.assetnum,
        e.name AS equipment_name,
        e.location_tag,
        JSON_OBJECT_AGG(w.wonum, COALESCE(wt.task_list, '[]'::json)) AS wonum_list,
        COUNT(w.wonum) AS total_wo_count,
        COALESCE(SUM(w.actmatcost), 0) AS total_material_cost,
        COALESCE(SUM(w.actservcost), 0) AS total_service_cost,
        COALESCE(SUM(w.actmatcost + w.actservcost), 0) AS total_actual_cost
    FROM target_wos w
    INNER JOIN public.ms_equipment_master e
        ON w.assetnum = e.assetnum
    LEFT JOIN wo_tasks wt
        ON w.wonum = wt.parent_wonum
    GROUP BY
        w.assetnum,
        e.name,
        e.location_tag
    ORDER BY total_actual_cost DESC;
    """)

    results = await collector_db_session.execute(sql_query, {"parent_wo_num": parent_wo_num})

    # Costs are cast to float so the payload is JSON-serializable regardless of
    # the driver's numeric type (e.g. Decimal from asyncpg).
    return [
        {
            "assetnum": row.assetnum,
            "equipment_name": row.equipment_name,
            "location_tag": row.location_tag,
            "wonum_list": row.wonum_list,
            "total_wo_count": row.total_wo_count,
            "total_material_cost": float(row.total_material_cost),
            "total_service_cost": float(row.total_service_cost),
            "total_actual_cost": float(row.total_actual_cost)
        }
        for row in results
    ]

@ -1,426 +0,0 @@
import json
import re
import logging
from collections import Counter
from fastapi import Request, HTTPException
from starlette.middleware.base import BaseHTTPMiddleware
# =========================
# Configuration
# =========================
ALLOWED_MULTI_PARAMS = {
"sortBy[]",
"descending[]",
"exclude[]",
"assetnums",
"plant_ids",
"job_ids",
}
ALLOWED_DATA_PARAMS = {
"actual_shutdown", "all_params", "analysis_metadata", "asset_contributions",
"assetnum", "assetnums", "assigned_date", "availability", "availableScopes",
"avg_cost", "birbaum", "calculation_type", "capacity_weight", "code",
"contribution", "corrective_cost", "corrective_costs", "cost", "costPerFailure",
"cost_savings_vs_planned", "cost_threshold", "cost_trend", "created_at",
"crew_number", "criticalParts", "critical_procurement_items", "current_eaf",
"current_plant_eaf", "current_stock", "current_user", "cut_hours",
"daily_failures", "data", "datetime", "day", "days", "descending",
"description", "down_time", "duration", "duration_oh", "eaf_gap",
"eaf_improvement_text", "eaf_input", "efficiency", "end_date",
"equipment_name", "equipment_results", "equipment_with_sparepart_constraints",
"exclude", "excluded_equipment", "expected_delivery_date", "filter_spec",
"finish", "fleet_statistics", "id", "improvement_impact", "included_equipment",
"included_in_optimization", "intervalDays", "is_included", "itemnum",
"items", "itemsPerPage", "items_per_page", "job", "job_ids",
"last_overhaul_date", "lead_time", "location", "location_tag", "location_tags",
"maintenance_type", "master_equipment", "material_cost", "max_interval",
"max_interval_months", "message", "month", "months_from_planned", "name",
"next_planned_overhaul", "node", "num_failures", "num_of_failures",
"ohSessionId", "oh_scope", "oh_session_id", "oh_type", "oh_types",
"optimal_analysis", "optimal_breakdown", "optimal_month", "optimal_total_cost",
"optimization_success", "optimum_analysis", "optimum_day", "optimum_oh",
"optimum_oh_day", "optimum_oh_month", "order_date", "overhaulCost",
"overhaul_activity", "overhaul_cost", "overhaul_costs",
"overhaul_reference_type", "overhaul_scope", "overhaul_scope_id", "overview",
"page", "parent", "parent_id", "plan_duration", "planned_month",
"planned_outage", "plant_level_benefit", "po_pr_id", "po_vendor_delivery_date",
"possible_plant_eaf", "priority_score", "procurement_cost", "procurement_costs",
"procurement_details", "projected_eaf_improvement", "quantity",
"quantity_required", "query_str", "recommendedScope", "recommended_reduced_outage",
"reference", "reference_id", "remark", "removal_date", "required_improvement",
"results", "schedules", "scope", "scope_calculation_id", "scope_equipment_job",
"scope_name", "scope_overhaul", "service_cost", "session", "simulation",
"simulation_id", "sort_by", "sortBy[]", "descending[]", "exclude[]",
"sparepart_id", "sparepart_impact", "sparepart_name", "sparepart_summary",
"spreadsheet_link", "start", "start_date", "status", "subsystem", "system",
"systemComponents", "target_plant_eaf", "tasks", "timing_recommendation",
"total", "totalPages", "total_cost", "total_equipment", "total_equipment_analyzed",
"total_procurement_items", "type", "unit_cost", "warning_message", "with_results",
"workscope", "workscope_group", "year", "_", "t", "timestamp",
"q", "filter", "currentUser", "risk_cost", "all", "with_results",
"eaf_threshold", "simulation_id", "scope_calculation_id", "calculation_id"
}
ALLOWED_HEADERS = {
"host",
"user-agent",
"accept",
"accept-language",
"accept-encoding",
"connection",
"upgrade-insecure-requests",
"if-modified-since",
"if-none-match",
"cache-control",
"authorization",
"content-type",
"content-length",
"origin",
"referer",
"sec-fetch-dest",
"sec-fetch-mode",
"sec-fetch-site",
"sec-fetch-user",
"sec-ch-ua",
"sec-ch-ua-mobile",
"sec-ch-ua-platform",
"pragma",
"dnt",
"priority",
"x-forwarded-for",
"x-forwarded-proto",
"x-forwarded-host",
"x-forwarded-port",
"x-real-ip",
"x-request-id",
"x-correlation-id",
"x-requested-with",
"x-csrf-token",
"x-xsrf-token",
"postman-token",
"x-forwarded-path",
"x-forwarded-prefix",
"cookie",
"x-kong-request-id"
}
MAX_QUERY_PARAMS = 50
MAX_QUERY_LENGTH = 2000
MAX_JSON_BODY_SIZE = 1024 * 500 # 500 KB
XSS_PATTERN = re.compile(
r"("
r"<(script|iframe|embed|object|svg|img|video|audio|base|link|meta|form|button|details|animate)\b|"
r"javascript\s*:|vbscript\s*:|data\s*:[^,]*base64[^,]*|data\s*:text/html|"
r"\bon[a-z]+\s*=|" # Catch-all for any 'on' event (onerror, onclick, etc.)
r"style\s*=.*expression\s*\(|" # Old IE specific
r"\b(eval|setTimeout|setInterval|Function)\s*\("
r")",
re.IGNORECASE,
)
SQLI_PATTERN = re.compile(
r"("
# 1. Keywords followed by whitespace and common SQL characters
r"\b(UNION|SELECT|INSERT|UPDATE|DELETE|DROP|ALTER|CREATE|TRUNCATE|EXEC(UTE)?|DECLARE)\b\s+[\w\*\(\']|"
# 2. Time-based attacks (more specific than just 'SLEEP')
r"\b(WAITFOR\b\s+DELAY|PG_SLEEP|SLEEP\s*\()|"
# 3. System tables/functions
r"\b(INFORMATION_SCHEMA|SYS\.|SYSOBJECTS|XP_CMDSHELL|LOAD_FILE|INTO\s+OUTFILE)\b|"
# 4. Logical Tautologies (OR 1=1) - Optimized for boundaries
r"\b(OR|AND)\b\s+['\"]?\d+['\"]?\s*=\s*['\"]?\d+|"
# 5. Comments
# Match '--' if at start or preceded by whitespace
r"(?<!\S)--|"
# Match block comments, ensuring they aren't part of mime patterns like */*
r"(?<!\*)/\*|(?<!\*)\*/(?!\*)|"
# Match '#' if at start or preceded by whitespace
r"(?<!\S)#|"
# 6. Hex / Stacked Queries
r";\s*\b(SELECT|DROP|DELETE|UPDATE|INSERT)\b"
r")",
re.IGNORECASE
)
RCE_PATTERN = re.compile(
r"("
r"\$\(.*\)|`.*`|" # Command substitution $(...) or `...`
r"[;&|]\s*(cat|ls|id|whoami|pwd|ifconfig|ip|netstat|nc|netcat|nmap|curl|wget|python|php|perl|ruby|bash|sh|cmd|powershell|pwsh|sc\s+|tasklist|taskkill|base64|sudo|crontab|ssh|ftp|tftp)|"
# Only flag naked commands if they are clearly standalone or system paths
r"\b(/etc/passwd|/etc/shadow|/etc/group|/etc/issue|/proc/self/|/windows/system32/|C:\\Windows\\)\b"
r")",
re.IGNORECASE,
)
TRAVERSAL_PATTERN = re.compile(
r"(\.\.[/\\]|%2e%2e%2f|%2e%2e/|\.\.%2f|%2e%2e%5c|%252e%252e%252f|\\00)",
re.IGNORECASE,
)
FORBIDDEN_JSON_KEYS = {"__proto__", "constructor", "prototype"}
DYNAMIC_KEYS = {
"data",
"results",
"analysis_metadata",
"asset_contributions",
"equipment_results",
"optimal_analysis",
"optimum_analysis",
"schedules",
"tasks",
"all_params",
"parameters",
"program_data"
}
log = logging.getLogger("security_logger")
def has_control_chars(value: str) -> bool:
    """Return True if ``value`` contains an ASCII control character
    (code point < 32) other than newline, carriage return, or tab."""
    permitted = {"\n", "\r", "\t"}
    for ch in value:
        if ord(ch) < 32 and ch not in permitted:
            return True
    return False
def inspect_value(value: str, source: str):
    """Scan a single string for known attack signatures.

    Checks the value against the XSS, SQL-injection, RCE, and
    path-traversal patterns (in that order), then for disallowed
    control characters. On any hit the violation is logged and the
    request is rejected with a generic HTTP 422.

    Non-string values and the literal Accept wildcard "*/*" are
    skipped untouched.
    """
    if not isinstance(value, str) or value == "*/*":
        return
    signature_checks = (
        (XSS_PATTERN, "XSS"),
        (SQLI_PATTERN, "SQL injection"),
        (RCE_PATTERN, "RCE"),
        (TRAVERSAL_PATTERN, "Path Traversal"),
    )
    for pattern, label in signature_checks:
        if pattern.search(value):
            log.warning(f"Security violation: Potential {label} payload detected in {source}, value: {value}")
            raise HTTPException(
                status_code=422,
                detail="Invalid request parameters",
            )
    if has_control_chars(value):
        log.warning(f"Security violation: Invalid control characters detected in {source}")
        raise HTTPException(
            status_code=422,
            detail="Invalid request parameters",
        )
def inspect_json(obj, path="body", check_whitelist=True):
    """Recursively validate a decoded JSON payload.

    Strings are run through inspect_value; lists are walked
    element-by-element; dict keys are rejected if they are
    prototype-pollution vectors (FORBIDDEN_JSON_KEYS) or, while
    ``check_whitelist`` is True, absent from ALLOWED_DATA_PARAMS.
    Children of a key in DYNAMIC_KEYS are recursed into with the
    whitelist check disabled, since those containers hold free-form
    data. Violations raise a generic HTTP 422.
    """
    if isinstance(obj, str):
        inspect_value(obj, path)
        return
    if isinstance(obj, list):
        for idx, element in enumerate(obj):
            inspect_json(element, f"{path}[{idx}]", check_whitelist=check_whitelist)
        return
    if not isinstance(obj, dict):
        # Numbers, booleans, and nulls carry no injectable text.
        return
    for key, value in obj.items():
        if key in FORBIDDEN_JSON_KEYS:
            log.warning(f"Security violation: Forbidden JSON key detected: {path}.{key}")
            raise HTTPException(
                status_code=422,
                detail="Invalid request parameters",
            )
        if check_whitelist and key not in ALLOWED_DATA_PARAMS:
            log.warning(f"Security violation: Unknown JSON key detected: {path}.{key}")
            raise HTTPException(
                status_code=422,
                detail="Invalid request parameters",
            )
        # Dynamic containers suspend whitelist checking for descendants.
        descend_with_whitelist = check_whitelist and (key not in DYNAMIC_KEYS)
        inspect_json(value, f"{path}.{key}", check_whitelist=descend_with_whitelist)
# =========================
# Middleware
# =========================
class RequestValidationMiddleware(BaseHTTPMiddleware):
    """Request firewall.

    Validates headers, query string, and JSON body of every incoming
    request against the module-level whitelists and attack-signature
    patterns before it reaches a route handler. Every violation is
    logged to the security logger and rejected with a deliberately
    generic 422 so attackers learn nothing about which check fired.

    NOTE(review): raising HTTPException from inside a
    BaseHTTPMiddleware can bypass FastAPI's exception handlers on some
    Starlette versions and surface as a 500 — confirm the deployed
    stack converts these into 422 responses.
    """

    async def dispatch(self, request: Request, call_next):
        # -------------------------
        # 0. Header validation
        # -------------------------
        header_keys = [key.lower() for key, _ in request.headers.items()]
        # Duplicate headers are a request-smuggling vector; a few
        # headers legitimately repeat and are exempted.
        header_counter = Counter(header_keys)
        duplicate_headers = [key for key, count in header_counter.items() if count > 1]
        ALLOW_DUPLICATE_HEADERS = {'accept', 'accept-encoding', 'accept-language', 'accept-charset', 'cookie'}
        real_duplicates = [h for h in duplicate_headers if h not in ALLOW_DUPLICATE_HEADERS]
        if real_duplicates:
            log.warning(f"Security violation: Duplicate headers detected: {real_duplicates}")
            raise HTTPException(
                status_code=422,
                detail="Invalid request parameters",
            )
        # Whitelist headers; browser-generated 'sec-*' headers (e.g.
        # sec-fetch-mode) are tolerated without being listed.
        unknown_headers = [key for key in header_keys if key not in ALLOWED_HEADERS]
        if unknown_headers:
            filtered_unknown = [h for h in unknown_headers if not h.startswith('sec-')]
            if filtered_unknown:
                log.warning(f"Security violation: Unknown headers detected: {filtered_unknown}")
                raise HTTPException(
                    status_code=422,
                    detail="Invalid request parameters",
                )
        # Scan every header value for attack signatures.
        for key, value in request.headers.items():
            if value:
                inspect_value(value, f"header '{key}'")
        # -------------------------
        # 1. Query string limits
        # -------------------------
        if len(request.url.query) > MAX_QUERY_LENGTH:
            log.warning("Security violation: Query string too long")
            raise HTTPException(
                status_code=422,
                detail="Invalid request parameters",
            )
        params = request.query_params.multi_items()
        if len(params) > MAX_QUERY_PARAMS:
            log.warning("Security violation: Too many query parameters")
            raise HTTPException(
                status_code=422,
                detail="Invalid request parameters",
            )
        # Reject query parameters not on the data-parameter whitelist.
        unknown_params = [key for key, _ in params if key not in ALLOWED_DATA_PARAMS]
        if unknown_params:
            log.warning(f"Security violation: Unknown query parameters detected: {unknown_params}")
            raise HTTPException(
                status_code=422,
                detail="Invalid request parameters",
            )
        # -------------------------
        # 2. Duplicate parameters
        # -------------------------
        counter = Counter(key for key, _ in params)
        duplicates = [
            key for key, count in counter.items()
            if count > 1 and key not in ALLOWED_MULTI_PARAMS
        ]
        if duplicates:
            log.warning(f"Security violation: Duplicate query parameters detected: {duplicates}")
            raise HTTPException(
                status_code=422,
                detail="Invalid request parameters",
            )
        # -------------------------
        # 3. Query param inspection & Pagination
        # -------------------------
        # Pagination sizes are capped at 50 and must be multiples of 5
        # to prevent resource-exhaustion via oversized pages.
        pagination_size_keys = {"size", "itemsPerPage", "per_page", "limit", "items_per_page"}
        for key, value in params:
            if value:
                inspect_value(value, f"query param '{key}'")
            if key in pagination_size_keys and value:
                try:
                    size_val = int(value)
                    if size_val > 50:
                        log.warning(f"Security violation: Pagination size too large ({size_val})")
                        raise HTTPException(
                            status_code=422,
                            detail="Invalid request parameters",
                        )
                    if size_val % 5 != 0:
                        log.warning(f"Security violation: Pagination size not multiple of 5 ({size_val})")
                        raise HTTPException(
                            status_code=422,
                            detail="Invalid request parameters",
                        )
                except ValueError:
                    # Non-numeric size value (int() failed above).
                    log.warning("Security violation: Pagination size invalid value")
                    raise HTTPException(
                        status_code=422,
                        detail="Invalid request parameters",
                    )
        # -------------------------
        # 4. Content-Type sanity
        # -------------------------
        content_type = request.headers.get("content-type", "")
        if content_type and not any(
            content_type.startswith(t)
            for t in ("application/json", "multipart/form-data", "application/x-www-form-urlencoded")
        ):
            log.warning(f"Security violation: Unsupported Content-Type: {content_type}")
            raise HTTPException(status_code=422, detail="Invalid request parameters")
        # -------------------------
        # 5. Single source check (Query vs JSON Body)
        # -------------------------
        has_query = len(params) > 0
        has_body = False
        if content_type.startswith("application/json"):
            # Body existence is inferred from Content-Length so the
            # request stream is not consumed here.
            content_length = request.headers.get("content-length")
            if content_length:
                try:
                    has_body = int(content_length) > 0
                except ValueError:
                    # Fix: a malformed Content-Length previously raised
                    # an unhandled ValueError (HTTP 500); reject it with
                    # the same generic 422 as every other violation.
                    log.warning("Security violation: Invalid Content-Length header")
                    raise HTTPException(
                        status_code=422,
                        detail="Invalid request parameters",
                    )
        if has_query and has_body:
            log.warning("Security violation: Mixed parameters (query + JSON body)")
            raise HTTPException(
                status_code=422,
                detail="Invalid request parameters",
            )
        # -------------------------
        # 6. JSON body inspection
        # -------------------------
        if content_type.startswith("application/json"):
            body = await request.body()
            # if len(body) > MAX_JSON_BODY_SIZE:
            #     raise HTTPException(status_code=422, detail="JSON body too large")
            if body:
                try:
                    payload = json.loads(body)
                except json.JSONDecodeError:
                    log.warning("Security violation: Invalid JSON body")
                    raise HTTPException(status_code=422, detail="Invalid request parameters")
                inspect_json(payload)
                # Re-inject the consumed body so downstream handlers
                # can read it again.
                async def receive():
                    return {"type": "http.request", "body": body}
                request._receive = receive
        return await call_next(request)

@ -69,8 +69,6 @@ class DefultBase(BaseModel):
validate_assignment = True
arbitrary_types_allowed = True
str_strip_whitespace = True
populate_by_name = True
extra="forbid"
json_encoders = {
# custom output conversion for datetime

@ -3,7 +3,7 @@ from typing import List
from fastapi import APIRouter, HTTPException, status
from src.auth.service import Token
from src.database.core import CollectorDbSession, DbSession
from src.database.core import DbSession
from src.models import StandardResponse
from src.overhaul.service import (get_overhaul_critical_parts,
get_overhaul_overview,
@ -18,11 +18,11 @@ router = APIRouter()
@router.get("", response_model=StandardResponse[OverhaulRead])
async def get_overhaul(db_session: DbSession, token:Token, collector_db_session:CollectorDbSession):
async def get_overhaul(db_session: DbSession, token:Token):
"""Get all scope pagination."""
overview = await get_overhaul_overview(db_session=db_session)
schedules = await get_overhaul_schedules(db_session=db_session)
criticalParts = await get_overhaul_critical_parts(db_session=db_session, session_id=overview["overhaul"]["id"], token=token, collector_db_session=collector_db_session)
criticalParts = await get_overhaul_critical_parts(db_session=db_session, session_id=overview["overhaul"]["id"], token=token)
systemComponents = get_overhaul_system_components()
return StandardResponse(

@ -6,7 +6,6 @@ from sqlalchemy import Delete, Select
from src.auth.service import CurrentUser
from src.calculation_target_reliability.service import RBD_SERVICE_API
from src.config import TC_RBD_ID
from src.database.core import DbSession
from src.contribution_util import calculate_contribution
from src.overhaul_activity.service import get_standard_scope_by_session_id
@ -29,9 +28,9 @@ async def get_simulation_results(*, simulation_id: str, token: str):
"Content-Type": "application/json"
}
calc_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/{simulation_id}?nodetype=RegularNode"
calc_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/default?nodetype=RegularNode"
# plot_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/plot/{simulation_id}?nodetype=RegularNode"
calc_plant_result = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/{simulation_id}/plant"
calc_plant_result = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/default/plant"
async with httpx.AsyncClient(timeout=300.0) as client:
calc_task = client.get(calc_result_url, headers=headers)
@ -55,17 +54,16 @@ async def get_simulation_results(*, simulation_id: str, token: str):
"plant_result": plant_data
}
async def get_overhaul_critical_parts(db_session, session_id, token, collector_db_session):
async def get_overhaul_critical_parts(db_session, session_id, token):
"""Get all overhaul critical parts."""
equipments = await get_standard_scope_by_session_id(
equipments, _ = await get_by_oh_session_id(
db_session=db_session,
overhaul_session_id=session_id,
collector_db=collector_db_session
oh_session_id=session_id,
)
criticality_simulation = await get_simulation_results(
simulation_id = TC_RBD_ID,
simulation_id="default",
token=token
)
@ -81,7 +79,7 @@ async def get_overhaul_critical_parts(db_session, session_id, token, collector_d
{
"id": equipment.id,
"location_tag": equipment.location_tag,
"name": equipment.equipment_name,
"name": equipment.master_equipment.name,
"matrix": rbd_simulation.get(equipment.location_tag)
} for equipment in equipments
@ -105,7 +103,7 @@ async def get_overhaul_critical_parts(db_session, session_id, token, collector_d
)[:10]
return {
"availability" :availability_result,
"availability" : availability_result,
"criticality": criticality_result
}

@ -106,8 +106,8 @@ async def get_overhaul_equipment(
# )
@router.post(
"/delete/{overhaul_session}/{location_tag}",
@router.delete(
"/{overhaul_session}/{location_tag}",
response_model=StandardResponse[None],
)
async def delete_scope(db_session: DbSession, location_tag: str, overhaul_session:UUID):

@ -12,7 +12,6 @@ from src.auth.service import CurrentUser
from src.database.core import DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate
from src.overhaul_activity.utils import get_material_cost, get_service_cost
from src.utils import update_model
from src.overhaul_scope.model import OverhaulScope
from src.overhaul_scope.service import get as get_session, get_prev_oh
from src.standard_scope.model import MasterEquipment, StandardScope
@ -106,21 +105,10 @@ async def get_all(
).distinct()
)
if location_tag:
query = query.filter(StandardScope.location_tag == location_tag)
equipments = (await common['db_session'].execute(query)).scalars().all()
# Use search_filter_sort_paginate for server-side pagination
# Prioritize the 'all' parameter passed to the function
common_params = {**common, "all": all or common.get("all", False)}
paginated_results = await search_filter_sort_paginate(
model=query,
**common_params
)
equipments = paginated_results["items"]
material_cost = await get_cm_cost_summary(collector_db=collector_db, last_oh_date=prev_oh_scope.end_date, upcoming_oh_date=overhaul.start_date)
service_cost = get_service_cost(scope=overhaul.maintenance_type.name, total_equipment=len(equipments))
overhaul_cost = await get_oh_cost_summary(collector_db=collector_db, last_oh_date=prev_oh_scope.end_date, upcoming_oh_date=overhaul.start_date)
results = []
@ -143,13 +131,22 @@ async def get_all(
)
results.append(res)
# Return paginated structure with transformed items
return {
**paginated_results,
"items": results
# # Pagination parameters
# page = common.get("page", 1)
# items_per_page = common.get("items_per_page", 10)
# Sort by overhaul_cost descending
results.sort(key=lambda x: x.overhaul_cost, reverse=True)
# Build response data
data = {
"items": results,
"total": len(results),
}
return data
async def get_standard_scope_by_session_id(*, db_session: DbSession, overhaul_session_id: UUID, collector_db: CollectorDbSession):
overhaul = await get_session(db_session=db_session, overhaul_session_id=overhaul_session_id)
@ -445,7 +442,9 @@ async def update(
update_data = overhaul_activity_in.model_dump(exclude_defaults=True)
update_model(activity, update_data)
for field in data:
if field in update_data:
setattr(activity, field, update_data[field])
await db_session.commit()

@ -1,17 +0,0 @@
from sqlalchemy import Column, String
from src.database.core import Base
from src.models import DefaultMixin
class OverhaulGantt(Base, DefaultMixin):
__tablename__ = "oh_ms_monitoring_spreadsheet"
spreadsheet_id = Column(String, nullable=True)
spreadsheet_link = Column(String, nullable=True)

@ -1,15 +1,11 @@
import re
from typing import List, Optional
from fastapi import APIRouter, HTTPException, status
from sqlalchemy import select
from src.auth.service import CurrentUser
from src.database.core import DbSession
from src.database.service import CommonParameters
from src.models import StandardResponse
from src.overhaul_gantt.model import OverhaulGantt
from src.overhaul_gantt.schema import OverhaulGanttIn
# from .schema import (OverhaulScheduleCreate, OverhaulSchedulePagination, OverhaulScheduleUpdate)
from .service import get_gantt_performance_chart
@ -18,93 +14,18 @@ router = APIRouter()
@router.get(
"", response_model=StandardResponse[dict]
"", response_model=StandardResponse[list]
)
async def get_gantt_performance(db_session: DbSession):
async def get_gantt_performance():
"""Get all scope pagination."""
# return
query = select(OverhaulGantt).limit(1)
data = (await db_session.execute(query)).scalar_one_or_none()
results, gantt_data = await get_gantt_performance_chart(spreadsheet_id=data.spreadsheet_id)
return StandardResponse(
data={
"chart_data": results,
"gantt_data": gantt_data
},
message="Data retrieved successfully",
)
@router.get(
"/spreadsheet", response_model=StandardResponse[dict]
)
async def get_gantt_spreadsheet(db_session: DbSession):
"""Get all scope pagination."""
# return
query = select(OverhaulGantt).limit(1)
data = (await db_session.execute(query)).scalar_one_or_none()
result = {
"spreadsheet_id": None,
"spreadsheet_link": None
}
if data:
result = {
"spreadsheet_id": data.spreadsheet_id,
"spreadsheet_link": data.spreadsheet_link
}
results = await get_gantt_performance_chart()
return StandardResponse(
data=result,
data=results,
message="Data retrieved successfully",
)
@router.post(
"/spreadsheet", response_model=StandardResponse[dict]
)
async def update_gantt_spreadsheet(db_session: DbSession, spreadsheet_in: OverhaulGanttIn):
"""Get all scope pagination."""
# return
match = re.search(r"/d/([a-zA-Z0-9-_]+)", spreadsheet_in.spreadsheet_link)
if not match:
raise ValueError("Invalid Google Sheets URL")
spreadsheet_id = match.group(1)
query = select(OverhaulGantt).limit(1)
data = (await db_session.execute(query)).scalar_one_or_none()
if data:
data.spreadsheet_link = spreadsheet_in.spreadsheet_link
data.spreadsheet_id = spreadsheet_id
else:
spreadsheet = OverhaulGantt(
spreadsheet_id=spreadsheet_id,
spreadsheet_link=spreadsheet_in.spreadsheet_link
)
db_session.add(spreadsheet)
await db_session.commit()
if data:
result = {
"spreadsheet_id": spreadsheet_id
}
return StandardResponse(
data=result,
message="Data retrieved successfully",
)
# @router.post("", response_model=StandardResponse[None])

@ -9,12 +9,8 @@
# from src.scope_equipment_job.schema import ScopeEquipmentJobRead
# from src.job.schema import ActivityMasterRead
from pydantic import Field
from src.models import DefultBase
class OverhaulGanttIn(DefultBase):
spreadsheet_link: str = Field(...)
# class OverhaulScheduleBase(DefultBase):
# pass
# class OverhaulScheduleCreate(OverhaulScheduleBase):

@ -6,7 +6,7 @@ from sqlalchemy.orm import selectinload
# from .model import OverhaulSchedule
# from .schema import OverhaulScheduleCreate, OverhaulScheduleUpdate
from .utils import fetch_all_sections, get_google_creds, get_spreatsheed_service, process_spreadsheet_data
from .utils import get_google_creds, get_spreatsheed_service, process_spreadsheet_data
# async def get_all(*, common):
# """Returns all documents."""
@ -53,60 +53,20 @@ from .utils import fetch_all_sections, get_google_creds, get_spreatsheed_service
async def get_gantt_performance_chart(*, spreadsheet_id = "1gZXuwA97zU1v4QBv56wKeiqadc6skHUucGKYG8qVFRk"):
creds = get_google_creds()
RANGE_NAME = "'SUMMARY'!K34:AZ38" # Or just "2024 schedule"
GANTT_DATA_NAME = "ACTUAL PROGRESS"
RANGE_NAME = "'2024 kurva s'!N79:BJ83" # Or just "2024 schedule"
try:
service = get_spreatsheed_service(creds)
sheet = service.spreadsheets()
response = sheet.values().get(
spreadsheetId=spreadsheet_id,
range=RANGE_NAME
).execute()
response = sheet.values().get(spreadsheetId=spreadsheet_id, range=RANGE_NAME).execute()
values = response.get("values", [])
if len(values) < 4:
raise Exception("Spreadsheet format invalid: need 4 rows (DAY, DATE, PLAN, ACTUAL).")
# Extract rows
day_row = values[0][1:]
date_row = values[1][1:]
plan_row = values[3][1:]
actual_row = values[4][1:]
total_days = len(day_row)
# PAD rows so lengths match day count
date_row += [""] * (total_days - len(date_row))
plan_row += [""] * (total_days - len(plan_row))
actual_row += [""] * (total_days - len(actual_row))
results = []
for i in range(total_days):
day = day_row[i]
date = date_row[i]
plan = plan_row[i]
actual = actual_row[i] if actual_row[i] else "0%" # <-- FIX HERE
results.append({
"day": day,
"date": date,
"plan": plan,
"actual": actual
})
keys = ['day', 'time', 'plan', 'actual', 'gap']
transposed = list(zip(*values))
results = [dict(zip(keys, result)) for result in transposed]
except Exception as e:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=e)
processed_data = process_spreadsheet_data(results)
gantt_data = fetch_all_sections(service=service, spreadsheet_id=spreadsheet_id, sheet_name=GANTT_DATA_NAME)
return processed_data, gantt_data
return processed_data

@ -22,176 +22,44 @@ def process_spreadsheet_data(rows):
processed_data = []
for row in rows:
processed_row = convert_spreadsheet_data(row)
processed_data.append(processed_row) if processed_row else None
processed_data.append(processed_row)
return processed_data
from datetime import datetime
from datetime import datetime
def convert_spreadsheet_data(data, default_year=None):
"""
Convert spreadsheet row into structured data.
Expected keys: day, date, plan, actual
"""
# Skip header or invalid rows
if not data.get("day") or not data["day"].isdigit():
return None
def convert_spreadsheet_data(data):
result = {}
# Convert day
result["day"] = int(data["day"])
# Determine default year
if default_year is None:
default_year = datetime.now().year
date_str = data.get("date", "").strip()
# ---------- DATE HANDLING ----------
# Accept formats like: "Nov 20", "Dec 3", "Jan 1"
parsed_date = None
if date_str:
try:
parsed_date = datetime.strptime(f"{date_str} {default_year}", "%b %d %Y")
except ValueError:
try:
parsed_date = datetime.strptime(f"{date_str} {default_year}", "%B %d %Y")
except:
parsed_date = None
# YEAR ROLLOVER (Dec → Jan next year)
if parsed_date and parsed_date.month == 1 and "Dec" in data.get("date", ""):
parsed_date = parsed_date.replace(year=default_year + 1)
result["date"] = parsed_date
# ---------- PERCENT HANDLING ----------
def parse_percent(value):
if not value:
return 0.0
v = value.strip().replace(",", ".").replace("%", "")
try:
return float(v) / 100.0
except:
return 0.0
result["plan"] = parse_percent(data.get("plan", "0"))
result["actual"] = parse_percent(data.get("actual", "0"))
# Gap calculation
result["gap"] = result["actual"] - result["plan"]
return result
def fetch_all_sections(service, spreadsheet_id, sheet_name):
# Fetch a wide range including columns AL
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id,
range=f"{sheet_name}!A5:M5000"
).execute()
values = result.get("values", [])
if not values:
raise ValueError("No data found in sheet")
data = []
current_section = None
current_subsystem = None
for row in values:
# Pad missing columns to avoid index errors
row += [""] * (13 - len(row))
colA, colB, colC, colD, colE, colF, colG, colH, colI, colJ, colK, colL, colM = row
# Detect a SECTION — bold blue rows in Column C
if colC and not colD and not colE:
current_section = colC.strip()
current_subsystem = None
continue
# Detect a SUBSYSTEM — indented header in Column D
if colD and not colE:
current_subsystem = colD.strip()
continue
# Detect a TASK — Column E populated
if colE:
task = colE.strip()
pic = colF.strip()
start_date = indo_formatted_date(colG.strip())
finish_date = indo_formatted_date(colH.strip())
duration = colI.strip()
plan = colK.strip()
actual = colL.strip()
gap = colM.strip()
data.append({
"system": current_section,
"subsystem": current_subsystem,
"task": task,
"PIC": pic,
"start_date": start_date,
"end_date": finish_date,
"duration": int(duration),
"plan": plan,
"actual": actual,
"gap": gap
})
return data
def indo_formatted_date(date_str, base_year=2025):
"""
Convert short date like 'Nov 20', '30-Dec', 'Jan 1'
into: 'Rabu, November 20, 2025'
If month is January, year becomes 2026.
"""
# Month mappings
eng_to_indo_month = {
"Jan": "Januari", "Feb": "Februari", "Mar": "Maret", "Apr": "April",
"May": "Mei", "Jun": "Juni", "Jul": "Juli", "Aug": "Agustus",
"Sep": "September", "Oct": "Oktober", "Nov": "November", "Dec": "Desember"
# Convert day to integer
result['day'] = int(data['day'])
# Convert time to a datetime object
from datetime import datetime
# Assuming Indonesian format with month names
# Replace Indonesian month names with English if needed
month_mapping = {
'Januari': 'January', 'Februari': 'February', 'Maret': 'March',
'April': 'April', 'Mei': 'May', 'Juni': 'June',
'Juli': 'July', 'Agustus': 'August', 'September': 'September',
'Oktober': 'October', 'November': 'November', 'Desember': 'December'
}
indo_days = {
0: "Senin",
1: "Selasa",
2: "Rabu",
3: "Kamis",
4: "Jumat",
5: "Sabtu",
6: "Minggu"
}
# Normalize formats ("30-Dec" → "Dec 30")
if "-" in date_str:
d, m = date_str.split("-")
date_str = f"{m} {d}"
# Parse using English abbreviation
try:
dt = datetime.strptime(f"{date_str} {base_year}", "%b %d %Y")
except:
return None
time_str = data['time']
for indo, eng in month_mapping.items():
time_str = time_str.replace(indo, eng)
# Format: "Sabtu, Juli 13, 2024" -> "Saturday, July 13, 2024"
# Removing the day of week to simplify parsing
time_str = time_str.split(', ', 1)[1] # Remove "Sabtu, "
result['time'] = datetime.strptime(time_str, '%B %d, %Y')
# Convert percentage strings to floats
# Handling format like "0,12%" -> 0.12
for key in ['plan', 'actual', 'gap']:
# Replace comma with dot (European to US decimal notation)
value = data[key].replace(',', '.')
# Remove percentage sign
value = value.rstrip('%')
# Convert to float
result[key] = float(value) / 100 # Divide by 100 to get the actual decimal value
# Handle year rollover (Jan -> next year)
if dt.month == 1:
dt = dt.replace(year=base_year + 1)
# Convert to Indonesian components
day_name = indo_days[dt.weekday()]
month_name = eng_to_indo_month[dt.strftime("%b")]
return f"{day_name}, {month_name} {dt.day}, {dt.year}"
return result

@ -46,7 +46,7 @@ async def create_overhaul_equipment_jobs(
)
@router.post("/delete/{overhaul_job_id}", response_model=StandardResponse[None])
@router.delete("/{overhaul_job_id}", response_model=StandardResponse[None])
async def delete_overhaul_equipment_job(db_session: DbSession, overhaul_job_id):
await delete(db_session=db_session, overhaul_job_id=overhaul_job_id)

@ -42,7 +42,7 @@ async def create_overhaul_equipment_jobs(
message="Data created successfully",
)
@router.post("/update/{overhaul_job_id}", response_model=StandardResponse[None])
@router.put("/{overhaul_job_id}", response_model=StandardResponse[None])
async def update_overhaul_schedule(
db_session: DbSession, overhaul_job_id: str, overhaul_job_in: OverhaulScheduleUpdate
):
@ -53,7 +53,7 @@ async def update_overhaul_schedule(
message="Data updated successfully",
)
@router.post("/delete/{overhaul_job_id}", response_model=StandardResponse[None])
@router.delete("/{overhaul_job_id}", response_model=StandardResponse[None])
async def delete_overhaul_equipment_job(db_session: DbSession, overhaul_job_id):
await delete(db_session=db_session, overhaul_schedule_id=overhaul_job_id)

@ -7,7 +7,6 @@ from sqlalchemy.orm import selectinload
from src.auth.service import CurrentUser
from src.database.core import DbSession
from src.database.service import search_filter_sort_paginate
from src.utils import update_model
from src.scope_equipment_job.model import ScopeEquipmentJob
from src.overhaul_activity.model import OverhaulActivity
@ -42,7 +41,9 @@ async def update(*, db_session: DbSession, overhaul_schedule_id: str, overhaul_j
update_data = overhaul_job_in.model_dump(exclude_defaults=True)
update_model(overhaul_schedule, update_data)
for field in data:
if field in update_data:
setattr(overhaul_schedule, field, update_data[field])
await db_session.commit()

@ -1,5 +1,4 @@
from sqlalchemy import JSON
from sqlalchemy import Column, DateTime , Integer, String, ForeignKey, UUID
from sqlalchemy import Column, DateTime, Float, Integer, String, ForeignKey, UUID
from sqlalchemy.orm import relationship
from src.database.core import Base
@ -15,7 +14,6 @@ class OverhaulScope(Base, DefaultMixin):
status = Column(String, nullable=False, default="Upcoming")
maintenance_type_id = Column(
UUID(as_uuid=True), ForeignKey("oh_ms_maintenance_type.id"), nullable=False)
wo_parent = Column(JSON, nullable=True)
maintenance_type = relationship("MaintenanceType", lazy="selectin", backref="overhaul_scopes")
# activity_equipments = relationship("OverhaulActivity", lazy="selectin")

@ -1,4 +1,3 @@
from typing import List
from typing import Optional
from fastapi import APIRouter, HTTPException, status
@ -10,7 +9,7 @@ from src.models import StandardResponse
from .model import OverhaulScope
from .schema import ScopeCreate, ScopePagination, ScopeRead, ScopeUpdate
from .service import create, delete, get, get_all, update,get_all_oh_with_history_service
from .service import create, delete, get, get_all, update
router = APIRouter()
@ -26,9 +25,6 @@ async def get_scopes(common: CommonParameters, scope_name: Optional[str] = None)
message="Data retrieved successfully",
)
@router.get("/history", response_model=StandardResponse[List[ScopeRead]])
async def get_history(db_session: DbSession):
return StandardResponse(data=await get_all_oh_with_history_service(db_session=db_session), message="Data retrieved successfully")
@router.get("/{overhaul_session_id}", response_model=StandardResponse[ScopeRead])
async def get_scope(db_session: DbSession, overhaul_session_id: str):
@ -49,7 +45,7 @@ async def create_scope(db_session: DbSession, scope_in: ScopeCreate):
return StandardResponse(data=scope, message="Data created successfully")
@router.post("/update/{scope_id}", response_model=StandardResponse[ScopeRead])
@router.put("/{scope_id}", response_model=StandardResponse[ScopeRead])
async def update_scope(
db_session: DbSession,
scope_id: str,
@ -70,7 +66,7 @@ async def update_scope(
)
@router.post("/delete/{scope_id}", response_model=StandardResponse[ScopeRead])
@router.delete("/{scope_id}", response_model=StandardResponse[ScopeRead])
async def delete_scope(db_session: DbSession, scope_id: str):
scope = await get(db_session=db_session, scope_id=scope_id)
@ -83,5 +79,3 @@ async def delete_scope(db_session: DbSession, scope_id: str):
await delete(db_session=db_session, scope_id=scope_id)
return StandardResponse(message="Data deleted successfully", data=scope)

@ -7,7 +7,7 @@ from src.auth.service import CurrentUser
from src.database.core import DbSession
from src.database.service import search_filter_sort_paginate
from src.overhaul_activity.model import OverhaulActivity
from src.utils import time_now, update_model
from src.utils import time_now
from src.standard_scope.model import MasterEquipment, StandardScope, EquipmentOHHistory
from src.workscope_group.model import MasterActivity
from src.workscope_group_maintenance_type.model import WorkscopeOHType
@ -132,7 +132,9 @@ async def update(*, db_session: DbSession, scope: OverhaulScope, scope_in: Scope
update_data = scope_in.model_dump(exclude_defaults=True)
update_model(scope, update_data)
for field in data:
if field in update_data:
setattr(scope, field, update_data[field])
await db_session.commit()
@ -159,7 +161,7 @@ async def get_overview_overhaul(*, db_session: DbSession):
)
)
ongoing_result = await db_session.execute(ongoing_query.options(selectinload(OverhaulScope.maintenance_type)))
ongoing_overhaul = ongoing_result.scalar_one_or_none()
ongoing_overhaul = ongoing_result.first()
# 2. If no ongoing overhaul, get the closest scheduled overhaul
if ongoing_overhaul is None:
@ -200,9 +202,6 @@ async def get_overview_overhaul(*, db_session: DbSession):
)
results = await db_session.execute(equipments)
#Remaining days based on status
remaining_days = (selected_overhaul.start_date - current_date).days if selected_overhaul.status == "Upcoming" else (selected_overhaul.end_date - current_date).days
return {
"status": selected_overhaul.status,
@ -213,12 +212,58 @@ async def get_overview_overhaul(*, db_session: DbSession):
"end_date": selected_overhaul.end_date,
"duration_oh": selected_overhaul.duration_oh,
"crew_number": selected_overhaul.crew_number,
"remaining_days": remaining_days,
"remaining_days": (selected_overhaul.start_date - current_date).days,
"equipment_count": len(results.scalars().all()),
},
}
async def get_all_oh_with_history_service(*, db_session: DbSession):
query = Select(OverhaulScope).options(selectinload(OverhaulScope.maintenance_type)).where(OverhaulScope.wo_parent.isnot(None))
results = await db_session.execute(query)
return results.scalars().all()
# if ongoing_result:
# ongoing_overhaul, equipment_count = ongoing_result # Unpack the result tuple
# return {
# "status": "Ongoing",
# "overhaul": {
# "id": ongoing_overhaul.id,
# "type": ongoing_overhaul.maintenance_type.name,
# "start_date": ongoing_overhaul.start_date,
# "end_date": ongoing_overhaul.end_date,
# "duration_oh": ongoing_overhaul.duration_oh,
# "crew_number": ongoing_overhaul.crew_number,
# "remaining_days": (ongoing_overhaul.end_date - current_date).days,
# "equipment_count": equipment_count,
# },
# }
# # For upcoming overhaul with count
# upcoming_query = (
# Select(OverhaulScope, func.count(OverhaulActivity.id).label("equipment_count"))
# .outerjoin(OverhaulScope.activity_equipments)
# .where(
# OverhaulScope.start_date > current_date,
# )
# .group_by(OverhaulScope.id)
# .order_by(OverhaulScope.start_date)
# )
# upcoming_result = await db_session.execute(upcoming_query)
# upcoming_result = upcoming_result.first()
# if upcoming_result:
# upcoming_overhaul, equipment_count = upcoming_result # Unpack the result tuple
# days_until = (upcoming_overhaul.start_date - current_date).days
# return {
# "status": "Upcoming",
# "overhaul": {
# "id": upcoming_overhaul.id,
# "type": upcoming_overhaul.type,
# "start_date": upcoming_overhaul.start_date,
# "end_date": upcoming_overhaul.end_date,
# "duration_oh": upcoming_overhaul.duration_oh,
# "crew_number": upcoming_overhaul.crew_number,
# "remaining_days": days_until,
# "equipment_count": equipment_count,
# },
# }
# return {"status": "no_overhaul", "overhaul": None}

@ -33,11 +33,3 @@ class MasterSparepartProcurement(Base, DefaultMixin):
eta_requisition = Column(Date, nullable=False)
eta_ordered = Column(Date, nullable=True)
eta_received = Column(Date, nullable=True)
class SparepartRemark(Base, DefaultMixin):
    """ORM model storing a free-text remark keyed by sparepart item number."""

    __tablename__ = "oh_ms_sparepart_remark"

    # Maximo item number this remark belongs to.
    itemnum = Column(String, nullable=False)
    # Free-text remark content.
    remark = Column(String, nullable=False)

@ -4,18 +4,17 @@ from src.database.core import CollectorDbSession
from src.database.service import (CommonParameters, DbSession,
search_filter_sort_paginate)
from src.models import StandardResponse
from src.sparepart.schema import SparepartRemark
from .service import create_remark, get_spareparts_paginated
from .service import get_all
router = APIRouter()
@router.get("", response_model=StandardResponse[list])
async def get_sparepart(collector_db_session:CollectorDbSession, db_session: DbSession):
async def get_sparepart(collector_db_session:CollectorDbSession):
"""Get all scope activity pagination."""
# return
data = await get_spareparts_paginated(db_session=db_session, collector_db_session=collector_db_session)
data = await get_all(collector_db_session)
@ -23,17 +22,6 @@ async def get_sparepart(collector_db_session:CollectorDbSession, db_session: DbS
data=data,
message="Data retrieved successfully",
)
@router.post("", response_model=StandardResponse[SparepartRemark])
async def create_remark_route(collector_db_session:CollectorDbSession, db_session: DbSession, remark_in:SparepartRemark):
sparepart_remark = await create_remark(db_session=db_session, collector_db_session=collector_db_session, remark_in=remark_in)
return StandardResponse(
data=sparepart_remark,
message="Remark Created successfully"
)
# @router.post("", response_model=StandardResponse[ActivityMasterCreate])

@ -1,6 +1,4 @@
from dataclasses import dataclass
from datetime import date, datetime
from enum import Enum
from datetime import datetime
from typing import Any, Dict, List, Optional
from uuid import UUID
@ -37,48 +35,41 @@ class ActivityMasterRead(ActivityMaster):
class ActivityMasterPagination(Pagination):
items: List[ActivityMasterRead] = []
class ProcurementStatus(Enum):
    """Lifecycle states used for sparepart procurement (PR/PO) records."""

    PLANNED = "planned"
    ORDERED = "ordered"
    RECEIVED = "received"
    CANCELLED = "cancelled"
@dataclass
class SparepartRequirement:
    """Sparepart requirement for equipment overhaul.

    Field order is part of the positional-construction contract; do not reorder.
    """

    sparepart_id: str        # item identifier
    quantity_required: int   # quantity needed for the overhaul
    lead_time: int           # procurement lead time (units per caller's convention)
    sparepart_name: str      # human-readable description
    unit_cost: float         # cost per unit
    avg_cost: float          # historical average cost
    remark: str              # free-text remark attached to the item
@dataclass
class SparepartStock:
    """Current sparepart stock information.

    Field order is part of the positional-construction contract; do not reorder.
    """

    sparepart_id: str    # item identifier
    sparepart_name: str  # human-readable description
    current_stock: int   # on-hand balance
    unit_cost: float     # cost per unit
    location: str        # storage location
    remark: str          # free-text remark attached to the item
@dataclass
class ProcurementRecord:
    """Purchase Order / Purchase Request record for a sparepart.

    Field order is part of the positional-construction contract; do not reorder.
    """

    po_pr_id: str                    # PO/PR document number
    sparepart_id: str                # item identifier
    sparepart_name: str              # human-readable description
    quantity: int                    # ordered quantity
    unit_cost: float                 # cost per unit
    total_cost: float                # extended line cost
    order_date: date                 # issue date of the document
    expected_delivery_date: date     # estimated arrival date
    status: ProcurementStatus        # current procurement lifecycle state
    po_vendor_delivery_date: date    # vendor-committed delivery date
class SparepartRemark(DefultBase):
    """Request/response schema pairing a Maximo itemnum with its remark text."""

    # NOTE(review): base class name "DefultBase" looks like a typo of
    # "DefaultBase" — it is defined elsewhere in the project, so it cannot be
    # renamed from this schema alone; confirm and fix at the definition site.
    itemnum: str
    remark: str
# {
# "overview": {
# "totalEquipment": 30,
# "nextSchedule": {
# "date": "2025-01-12",
# "Overhaul": "B",
# "equipmentCount": 30
# }
# },
# "criticalParts": [
# "Boiler feed pump",
# "Boiler reheater system",
# "Drum Level (Right) Root Valve A",
# "BCP A Discharge Valve",
# "BFPT A EXH Press HI Root VLV"
# ],
# "schedules": [
# {
# "date": "2025-01-12",
# "Overhaul": "B",
# "status": "upcoming"
# }
# // ... other scheduled overhauls
# ],
# "systemComponents": {
# "boiler": {
# "status": "operational",
# "lastOverhaul": "2024-06-15"
# },
# "turbine": {
# "hpt": { "status": "operational" },
# "ipt": { "status": "operational" },
# "lpt": { "status": "operational" }
# }
# // ... other major components
# }
# }

@ -9,400 +9,147 @@ import numpy as np
from dataclasses import dataclass
from enum import Enum
from sqlalchemy import Delete, Select, select, text
from sqlalchemy import Delete, Select, text
from sqlalchemy.orm import joinedload, selectinload
from src.auth.service import CurrentUser
from src.database.core import DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate
from src.overhaul_activity.service import get_standard_scope_by_session_id
from src.overhaul_scope.service import get as get_scope, get_overview_overhaul
from src.logging import setup_logging
from src.overhaul_scope.service import get as get_scope
from src.overhaul_scope.service import get_prev_oh
from src.sparepart.model import SparepartRemark
from src.sparepart.schema import ProcurementRecord, ProcurementStatus, SparepartRequirement, SparepartStock
log = logging.getLogger(__name__)
setup_logging(logger=log)
from sqlalchemy import text
import math
from sqlalchemy import text
# async def get_spareparts_paginated(
# *,
# db_session,
# collector_db_session,
# ):
# """
# Get spare parts for work orders under specific parent WO(s),
# including inventory and PR/PO data.
# """
# # Normalize parent_num to array for SQL ANY()
# # parent_nums = parent_num if isinstance(parent_num, (list, tuple)) else [parent_num]
# parent_nums = []
# data_query = text("""
# WITH selected_wo AS (
# SELECT
# wonum,
# xx_parent,
# location_tag,
# assetnum,
# siteid,
# reportdate
# FROM public.wo_maxim
# WHERE xx_parent = ANY(:parent_nums)
# ),
# wo_materials AS (
# SELECT
# wm.wonum,
# wm.itemnum,
# wm.itemqty,
# wm.inv_itemnum,
# wm.inv_location,
# wm.inv_curbaltotal,
# wm.inv_avgcost,
# sw.location_tag
# FROM public.wo_maxim_material wm
# JOIN selected_wo sw ON wm.wonum = sw.wonum
# ),
# -- PR Lines
# pr_lines AS (
# SELECT
# pl.item_num,
# h.num AS pr_number,
# h.issue_date AS pr_issue_date,
# h.status AS pr_status,
# pl.qty_ordered AS pr_qty_ordered,
# pl.qty_requested AS pr_qty_requested
# FROM public.maximo_sparepart_pr_po h
# JOIN public.maximo_sparepart_pr_po_line pl
# ON h.num = pl.num
# WHERE h.type = 'PR'
# AND EXTRACT(YEAR FROM h.issue_date) >= 2019
# ),
# -- PO Lines
# po_lines AS (
# SELECT
# pl.item_num,
# h.num AS po_number,
# h.estimated_arrival_date AS po_estimated_arrival_date,
# h.vendeliverydate AS po_vendeliverydate,
# h.receipts AS po_receipt,
# h.status AS po_status,
# pl.qty_ordered AS po_qty_ordered,
# pl.qty_received AS po_qty_received
# FROM public.maximo_sparepart_pr_po h
# JOIN public.maximo_sparepart_pr_po_line pl
# ON h.num = pl.num
# WHERE h.type = 'PO'
# AND (h.receipts = 'NONE')
# AND (h.status IS NOT NULL)
# ),
# -- Item Descriptions
# item_descriptions AS (
# SELECT DISTINCT
# item_num,
# FIRST_VALUE(description) OVER (
# PARTITION BY item_num
# ORDER BY created_at DESC NULLS LAST
# ) AS description
# FROM public.maximo_sparepart_pr_po_line
# WHERE description IS NOT NULL
# ),
# -- Unified PR/PO data
# pr_po_unified AS (
# SELECT
# pr.item_num,
# pr.pr_number,
# pr.pr_issue_date,
# pr.pr_qty_ordered,
# pr.pr_status,
# po.po_number,
# COALESCE(po.po_qty_ordered, 0) AS po_qty_ordered,
# COALESCE(po.po_qty_received, 0) AS po_qty_received,
# po.po_estimated_arrival_date,
# po.po_vendeliverydate,
# po.po_receipt,
# po.po_status,
# CASE WHEN po.po_number IS NOT NULL THEN 'YES' ELSE 'NO' END AS po_exists
# FROM pr_lines pr
# LEFT JOIN po_lines po
# ON pr.item_num = po.item_num
# AND pr.pr_number = po.po_number
# ),
# -- Aggregate PR/PO info
# pr_po_agg AS (
# SELECT
# item_num,
# SUM(COALESCE(pr_qty_ordered, 0)) AS total_pr_qty,
# SUM(COALESCE(po_qty_ordered, 0)) AS total_po_qty,
# SUM(COALESCE(po_qty_received, 0)) AS total_po_received,
# JSON_AGG(
# JSON_BUILD_OBJECT(
# 'pr_number', pr_number,
# 'pr_issue_date', pr_issue_date,
# 'pr_qty_requested', pr_qty_ordered,
# 'pr_status', pr_status,
# 'po_exists', po_exists,
# 'po_qty_ordered', po_qty_ordered,
# 'po_qty_received', po_qty_received,
# 'po_estimated_arrival_date', po_estimated_arrival_date,
# 'po_vendeliverydate', po_vendeliverydate,
# 'po_receipt', po_receipt,
# 'po_status', po_status
# )
# ORDER BY pr_issue_date DESC
# ) AS pr_po_details
# FROM pr_po_unified
# GROUP BY item_num
# )
# SELECT
# wm.itemnum,
# COALESCE(id.description, 'No description available') AS item_description,
# SUM(wm.itemqty) AS total_required_for_oh,
# COALESCE(MAX(wm.inv_curbaltotal), 0) AS current_balance_total,
# COALESCE(ap.total_pr_qty, 0) AS total_pr_qty,
# COALESCE(ap.total_po_qty, 0) AS total_po_qty,
# COALESCE(ap.total_po_received, 0) AS total_po_received,
# ap.pr_po_details
# FROM wo_materials wm
# LEFT JOIN item_descriptions id
# ON wm.itemnum = id.item_num
# LEFT JOIN pr_po_agg ap
# ON wm.itemnum = ap.item_num
# GROUP BY
# wm.itemnum, id.description,
# ap.total_pr_qty, ap.total_po_qty, ap.total_po_received, ap.pr_po_details
# ORDER BY wm.itemnum;
# """)
# rows = await collector_db_session.execute(data_query, {"parent_nums": parent_nums})
# spare_parts = []
# for row in rows:
# spare_parts.append({
# "item_num": row.itemnum,
# "description": row.item_description,
# "current_balance_total": float(row.current_balance_total or 0.0),
# "total_required_for_oh": float(row.total_required_for_oh or 0.0),
# "total_pr_qty": row.total_pr_qty,
# "total_po_qty": row.total_po_qty,
# "total_po_received": row.total_po_received,
# "pr_po_details": row.pr_po_details,
# })
# return spare_parts
async def get_spareparts_paginated(*, db_session, collector_db_session):
async def get_all(db_session: DbSession):
"""
Get paginated spare parts with usage, inventory, and PR/PO information.
Uses two queries: one for data, one for total count.
Get all spare parts with their latest PR and PO information.
Args:
db_session: SQLAlchemy database session
page (int): Page number (1-based)
items_per_page (int): Number of items per page
assetnum: Optional asset number filter (not used in this query but kept for compatibility)
Returns:
List of dictionaries containing spare part information
"""
# calculate limit/offset
# limit = items_per_page
# offset = (page - 1) * items_per_page
# wo_materials AS (
# SELECT
# wm.wonum,
# wm.itemnum,
# wm.itemqty,
# wm.inv_itemnum,
# wm.inv_location,
# wm.inv_curbaltotal,
# wm.inv_avgcost,
# sw.asset_location as location_tag
# FROM public.wo_maxim_material wm
# JOIN oh_workorders sw ON wm.wonum = sw.wonum
# ),
# -----------------------------
# Query #1: Fetch paginated rows
# -----------------------------
data_query = text("""
WITH oh_workorders AS (
SELECT DISTINCT wonum, asset_location, asset_unit
FROM public.wo_maximo ma
WHERE ma.xx_parent IN ('155026', '155027', '155029', '155030')
),
wo_materials AS (
SELECT
wm.wonum,
wm.itemnum,
wm.itemqty,
wm.inv_location AS inv_location,
wm.inv_curbaltotal AS inv_curbaltotal,
wm.inv_avgcost AS inv_avgcost,
sw.asset_location as location_tag
FROM public.wo_maximo_material wm
JOIN oh_workorders sw ON wm.wonum = sw.wonum
),
location_sparepart_stats AS (
SELECT location_tag, itemnum,
COUNT(DISTINCT wonum) as total_wo_count,
SUM(itemqty) as total_qty_used,
AVG(itemqty) as avg_qty_per_wo,
MIN(itemqty) as min_qty_used,
MAX(itemqty) as max_qty_used
FROM wo_materials
GROUP BY location_tag, itemnum
HAVING SUM(itemqty) > 0
),
pr_lines AS (
SELECT
pl.item_num,
h.num as pr_number,
h.issue_date as pr_issue_date,
h.status as pr_status,
pl.qty_ordered as pr_qty_ordered,
pl.qty_requested as pr_qty_requested
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
WHERE h.type = 'PR' AND EXTRACT(YEAR FROM h.issue_date) >= 2023
),
item_descriptions AS (
SELECT DISTINCT
item_num,
FIRST_VALUE(description) OVER (
PARTITION BY item_num
ORDER BY created_at DESC NULLS LAST
) as description
FROM public.maximo_sparepart_pr_po_line
WHERE description IS NOT NULL
),
po_lines AS (
# Define the SQL query
query = text("""
WITH latest_prs AS (
SELECT DISTINCT ON (pl.item_num)
pl.item_num,
h.num as pr_number,
h.issue_date as pr_issue_date,
h.status as pr_status,
pl.qty_ordered as pr_qty_ordered,
pl.description,
pl.unit_cost,
pl.line_cost
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
WHERE h.type = 'PR'
AND h.issue_date IS NOT NULL
AND h.num LIKE 'K%'
ORDER BY pl.item_num, h.issue_date DESC
)
SELECT DISTINCT ON (pr.item_num)
pr.item_num,
pr.line_cost,
pr.unit_cost,
pr.description,
COALESCE(i.curbaltotal, 0) as current_balance_total,
pr.pr_number,
pr.pr_issue_date,
pr.pr_qty_ordered,
CASE
WHEN po.po_number IS NOT NULL THEN 'YES'
ELSE 'NO'
END as po_exists,
COALESCE(po.qty_received, 0) as po_qty_received,
COALESCE(po.qty_ordered, 0) as po_qty_ordered,
po.estimated_arrival_date as po_estimated_arrival_date
FROM latest_prs pr
LEFT JOIN public.maximo_inventory i ON pr.item_num = i.itemnum
LEFT JOIN LATERAL (
SELECT
pl.item_num,
h.num as po_number,
h.estimated_arrival_date as po_estimated_arrival_date,
h.vendeliverydate as po_vendeliverydate,
h.receipts as po_receipt,
h.status as po_status,
pl.qty_ordered as po_qty_ordered,
pl.qty_received as po_qty_received
pl.qty_received,
pl.qty_ordered,
h.estimated_arrival_date
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
WHERE h.type = 'PO'
AND (h.receipts = 'NONE')
AND (h.status IS NOT NULL)
),
pr_po_unified AS (
SELECT
pr.item_num,
pr.pr_number,
pr.pr_issue_date,
pr.pr_qty_ordered,
pr.pr_status,
po.po_number,
COALESCE(po.po_qty_ordered,0) as po_qty_ordered,
COALESCE(po.po_qty_received,0) as po_qty_received,
po.po_estimated_arrival_date,
po.po_vendeliverydate,
po.po_receipt,
po.po_status,
CASE WHEN po.po_number IS NOT NULL THEN 'YES' ELSE 'NO' END as po_exists
FROM pr_lines pr
LEFT JOIN po_lines po
ON pr.item_num = po.item_num
AND pr.pr_number = po.po_number
),
pr_po_agg AS (
SELECT
item_num,
SUM(COALESCE(pr_qty_ordered,0)) as total_pr_qty,
SUM(COALESCE(po_qty_ordered,0)) as total_po_qty,
SUM(COALESCE(po_qty_received,0)) as total_po_received,
JSON_AGG(
JSON_BUILD_OBJECT(
'pr_number', pr_number,
'pr_issue_date', pr_issue_date,
'pr_qty_requested', pr_qty_ordered,
'pr_status', pr_status,
'po_exists', po_exists,
'po_qty_ordered', po_qty_ordered,
'po_qty_received', po_qty_received,
'po_estimated_arrival_date', po_estimated_arrival_date,
'po_vendeliverydate', po_vendeliverydate,
'po_receipt', po_receipt,
'po_status', po_status
) ORDER BY pr_issue_date DESC
) as pr_po_details
FROM pr_po_unified
GROUP BY item_num
),
inv_summary AS (
SELECT
itemnum,
MAX(inv_curbaltotal) AS total_curbaltotal,
AVG(inv_avgcost) AS avg_cost
FROM wo_materials
GROUP BY itemnum
)
SELECT
lss.itemnum,
COALESCE(id.description, 'No description available') as item_description,
lss.total_wo_count,
lss.total_qty_used,
ROUND(CAST(lss.avg_qty_per_wo AS NUMERIC), 2) as avg_qty_per_wo,
lss.min_qty_used,
lss.max_qty_used,
COALESCE(i.total_curbaltotal,0) as current_balance_total,
COALESCE(ap.total_pr_qty,0) as total_pr_qty,
COALESCE(ap.total_po_qty,0) as total_po_qty,
COALESCE(ap.total_po_received,0) as total_po_received,
ap.pr_po_details
FROM location_sparepart_stats lss
LEFT JOIN item_descriptions id ON lss.itemnum = id.item_num
LEFT JOIN inv_summary i ON lss.itemnum = i.itemnum
LEFT JOIN pr_po_agg ap ON lss.itemnum = ap.item_num
ORDER BY lss.location_tag, lss.itemnum;
AND h.num = pr.pr_number
AND pl.item_num = pr.item_num
LIMIT 1
) po ON true
ORDER BY pr.item_num;
""")
overhaul = await get_overview_overhaul(db_session=db_session)
standard_overhaul = await get_standard_scope_by_session_id(db_session=db_session, collector_db=collector_db_session, overhaul_session_id=overhaul['overhaul']['id'])
asset_locations = [eq.location_tag for eq in standard_overhaul]
rows = await collector_db_session.execute(
data_query,
{"asset_locations": asset_locations}
)
sparepart_remark = (await db_session.execute(
select(SparepartRemark)
)).scalars().all()
sparepart_remark_dict = {item.itemnum: item.remark for item in sparepart_remark}
# Execute the query
result = await db_session.execute(query)
# Fetch all results and convert to list of dictionaries
spare_parts = []
for row in rows:
for row in result:
spare_parts.append({
"item_num": row.itemnum,
"description": row.item_description,
"remark": sparepart_remark_dict.get(row.itemnum, ""),
"current_balance_total": float(row.current_balance_total) if row.current_balance_total else 0.0,
"total_required_for_oh": float(row.avg_qty_per_wo),
"total_pr_qty": row.total_pr_qty,
"total_po_qty": row.total_po_qty,
"total_po_received": row.total_po_received,
"pr_po_details": row.pr_po_details
"item_num": row.item_num,
"description": row.description,
"line_cost": row.line_cost,
"unit_cost": row.unit_cost,
"current_balance_total": float(row.current_balance_total) if row.current_balance_total is not None else 0.0,
"pr_number": row.pr_number,
"pr_issue_date": row.pr_issue_date,
"pr_qty_ordered": float(row.pr_qty_ordered) if row.pr_qty_ordered is not None else 0.0,
"po_exists": row.po_exists,
"po_qty_received": float(row.po_qty_received) if row.po_qty_received is not None else 0.0,
"po_qty_ordered": float(row.po_qty_ordered) if row.po_qty_ordered is not None else 0.0,
"po_estimated_arrival_date": row.po_estimated_arrival_date
})
return spare_parts
class ProcurementStatus(Enum):
    """Lifecycle states used for sparepart procurement (PR/PO) records."""

    PLANNED = "planned"
    ORDERED = "ordered"
    RECEIVED = "received"
    CANCELLED = "cancelled"
@dataclass
class SparepartRequirement:
    """Sparepart requirement for equipment overhaul.

    Field order is part of the positional-construction contract; do not reorder.
    """

    sparepart_id: str        # item identifier
    quantity_required: int   # quantity needed for the overhaul
    lead_time: int           # procurement lead time (units per caller's convention)
    sparepart_name: str      # human-readable description
    unit_cost: float         # cost per unit
@dataclass
class SparepartStock:
    """Current sparepart stock information.

    Field order is part of the positional-construction contract; do not reorder.
    """

    sparepart_id: str    # item identifier
    sparepart_name: str  # human-readable description
    current_stock: int   # on-hand balance
    unit_cost: float     # cost per unit
    location: str        # storage location
@dataclass
class ProcurementRecord:
    """Purchase Order / Purchase Request record for a sparepart.

    Field order is part of the positional-construction contract; do not reorder.
    """

    po_pr_id: str                    # PO/PR document number
    sparepart_id: str                # item identifier
    sparepart_name: str              # human-readable description
    quantity: int                    # ordered quantity
    unit_cost: float                 # cost per unit
    total_cost: float                # extended line cost
    order_date: date                 # issue date of the document
    expected_delivery_date: date     # estimated arrival date
    status: ProcurementStatus        # current procurement lifecycle state
    po_vendor_delivery_date: date    # vendor-committed delivery date
class SparepartManager:
"""Manages sparepart availability and procurement for overhaul optimization"""
@ -532,8 +279,7 @@ class SparepartManager:
sparepart_id = requirement.sparepart_id
needed_quantity = requirement.quantity_required
sparepart_name = requirement.sparepart_name
sparepart_remark= requirement.remark
unit_cost = requirement.avg_cost if requirement.avg_cost > 0 else requirement.unit_cost
unit_cost = requirement.unit_cost
current_stock = adjusted_stocks.get(sparepart_id, 0)
@ -561,8 +307,7 @@ class SparepartManager:
'status': order.status.value,
'months_until_delivery': self._calculate_months_until_delivery(order.expected_delivery_date, target_month),
'is_on_time': self._is_delivery_on_time(order.expected_delivery_date, target_month),
'usage': 'covers_requirement',
'remark': sparepart_remark
'usage': 'covers_requirement'
}
pr_po_summary['existing_orders'].append(order_info)
pr_po_summary['total_existing_value'] += order.total_cost
@ -574,7 +319,6 @@ class SparepartManager:
missing_parts.append({
'sparepart_id': sparepart_id,
'sparepart_name': sparepart_name,
'remark': sparepart_remark,
'required': needed_quantity,
'current_stock': current_stock,
'ordered_quantity': total_ordered_quantity,
@ -606,7 +350,6 @@ class SparepartManager:
new_order = {
'sparepart_id': sparepart_id,
'sparepart_name': sparepart_name,
'remark': sparepart_remark,
'quantity_needed': shortage,
'unit_cost': unit_cost,
'total_cost': procurement_cost,
@ -913,7 +656,7 @@ class SparepartManager:
# Integration functions for database operations
async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session, app_db_session, analysis_window_months = None) -> SparepartManager:
async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session) -> SparepartManager:
"""Load sparepart data from database"""
# You'll need to implement these queries based on your database schema
# Get scope dates for analysis window
@ -921,40 +664,31 @@ async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session, app_db_s
# prev_oh_scope = await get_prev_oh(db_session=db_session, overhaul_session=scope)
analysis_start_date = prev_oh_scope.end_date
analysis_window_months = int(((scope.start_date - prev_oh_scope.end_date).days / 30) * 1.2) if not analysis_window_months else analysis_window_months
analysis_window_months = int(((scope.start_date - prev_oh_scope.end_date).days / 30) * 1.5)
sparepart_manager = SparepartManager(analysis_start_date, analysis_window_months)
start_date = prev_oh_scope.end_date
end_date = scope.start_date
# Load sparepart stocks
# Example query - adjust based on your schema
query = text("""SELECT
wm.inv_itemnum AS itemnum,
wm.inv_itemsetid AS itemsetid,
wm.inv_location AS location,
MAX(wm.inv_curbaltotal) AS curbaltotal,
AVG(wm.inv_avgcost) AS avgcost,
COALESCE(mspl.description, 'No description available') AS description
FROM public.wo_maximo_material wm
LEFT JOIN public.maximo_sparepart_pr_po_line mspl
ON wm.inv_itemnum = mspl.item_num
WHERE wm.inv_itemnum IS NOT NULL
GROUP BY wm.inv_itemnum, wm.inv_itemsetid, wm.inv_location, mspl.description
""")
query = text("""
SELECT
mi.id,
mi.itemnum,
mi.itemsetid,
mi."location",
mi.curbaltotal,
mi.avgcost,
mspl.description
FROM public.maximo_inventory mi
LEFT JOIN public.maximo_sparepart_pr_po_line mspl
ON mi.itemnum = mspl.item_num
""")
log.info("Fetch sparepart")
sparepart_stocks_query = await db_session.execute(query)
sparepart_remark = (await app_db_session.execute(
select(SparepartRemark)
)).scalars().all()
sparepart_remark_dict = {item.itemnum: item.remark for item in sparepart_remark}
for stock_record in sparepart_stocks_query:
stock = SparepartStock(
sparepart_id=stock_record.itemnum,
remark=sparepart_remark_dict.get(stock_record.itemnum),
sparepart_name=stock_record.description,
current_stock=stock_record.curbaltotal,
unit_cost=stock_record.avgcost,
@ -962,186 +696,27 @@ async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session, app_db_s
)
sparepart_manager.add_sparepart_stock(stock)
# parent_nums = []
# query = text("""
# WITH target_wo AS (
# -- Work orders from the given parent(s)
# SELECT
# wonum,
# xx_parent,
# location_tag AS asset_location
# FROM public.wo_maxim
# WHERE xx_parent = ANY(:parent_nums)
# ),
# target_materials AS (
# -- Materials directly linked to target WOs (new requirement data)
# SELECT
# tw.asset_location,
# wm.itemnum,
# wm.inv_avgcost
# SUM(wm.itemqty) AS total_qty_required
# FROM public.wo_maxim_material wm
# JOIN target_wo tw ON wm.wonum = tw.wonum
# WHERE wm.itemnum IS NOT NULL
# GROUP BY tw.asset_location, wm.itemnum
# ),
# -- Historical OH work orders (for lead time reference)
# oh_workorders AS (
# SELECT DISTINCT
# wonum,
# asset_location
# FROM public.wo_staging_maximo_2
# WHERE worktype = 'OH'
# AND asset_location IS NOT NULL
# AND asset_unit IN ('3', '00')
# ),
# sparepart_usage AS (
# SELECT
# oh.asset_location,
# mwm.itemnum,
# mwm.itemqty,
# mwm.wonum
# FROM oh_workorders oh
# INNER JOIN public.wo_maxim_material mwm
# ON oh.wonum = mwm.wonum
# ),
# location_sparepart_stats AS (
# SELECT
# asset_location,
# itemnum,
# COUNT(DISTINCT wonum) as total_wo_count,
# SUM(itemqty) as total_qty_used,
# AVG(itemqty) as avg_qty_per_wo
# FROM sparepart_usage
# GROUP BY asset_location, itemnum
# ),
# pr_po_combined AS (
# SELECT
# mspl.item_num,
# mspl.num,
# mspl.unit_cost,
# mspl.qty_ordered,
# MAX(CASE WHEN mspo.type = 'PR' THEN mspo.issue_date END) as issue_date,
# MAX(CASE WHEN mspo.type = 'PO' THEN mspo.vendeliverydate END) as vendeliverydate,
# MAX(CASE WHEN mspo.type = 'PO' THEN mspo.estimated_arrival_date END) as estimated_arrival_date
# FROM public.maximo_sparepart_pr_po_line mspl
# INNER JOIN public.maximo_sparepart_pr_po mspo
# ON mspl.num = mspo.num
# WHERE mspo.type IN ('PR', 'PO')
# GROUP BY mspl.item_num, mspl.num, mspl.unit_cost, mspl.qty_ordered
# ),
# leadtime_stats AS (
# SELECT
# item_num,
# ROUND(CAST(AVG(
# EXTRACT(EPOCH FROM (
# COALESCE(vendeliverydate, estimated_arrival_date) - issue_date
# )) / 86400 / 30.44
# ) AS NUMERIC), 1) as avg_leadtime_months,
# ROUND(CAST(MIN(
# EXTRACT(EPOCH FROM (
# COALESCE(vendeliverydate, estimated_arrival_date) - issue_date
# )) / 86400 / 30.44
# ) AS NUMERIC), 1) as min_leadtime_months,
# ROUND(CAST(MAX(
# EXTRACT(EPOCH FROM (
# COALESCE(vendeliverydate, estimated_arrival_date) - issue_date
# )) / 86400 / 30.44
# ) AS NUMERIC), 1) as max_leadtime_months,
# COUNT(*) as leadtime_sample_size,
# COUNT(CASE WHEN vendeliverydate IS NOT NULL THEN 1 END) as vendelivery_count,
# COUNT(CASE WHEN vendeliverydate IS NULL AND estimated_arrival_date IS NOT NULL THEN 1 END) as estimated_only_count
# FROM pr_po_combined
# WHERE issue_date IS NOT NULL
# AND COALESCE(vendeliverydate, estimated_arrival_date) IS NOT NULL
# AND COALESCE(vendeliverydate, estimated_arrival_date) > issue_date
# GROUP BY item_num
# ),
# cost_stats AS (
# SELECT
# item_num,
# ROUND(CAST(AVG(unit_cost) AS NUMERIC), 2) as avg_unit_cost,
# ROUND(CAST(MIN(unit_cost) AS NUMERIC), 2) as min_unit_cost,
# ROUND(CAST(MAX(unit_cost) AS NUMERIC), 2) as max_unit_cost,
# COUNT(*) as cost_sample_size,
# ROUND(CAST(AVG(unit_cost * qty_ordered) AS NUMERIC), 2) as avg_order_value,
# ROUND(CAST(SUM(unit_cost * qty_ordered) AS NUMERIC), 2) as total_value_ordered
# FROM pr_po_combined
# WHERE unit_cost IS NOT NULL AND unit_cost > 0
# GROUP BY item_num
# ),
# item_descriptions AS (
# SELECT DISTINCT
# item_num,
# FIRST_VALUE(description) OVER (
# PARTITION BY item_num
# ORDER BY created_at DESC NULLS LAST
# ) as description
# FROM public.maximo_sparepart_pr_po_line
# WHERE description IS NOT NULL
# )
# SELECT
# tr.asset_location,
# tr.itemnum,
# COALESCE(id.description, 'No description available') as item_description,
# tr.total_qty_required AS total_required_for_oh,
# tr.inv_avgcost,
# COALESCE(lt.avg_leadtime_months, 0) as avg_leadtime_months,
# COALESCE(cs.avg_unit_cost, 0) as avg_unit_cost,
# ROUND(CAST(COALESCE(tr.total_qty_required * cs.avg_unit_cost, 0) AS NUMERIC), 2) as estimated_cost_for_oh
# FROM target_materials tr
# LEFT JOIN item_descriptions id ON tr.itemnum = id.item_num
# LEFT JOIN leadtime_stats lt ON tr.itemnum = lt.item_num
# LEFT JOIN cost_stats cs ON tr.itemnum = cs.item_num
# ORDER BY tr.asset_location, tr.itemnum;
# """)
# equipment_requirements_query = await db_session.execute(query, {"parent_nums": parent_nums})
# equipment_requirements = defaultdict(list)
# for req_record in equipment_requirements_query:
# requirement = SparepartRequirement(
# sparepart_id=req_record.itemnum,
# quantity_required=float(req_record.total_required_for_oh or 0.0),
# lead_time=float(req_record.avg_leadtime_months or 0.0),
# sparepart_name=req_record.item_description,
# unit_cost=float(req_record.avg_unit_cost or 0.0),
# avg_cost=float(req_record.inv_avgcost or 0.0),
# )
# equipment_requirements[req_record.asset_location].append(requirement)
# for equipment_tag, requirements in equipment_requirements.items():
# sparepart_manager.add_equipment_requirements(equipment_tag, requirements)
# Load equipment sparepart requirements
# Load equipment sparepart requirements
# You'll need to create this table/relationship
query = text("""WITH oh_workorders AS (
-- First, get all OH work orders
SELECT DISTINCT
wonum,
asset_location
FROM public.wo_maximo ma
WHERE worktype = 'OH' AND asset_location IS NOT NULL and asset_unit IN ('3', '00') AND EXTRACT(YEAR FROM reportdate) >= 2019
),
current_oh as (
SELECT DISTINCT wonum, asset_location, asset_unit
FROM public.wo_maximo ma
WHERE ma.xx_parent IN ('155026', '155027', '155029', '155030')
),
sparepart_usage AS (
SELECT
oh.asset_location,
mwm.itemnum,
mwm.itemqty,
mwm.wonum,
mwm.inv_avgcost
FROM current_oh oh
INNER JOIN public.wo_maximo_material mwm
ON oh.wonum = mwm.wonum
),
-- First, get all OH work orders
SELECT DISTINCT
wonum,
asset_location
FROM public.wo_staging_maximo_2
WHERE worktype = 'OH' AND asset_location IS NOT NULL
),
sparepart_usage AS (
-- Get sparepart usage for OH work orders
SELECT
oh.asset_location,
mwm.itemnum,
mwm.itemqty,
mwm.wonum
FROM oh_workorders oh
INNER JOIN public.maximo_workorder_materials mwm
ON oh.wonum = mwm.wonum
),
location_sparepart_stats AS (
-- Calculate average usage per sparepart per location
SELECT
@ -1226,12 +801,6 @@ item_descriptions AS (
) as description
FROM public.maximo_sparepart_pr_po_line
WHERE description IS NOT NULL
),
item_inventory as (
SELECT
itemnum,
avgcost
FROM public.maximo_inventory
)
SELECT
lss.asset_location,
@ -1242,7 +811,6 @@ SELECT
ROUND(CAST(lss.avg_qty_per_wo AS NUMERIC), 2) as avg_qty_per_wo,
lss.min_qty_used,
lss.max_qty_used,
iin.inv_avgcost,
-- Lead time metrics
COALESCE(lt.avg_leadtime_months, 0) as avg_leadtime_months,
COALESCE(lt.min_leadtime_months, 0) as min_leadtime_months,
@ -1263,7 +831,6 @@ FROM location_sparepart_stats lss
LEFT JOIN item_descriptions id ON lss.itemnum = id.item_num
LEFT JOIN leadtime_stats lt ON lss.itemnum = lt.item_num
LEFT JOIN cost_stats cs ON lss.itemnum = cs.item_num
LEFT JOIN sparepart_usage iin ON lss.itemnum = iin.itemnum
ORDER BY lss.asset_location, lss.itemnum;""")
equipment_requirements_query = await db_session.execute(query)
@ -1275,9 +842,7 @@ ORDER BY lss.asset_location, lss.itemnum;""")
quantity_required=float(req_record.avg_qty_per_wo),
lead_time=float(req_record.avg_leadtime_months),
sparepart_name=req_record.item_description,
unit_cost=float(req_record.avg_unit_cost),
avg_cost=float(req_record.inv_avgcost or 0),
remark=sparepart_remark_dict.get(req_record.itemnum, "")
unit_cost=float(req_record.avg_unit_cost)
)
equipment_requirements[req_record.asset_location].append(requirement)
@ -1289,65 +854,54 @@ ORDER BY lss.asset_location, lss.itemnum;""")
# Load procurement records (PO/PR)
query = text("""
WITH active_pos AS (
-- Get all POs that are NOT complete (not in inventory yet) and NOT closed
SELECT
pl.item_num,
h.num as po_number,
pl.qty_received,
pl.qty_ordered,
h.estimated_arrival_date,
h.vendeliverydate,
h.receipts as po_receipts,
h.status as po_status,
pl.description,
pl.unit_cost,
pl.line_cost
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl
ON h.num = pl.num
WHERE h.type = 'PO'
-- Exclude POs where receipts = 'COMPLETE'
AND (h.receipts IS NULL OR h.receipts != 'COMPLETE')
-- Exclude closed POs
AND (h.status IS NULL OR h.status = 'APPR')
),
po_with_pr_date AS (
-- Force join with PR to ensure every PO has a PR
SELECT
po.*,
pr.issue_date as pr_issue_date
FROM active_pos po
INNER JOIN public.maximo_sparepart_pr_po pr
ON pr.num = po.po_number
AND pr.type = 'PR'
),
item_inventory AS (
-- Get all POs that are NOT complete (not in inventory yet) and NOT closed
SELECT
pl.item_num,
h.num as po_number,
pl.qty_received,
pl.qty_ordered,
h.estimated_arrival_date,
h.vendeliverydate,
h.receipts as po_receipts,
h.status as po_status,
pl.description,
pl.unit_cost,
pl.line_cost
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
WHERE h.type = 'PO'
-- Exclude POs where receipts = 'COMPLETE'
AND (h.receipts IS NULL OR h.receipts != 'COMPLETE')
-- Exclude closed POs
AND (h.status IS NULL OR h.status != 'CLOSE')
),
po_with_pr_date AS (
-- Join with PR to get the issue_date
SELECT
po.*,
pr.issue_date as pr_issue_date
FROM active_pos po
LEFT JOIN public.maximo_sparepart_pr_po pr
ON pr.num = po.po_number
AND pr.type = 'PR'
)
SELECT
itemnum,
MAX(inv_curbaltotal) AS current_balance_total,
AVG(inv_avgcost) AS avg_cost
FROM public.wo_maximo_material
WHERE inv_itemnum IS NOT NULL
GROUP BY itemnum
)
SELECT
po.item_num,
po.description,
po.line_cost,
po.unit_cost,
COALESCE(i.current_balance_total, 0) as current_balance_total,
po.po_number,
po.pr_issue_date,
po.po_status,
po.po_receipts,
COALESCE(po.qty_received, 0) as po_qty_received,
COALESCE(po.qty_ordered, 0) as po_qty_ordered,
po.estimated_arrival_date as po_estimated_arrival_date,
po.vendeliverydate as po_vendor_delivery_date
FROM po_with_pr_date po
LEFT JOIN item_inventory i
ON po.item_num = i.itemnum
ORDER BY po.item_num, po.pr_issue_date DESC;
po.item_num,
po.description,
po.line_cost,
po.unit_cost,
COALESCE(i.curbaltotal, 0) as current_balance_total,
po.po_number,
po.pr_issue_date,
po.po_status,
po.po_receipts,
COALESCE(po.qty_received, 0) as po_qty_received,
COALESCE(po.qty_ordered, 0) as po_qty_ordered,
po.estimated_arrival_date as po_estimated_arrival_date,
po.vendeliverydate as po_vendor_delivery_date
FROM po_with_pr_date po
LEFT JOIN public.maximo_inventory i ON po.item_num = i.itemnum
ORDER BY po.item_num, po.pr_issue_date DESC;
""")
# Execute the query
@ -1395,29 +949,4 @@ ORDER BY po.item_num, po.pr_issue_date DESC;
async def create_remark(*, db_session, collector_db_session, remark_in):
    """Create a remark for a spare-part item, or update it if one exists.

    Looks up ``SparepartRemark`` by ``remark_in.itemnum``; when a row is
    found its ``remark`` text is overwritten in place, otherwise a new row
    is inserted. Returns the persisted, refreshed model instance.
    ``collector_db_session`` is accepted for interface parity but unused.
    """
    # Fetch any existing remark for this item number.
    lookup = await db_session.execute(
        select(SparepartRemark).where(SparepartRemark.itemnum == remark_in.itemnum)
    )
    remark = lookup.scalar_one_or_none()

    if remark is None:
        # No remark yet for this item -> insert a fresh row.
        remark = SparepartRemark(
            itemnum=remark_in.itemnum,
            remark=remark_in.remark,
        )
        db_session.add(remark)
    else:
        # Remark already present -> overwrite its text.
        remark.remark = remark_in.remark

    await db_session.commit()
    await db_session.refresh(remark)
    return remark

@ -4,15 +4,15 @@ from fastapi import APIRouter, HTTPException, status
from fastapi.params import Query
from src.auth.service import CurrentUser
from src.database.core import DbSession, CollectorDbSession
from src.database.core import DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate
from src.models import StandardResponse
from .schema import (MasterEquipmentPagination, ScopeEquipmentCreate,
ScopeEquipmentPagination, ScopeEquipmentRead,
ScopeEquipmentUpdate)
from .service import (create, delete, get_all, get_all_master_equipment, update, get_history_standard_scope_wo_service)
from uuid import UUID
from .service import (create, delete, get_all, get_all_master_equipment, update)
router = APIRouter()
@ -47,13 +47,6 @@ async def create_scope_equipment(
return StandardResponse(data=scope, message="Data created successfully")
@router.get("/history/{oh_session_id}", response_model=StandardResponse[List[dict]])
async def get_history_standard_scope_wo(
db_session: DbSession, collector_db_session:CollectorDbSession, oh_session_id:UUID):
results = await get_history_standard_scope_wo_service(db_session=db_session, collector_db_session=collector_db_session, oh_session_id=oh_session_id)
return StandardResponse(data=results, message="Data retrieved successfully")
# @router.put("/{assetnum}", response_model=StandardResponse[ScopeEquipmentRead])
# async def update_scope_equipment(
# db_session: DbSession, assetnum: str, scope__equipment_in: ScopeEquipmentUpdate

@ -37,9 +37,8 @@ class ScopeEquipmentRead(ScopeEquipmentBase):
master_equipment: Optional[MasterEquipmentBase] = Field(None)
class ScopeEquipmentPagination(DefultBase):
class ScopeEquipmentPagination(Pagination):
items: List[ScopeEquipmentRead] = []
total: int
class MasterEquipmentRead(DefultBase):
assetnum: Optional[str] = Field(None, title="Asset Number")

@ -7,8 +7,7 @@ from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import selectinload
from src.auth.service import CurrentUser
from src.database.core import DbSession, CollectorDbSession
from src.utils import update_model
from src.database.core import DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate
from src.overhaul_scope.model import OverhaulScope
from src.standard_scope.enum import ScopeEquipmentType
@ -19,7 +18,6 @@ from src.workscope_group.model import MasterActivity
from src.workscope_group_maintenance_type.model import WorkscopeOHType
from src.overhaul_scope.model import MaintenanceType
from src.overhaul_scope.service import get as get_overhaul
from src.maximo.service import get_history_oh_wo
from .model import MasterEquipment, MasterEquipmentTree, StandardScope
from .schema import ScopeEquipmentCreate, ScopeEquipmentUpdate
from uuid import UUID
@ -62,15 +60,8 @@ async def get_all(*, common, oh_scope: Optional[str] = None):
# ).distinct()
)
results = await common['db_session'].execute(query)
items = results.scalars().all()
return {
"items": items,
"total": len(items)
}
results = await search_filter_sort_paginate(model=query, **common)
return results
async def get_by_oh_session_id(*, db_session: DbSession, oh_session_id: UUID):
@ -155,7 +146,9 @@ async def update(
update_data = scope_equipment_in.model_dump(exclude_defaults=True)
update_model(scope_equipment, update_data)
for field in data:
if field in update_data:
setattr(scope_equipment, field, update_data[field])
await db_session.commit()
@ -220,25 +213,3 @@ async def get_equipment_level_by_no(*, db_session: DbSession, level: int):
result = await db_session.execute(query)
return result.scalars().all()
async def get_history_standard_scope_wo_service(*, db_session: DbSession, collector_db_session: CollectorDbSession, oh_session_id: UUID):
    """Fetch historical OH work orders and annotate each row with the
    planned service cost taken from the session's standard-scope data.

    Rows whose ``location_tag`` has no planned scope get a cost of 0.
    """
    # Planned scopes plus the overhaul record for this session.
    # NOTE(review): assumes get_by_oh_session_id returns (scopes, overhaul).
    planning_oh_data = await get_by_oh_session_id(
        db_session=db_session, oh_session_id=oh_session_id
    )
    planning_scopes = planning_oh_data[0]
    overhaul = planning_oh_data[1]

    history_rows = await get_history_oh_wo(
        db_session=db_session,
        collector_db_session=collector_db_session,
        oh_session_id=oh_session_id,
        parent_wo_num=overhaul.wo_parent,
    )

    # location_tag -> planned service cost, used to enrich each history row.
    planned_cost_by_tag = {s.location_tag: s.service_cost for s in planning_scopes}
    for row in history_rows:
        row["planning_service_cost"] = planned_cost_by_tag.get(row["location_tag"], 0)
    return history_rows

@ -22,8 +22,7 @@ def parse_relative_expression(date_str: str) -> Optional[datetime]:
unit, offset = match.groups()
offset = int(offset) if offset else 0
# Use UTC timezone for consistency
jakarta_tz = pytz.timezone("Asia/Jakarta")
today = datetime.now(jakarta_tz)
today = datetime.now(timezone.tzname("Asia/Jakarta"))
if unit == "H":
# For hours, keep minutes and seconds
result_time = today + timedelta(hours=offset)
@ -65,7 +64,7 @@ def parse_date_string(date_str: str) -> Optional[datetime]:
minute=0,
second=0,
microsecond=0,
tzinfo=pytz.timezone("Asia/Jakarta"),
tzinfo=timezone.tzname("Asia/Jakarta"),
)
return dt
except ValueError:
@ -140,13 +139,3 @@ def save_to_pastebin(data, title="Result Log", expire_date="1H"):
return response.text # This will be the paste URL
else:
return f"Error: {response.status_code} - {response.text}"
def update_model(model, update_data: dict):
    """Copy values from ``update_data`` onto ``model``.

    Only attributes that already exist on the model are assigned; unknown
    keys in the dict are silently ignored. Returns ``None``.
    """
    for attr_name, new_value in update_data.items():
        if not hasattr(model, attr_name):
            continue  # skip keys the model does not define
        setattr(model, attr_name, new_value)

@ -47,8 +47,8 @@ async def get_activity(db_session: DbSession, activity_id: str):
return StandardResponse(data=activity, message="Data retrieved successfully")
@router.post(
"/update/{scope_equipment_activity_id}", response_model=StandardResponse[ActivityMaster]
@router.put(
"/{scope_equipment_activity_id}", response_model=StandardResponse[ActivityMaster]
)
async def update_scope(
db_session: DbSession, activity_in: ActivityMasterCreate, activity_id
@ -69,8 +69,8 @@ async def update_scope(
)
@router.post(
"/delete/{scope_equipment_activity_id}", response_model=StandardResponse[ActivityMaster]
@router.delete(
"/{scope_equipment_activity_id}", response_model=StandardResponse[ActivityMaster]
)
async def delete_scope(db_session: DbSession, activity_id: str):
activity = await get(db_session=db_session, activity_id=activity_id)

@ -6,7 +6,6 @@ from sqlalchemy.orm import joinedload, selectinload
from src.auth.service import CurrentUser
from src.database.core import DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate
from src.utils import update_model
from .model import MasterActivity
from .schema import ActivityMaster, ActivityMasterCreate
@ -44,7 +43,9 @@ async def update(
update_data = activity_in.model_dump(exclude_defaults=True)
update_model(activity, update_data)
for field in data:
if field in update_data:
setattr(activity, field, update_data[field])
await db_session.commit()

@ -6,7 +6,6 @@ from sqlalchemy.orm import joinedload, selectinload
from src.auth.service import CurrentUser
from src.database.core import DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate
from src.utils import update_model
from .model import MasterActivity
from .schema import ActivityMaster, ActivityMasterCreate
@ -44,7 +43,9 @@ async def update(
update_data = activity_in.model_dump(exclude_defaults=True)
update_model(activity, update_data)
for field in data:
if field in update_data:
setattr(activity, field, update_data[field])
await db_session.commit()

@ -1,68 +1,68 @@
# import asyncio
# from typing import AsyncGenerator, Generator
import asyncio
from typing import AsyncGenerator, Generator
# import pytest
# from httpx import AsyncClient
# from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
# from sqlalchemy.orm import sessionmaker
# from sqlalchemy.pool import StaticPool
# from sqlalchemy_utils import database_exists, drop_database
# from starlette.config import environ
# from starlette.testclient import TestClient
import pytest
from httpx import AsyncClient
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import StaticPool
from sqlalchemy_utils import database_exists, drop_database
from starlette.config import environ
from starlette.testclient import TestClient
# # from src.database import Base, get_db
# # from src.main import app
# from src.database import Base, get_db
# from src.main import app
# # Test database URL
# TEST_DATABASE_URL = "sqlite+aiosqlite:///:memory:"
# Test database URL
TEST_DATABASE_URL = "sqlite+aiosqlite:///:memory:"
# engine = create_async_engine(
# TEST_DATABASE_URL,
# connect_args={"check_same_thread": False},
# poolclass=StaticPool,
# )
engine = create_async_engine(
TEST_DATABASE_URL,
connect_args={"check_same_thread": False},
poolclass=StaticPool,
)
# async_session = sessionmaker(
# engine,
# class_=AsyncSession,
# expire_on_commit=False,
# autocommit=False,
# autoflush=False,
# )
async_session = sessionmaker(
engine,
class_=AsyncSession,
expire_on_commit=False,
autocommit=False,
autoflush=False,
)
# async def override_get_db() -> AsyncGenerator[AsyncSession, None]:
# async with async_session() as session:
# try:
# yield session
# await session.commit()
# except Exception:
# await session.rollback()
# raise
# finally:
# await session.close()
async def override_get_db() -> AsyncGenerator[AsyncSession, None]:
async with async_session() as session:
try:
yield session
await session.commit()
except Exception:
await session.rollback()
raise
finally:
await session.close()
# app.dependency_overrides[get_db] = override_get_db
app.dependency_overrides[get_db] = override_get_db
# @pytest.fixture(scope="session")
# def event_loop() -> Generator:
# loop = asyncio.get_event_loop_policy().new_event_loop()
# yield loop
# loop.close()
@pytest.fixture(scope="session")
def event_loop() -> Generator:
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
# @pytest.fixture(autouse=True)
# async def setup_db() -> AsyncGenerator[None, None]:
# async with engine.begin() as conn:
# await conn.run_sync(Base.metadata.create_all)
# yield
# async with engine.begin() as conn:
# await conn.run_sync(Base.metadata.drop_all)
@pytest.fixture(autouse=True)
async def setup_db() -> AsyncGenerator[None, None]:
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.create_all)
yield
async with engine.begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
# @pytest.fixture
# async def client() -> AsyncGenerator[AsyncClient, None]:
# async with AsyncClient(app=app, base_url="http://test") as client:
# yield client
@pytest.fixture
async def client() -> AsyncGenerator[AsyncClient, None]:
async with AsyncClient(app=app, base_url="http://test") as client:
yield client

@ -0,0 +1,3 @@
from sqlalchemy.orm import scoped_session, sessionmaker
# Shared scoped session bound lazily by the test fixtures; all factories
# attach to this single registry.
_session_factory = sessionmaker()
Session = scoped_session(_session_factory)

@ -0,0 +1,28 @@
import uuid
from datetime import datetime
from factory import (LazyAttribute, LazyFunction, SelfAttribute, Sequence,
SubFactory, post_generation)
from factory.alchemy import SQLAlchemyModelFactory
from factory.fuzzy import FuzzyChoice, FuzzyDateTime, FuzzyInteger, FuzzyText
from faker import Faker
from faker.providers import misc
from .database import Session
# from pytz import UTC
fake = Faker()
fake.add_provider(misc)
class BaseFactory(SQLAlchemyModelFactory):
    """Common base for all model factories.

    Binds every factory to the shared scoped ``Session`` and commits each
    created object immediately after build.
    """

    class Meta:
        """Factory configuration."""

        abstract = True  # base class only, never instantiated directly
        sqlalchemy_session = Session
        sqlalchemy_session_persistence = "commit"

@ -1,44 +0,0 @@
import pytest
from src.calculation_budget_constrains.service import greedy_selection, knapsack_selection
def test_greedy_selection():
    """Greedy picks highest-priority items first until the budget runs out."""
    candidates = [
        {"id": 1, "total_cost": 100, "priority_score": 10, "cost": 100},
        {"id": 2, "total_cost": 50, "priority_score": 20, "cost": 50},
        {"id": 3, "total_cost": 60, "priority_score": 15, "cost": 60},
    ]
    # Priority order: id 2 (20), id 3 (15), id 1 (10).
    # 50 + 60 = 110 fits the 120 budget; the 100-cost item no longer does.
    selected, excluded = greedy_selection(candidates, 120)
    chosen_ids = [item["id"] for item in selected]
    assert 2 in chosen_ids
    assert 3 in chosen_ids
    assert len(selected) == 2
    assert excluded[0]["id"] == 1
def test_knapsack_selection_basic():
    """Knapsack finds the optimal pair where greedy would not.

    item 1: value 10 / cost 60; items 2 and 3: value 7 / cost 35 each.
    With budget 70 a greedy pick grabs item 1 (value 10) and stalls,
    while the optimum is items 2 + 3 (value 14, budget fully used).
    """
    candidates = [
        {"id": 1, "total_cost": 60, "priority_score": 10},
        {"id": 2, "total_cost": 35, "priority_score": 7},
        {"id": 3, "total_cost": 35, "priority_score": 7},
    ]
    # scale=1: no value scaling needed for these small integers.
    selected, excluded = knapsack_selection(candidates, 70, scale=1)
    picked = [item["id"] for item in selected]
    assert 2 in picked
    assert 3 in picked
    assert len(selected) == 2
    assert 1 not in picked

@ -1,14 +0,0 @@
from src.context import set_request_id, get_request_id, set_user_id, get_user_id
def test_request_id_context():
    """set_request_id / get_request_id round-trip through the context var."""
    sample = "test-request-id-123"
    set_request_id(sample)
    assert get_request_id() == sample


def test_user_id_context():
    """set_user_id / get_user_id round-trip through the context var."""
    sample = "user-456"
    set_user_id(sample)
    assert get_user_id() == sample


def test_context_default_none():
    """Request id is either unset (None) or a non-empty string."""
    assert get_request_id() is None or get_request_id() != ""

@ -1,53 +0,0 @@
import pytest
from decimal import Decimal
from src.contribution_util import prod, system_availability, get_all_components, birnbaum_importance
def test_prod():
    """prod multiplies all elements; the empty product is 1.0."""
    assert prod([1, 2, 3]) == 6.0
    assert prod([0.5, 0.5]) == 0.25
    assert prod([]) == 1.0


def test_system_availability_series():
    """Series availability is the product of component availabilities."""
    avail = {"A": 0.9, "B": 0.8}
    # 0.9 * 0.8 = 0.72
    assert system_availability({"series": ["A", "B"]}, avail) == pytest.approx(0.72)


def test_system_availability_parallel():
    """Parallel availability is 1 - prod(1 - a_i)."""
    avail = {"A": 0.9, "B": 0.8}
    # 1 - 0.1 * 0.2 = 0.98
    assert system_availability({"parallel": ["A", "B"]}, avail) == pytest.approx(0.98)


def test_system_availability_nested():
    """A in series with (B parallel C)."""
    layout = {"series": ["A", {"parallel": ["B", "C"]}]}
    avail = {"A": 0.9, "B": 0.8, "C": 0.7}
    # B||C = 1 - 0.2 * 0.3 = 0.94; in series with A: 0.9 * 0.94 = 0.846
    assert system_availability(layout, avail) == pytest.approx(0.846)


def test_get_all_components():
    """All leaf component names are collected from a nested structure."""
    layout = {"series": ["A", {"parallel": ["B", "C"]}]}
    assert get_all_components(layout) == {"A", "B", "C"}


def test_birnbaum_importance():
    """Birnbaum importance is A_sys(comp=1) - A_sys(comp=0)."""
    layout = {"series": ["A", "B"]}
    avail = {"A": 0.9, "B": 0.8}
    # For A: 1 * 0.8 - 0 * 0.8 = 0.8; for B: 0.9 - 0 = 0.9
    assert birnbaum_importance(layout, avail, "A") == pytest.approx(0.8)
    assert birnbaum_importance(layout, avail, "B") == pytest.approx(0.9)

@ -1,31 +0,0 @@
import pytest
from sqlalchemy.exc import IntegrityError, DataError, DBAPIError
from src.exceptions import handle_sqlalchemy_error
def test_handle_sqlalchemy_error_unique_constraint():
    """Duplicate-key violations are reported as 409 Conflict."""
    exc = IntegrityError(
        "Unique constraint", params=None, orig=Exception("unique constraint violation")
    )
    message, code = handle_sqlalchemy_error(exc)
    assert code == 409
    assert "already exists" in message


def test_handle_sqlalchemy_error_foreign_key():
    """Foreign-key violations are reported as 400 with a lookup hint."""
    exc = IntegrityError(
        "Foreign key constraint", params=None, orig=Exception("foreign key constraint violation")
    )
    message, code = handle_sqlalchemy_error(exc)
    assert code == 400
    assert "Related record not found" in message


def test_handle_sqlalchemy_error_data_error():
    """Malformed / out-of-range data errors are reported as 400."""
    exc = DataError("Invalid data", params=None, orig=None)
    message, code = handle_sqlalchemy_error(exc)
    assert code == 400
    assert "Invalid data" in message


def test_handle_sqlalchemy_error_generic_dbapi():
    """Any other DBAPI error falls through to a generic 500."""

    class MockError:
        def __str__(self):
            return "Some generic database error"

    exc = DBAPIError("Generic error", params=None, orig=MockError())
    message, code = handle_sqlalchemy_error(exc)
    assert code == 500
    assert "Database error" in message

@ -1,56 +0,0 @@
import pytest
from unittest.mock import AsyncMock, MagicMock
from fastapi import HTTPException
from src.middleware import RequestValidationMiddleware
@pytest.mark.asyncio
async def test_request_validation_middleware_query_length():
    """Query strings longer than 2000 characters are rejected with 414."""
    mw = RequestValidationMiddleware(app=MagicMock())
    req = MagicMock()
    req.url.query = "a" * 2001
    with pytest.raises(HTTPException) as exc:
        await mw.dispatch(req, AsyncMock())
    assert exc.value.status_code == 414


@pytest.mark.asyncio
async def test_request_validation_middleware_too_many_params():
    """More than 50 query parameters are rejected with 400."""
    mw = RequestValidationMiddleware(app=MagicMock())
    req = MagicMock()
    req.url.query = "a=1"
    req.query_params.multi_items.return_value = [("param", "val")] * 51
    with pytest.raises(HTTPException) as exc:
        await mw.dispatch(req, AsyncMock())
    assert exc.value.status_code == 400
    assert "Too many query parameters" in exc.value.detail


@pytest.mark.asyncio
async def test_request_validation_middleware_xss_detection():
    """A script tag in a query value triggers the XSS guard."""
    mw = RequestValidationMiddleware(app=MagicMock())
    req = MagicMock()
    req.url.query = "q=<script>"
    req.query_params.multi_items.return_value = [("q", "<script>")]
    with pytest.raises(HTTPException) as exc:
        await mw.dispatch(req, AsyncMock())
    assert exc.value.status_code == 400
    assert "Potential XSS payload" in exc.value.detail


@pytest.mark.asyncio
async def test_request_validation_middleware_pagination_logic():
    """Page size must be at most 50 and a multiple of 5."""
    mw = RequestValidationMiddleware(app=MagicMock())
    req = MagicMock()
    req.url.query = "size=55"
    req.query_params.multi_items.return_value = [("size", "55")]
    req.headers = {}
    with pytest.raises(HTTPException) as exc:
        await mw.dispatch(req, AsyncMock())
    assert exc.value.status_code == 400
    assert "cannot exceed 50" in exc.value.detail
    # Within the limit but not a multiple of 5 also fails.
    req.query_params.multi_items.return_value = [("size", "7")]
    with pytest.raises(HTTPException) as exc:
        await mw.dispatch(req, AsyncMock())
    assert "must be a multiple of 5" in exc.value.detail

@ -1,64 +0,0 @@
import pytest
import math
from src.calculation_target_reliability.service import calculate_asset_eaf_contributions
def test_calculate_asset_eaf_contributions_basic():
    """Contributions are computed, sorted by Birnbaum, and positive."""
    # 7000 up + 1000 down -> 8000 total hours at plant level.
    plant_result = {"total_uptime": 7000, "total_downtime": 1000, "eaf": 85.0}
    eq_results = [
        {
            "aeros_node": {"node_name": "Asset1"},
            "num_events": 5,
            "contribution_factor": 0.5,
            "contribution": 0.1,  # Birnbaum importance
            "availability": 0.9,
            "total_downtime": 100,
        },
        {
            "aeros_node": {"node_name": "Asset2"},
            "num_events": 2,
            "contribution_factor": 0.3,
            "contribution": 0.05,
            "availability": 0.95,
            "total_downtime": 50,
        },
    ]
    # 2% EAF gap, 500 h scheduled outage.
    rows = calculate_asset_eaf_contributions(
        plant_result, eq_results, ["Asset1", "Asset2"], 2.0, 500
    )
    assert len(rows) == 2
    # Sorted descending by Birnbaum importance.
    assert rows[0].node["node_name"] == "Asset1"
    assert rows[0].birbaum > rows[1].birbaum
    # The required improvement and its impact are strictly positive.
    assert rows[0].required_improvement > 0
    assert rows[0].improvement_impact > 0


def test_calculate_asset_eaf_contributions_skipping():
    """Assets with zero events and zero downtime are skipped."""
    plant_result = {"total_uptime": 1000, "total_downtime": 0, "eaf": 100}
    idle_asset = {
        "aeros_node": {"node_name": "Asset1"},
        "num_events": 0,
        "contribution_factor": 0.5,
        "contribution": 0.1,
        "availability": 1.0,
        "total_downtime": 0,
    }
    rows = calculate_asset_eaf_contributions(plant_result, [idle_asset], ["Asset1"], 1.0, 0)
    assert len(rows) == 0

@ -1,49 +0,0 @@
import pytest
from pydantic import ValidationError
from src.database.schema import CommonParams
from src.overhaul.schema import OverhaulCriticalParts
def test_common_params_valid():
    """Aliased query params populate the snake_case model fields."""
    params = CommonParams(page=1, itemsPerPage=10, q="search test", all=1)
    assert params.page == 1
    assert params.items_per_page == 10
    assert params.query_str == "search test"
    assert params.is_all is True


def test_common_params_page_constraints():
    """page must be a strictly positive integer."""
    for bad_page in (0, -1):
        with pytest.raises(ValidationError):
            CommonParams(page=bad_page)


def test_common_params_items_per_page_constraints():
    """itemsPerPage must be a multiple of 5 and at most 50."""
    with pytest.raises(ValidationError):
        CommonParams(itemsPerPage=7)  # not a multiple of 5
    with pytest.raises(ValidationError):
        CommonParams(itemsPerPage=55)  # over the maximum
    # Valid multiples of 5 at the boundaries.
    assert CommonParams(itemsPerPage=50).items_per_page == 50
    assert CommonParams(itemsPerPage=5).items_per_page == 5


def test_overhaul_critical_parts_valid():
    """criticalParts accepts a plain list of strings."""
    parts = OverhaulCriticalParts(criticalParts=["Part A", "Part B"])
    assert parts.criticalParts == ["Part A", "Part B"]


def test_overhaul_critical_parts_invalid():
    """criticalParts is mandatory and must be a list."""
    with pytest.raises(ValidationError):
        OverhaulCriticalParts()
    with pytest.raises(ValidationError):
        OverhaulCriticalParts(criticalParts="Not a list")

@ -1,58 +0,0 @@
import pytest
from fastapi import HTTPException
from src.middleware import (
inspect_value,
inspect_json,
has_control_chars,
XSS_PATTERN,
SQLI_PATTERN
)
def test_xss_patterns():
    """The XSS regex matches common attack fragments."""
    for payload in ("<script>", "javascript:", "onerror=", "onload=", "<svg", "<img"):
        assert XSS_PATTERN.search(payload) is not None


def test_sqli_patterns():
    """The SQL-injection regex matches common attack fragments."""
    for payload in ("UNION", "SELECT", "INSERT", "DELETE", "DROP", "--", "OR 1=1"):
        assert SQLI_PATTERN.search(payload) is not None


def test_inspect_value_raises():
    """inspect_value raises 400 for both XSS and SQLi payloads."""
    with pytest.raises(HTTPException) as exc:
        inspect_value("<script>", "source")
    assert exc.value.status_code == 400
    assert "Potential XSS payload" in exc.value.detail

    with pytest.raises(HTTPException) as exc:
        inspect_value("UNION SELECT", "source")
    assert exc.value.status_code == 400
    assert "Potential SQL injection" in exc.value.detail


def test_inspect_json_raises():
    """Prototype-pollution style keys are rejected in JSON bodies."""
    with pytest.raises(HTTPException) as exc:
        inspect_json({"__proto__": "polluted"})
    assert exc.value.status_code == 400
    assert "Forbidden JSON key" in exc.value.detail


def test_has_control_chars():
    """NUL is flagged; newlines and plain text are not."""
    assert has_control_chars("normal string") is False
    assert has_control_chars("string with \x00 null") is True
    assert has_control_chars("string with \n newline") is False

@ -1,33 +0,0 @@
import pytest
from datetime import datetime, timedelta
from src.calculation_target_reliability.utils import generate_down_periods
def test_generate_down_periods_count():
    """At most num_periods are produced and all lie inside the range."""
    window_start = datetime(2025, 1, 1)
    window_end = datetime(2025, 1, 31)
    periods = generate_down_periods(window_start, window_end, num_periods=5)
    # Overlap avoidance may yield fewer than the requested 5 periods.
    assert len(periods) <= 5
    for begin, finish in periods:
        assert begin >= window_start
        assert finish <= window_end
        assert begin < finish


def test_generate_down_periods_no_overlap():
    """Consecutive periods never overlap."""
    periods = generate_down_periods(
        datetime(2025, 1, 1), datetime(2025, 1, 31), num_periods=10
    )
    for earlier, later in zip(periods, periods[1:]):
        assert earlier[1] <= later[0]


def test_generate_down_periods_too_small_range():
    """A minimum duration longer than the range yields no periods."""
    # Requesting a 5-day period inside a 1-day window cannot succeed.
    periods = generate_down_periods(
        datetime(2025, 1, 1), datetime(2025, 1, 2), num_periods=1, min_duration=5
    )
    assert len(periods) == 0

@ -1,36 +0,0 @@
import pytest
from datetime import datetime, timedelta
from src.utils import parse_relative_expression, parse_date_string
def test_parse_relative_expression_days():
    """T, T+n and T-n all resolve to datetime values."""
    base = parse_relative_expression("T")
    assert base is not None
    assert isinstance(base, datetime)
    assert parse_relative_expression("T+5") is not None
    assert parse_relative_expression("T-3") is not None


def test_parse_relative_expression_invalid():
    """Non-T expressions and malformed offsets return None."""
    for bad_expr in ("abc", "123", "T++1"):
        assert parse_relative_expression(bad_expr) is None


def test_parse_date_string_formats():
    """ISO (YYYY-MM-DD) and DD-MM-YYYY inputs parse to the same date."""
    for text in ("2024-11-08", "08-11-2024"):
        parsed = parse_date_string(text)
        assert parsed.year == 2024
        assert parsed.month == 11
        assert parsed.day == 8


def test_parse_date_string_invalid():
    """Unparseable input raises ValueError."""
    with pytest.raises(ValueError):
        parse_date_string("invalid-date")
Loading…
Cancel
Save