Compare commits

1 commit

Author: Cizz22 · SHA1: aa106a5a85 · Message: WIP: tm using relibility · Date: 3 months ago

.env (20 lines changed)

@@ -9,15 +9,15 @@ DATABASE_CREDENTIAL_USER=postgres
 DATABASE_CREDENTIAL_PASSWORD=postgres
 DATABASE_NAME=digital_twin
-COLLECTOR_HOSTNAME=192.168.1.82
-COLLECTOR_PORT=1111
-COLLECTOR_CREDENTIAL_USER=digital_twin
-COLLECTOR_CREDENTIAL_PASSWORD=Pr0jec7@D!g!tTwiN
-COLLECTOR_NAME=digital_twin
-# COLLECTOR_HOSTNAME=192.168.1.86
-# COLLECTOR_PORT=5432
-# COLLECTOR_CREDENTIAL_USER=postgres
-# COLLECTOR_CREDENTIAL_PASSWORD=postgres
-# COLLECTOR_NAME=digital_twin
+# COLLECTOR_HOSTNAME=192.168.1.82
+# COLLECTOR_PORT=1111
+# COLLECTOR_CREDENTIAL_USER=digital_twin
+# COLLECTOR_CREDENTIAL_PASSWORD=Pr0jec7@D!g!tTwiN
+# COLLECTOR_NAME=digital_twin
+COLLECTOR_HOSTNAME=192.168.1.86
+COLLECTOR_PORT=5432
+COLLECTOR_CREDENTIAL_USER=postgres
+COLLECTOR_CREDENTIAL_PASSWORD=postgres
+COLLECTOR_NAME=digital_twin

Jenkinsfile (vendored, 10 lines changed)

@@ -61,16 +61,6 @@ pipeline {
 """
 }
 }
-// stage('Watchtower Deployment') {
-// steps {
-// sh """
-// # Push both tags
-// docker push ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:${GIT_COMMIT_HASH}
-// docker push ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:latest
-// """
-// }
-// }
 // stage('Deploy') {
 // steps {

poetry.lock (generated, 186 lines changed)

@@ -1,17 +1,5 @@
 # This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.
-[[package]]
-name = "absl-py"
-version = "2.3.1"
-description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
-optional = false
-python-versions = ">=3.8"
-groups = ["main"]
-files = [
-{file = "absl_py-2.3.1-py3-none-any.whl", hash = "sha256:eeecf07f0c2a93ace0772c92e596ace6d3d3996c042b2128459aaae2a76de11d"},
-{file = "absl_py-2.3.1.tar.gz", hash = "sha256:a97820526f7fbfd2ec1bce83f3f25e3a14840dac0d8e02a0b71cd75db3f77fc9"},
-]
 [[package]]
 name = "aiohappyeyeballs"
 version = "2.6.1"
@ -1042,18 +1030,6 @@ files = [
 [package.extras]
 all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
-[[package]]
-name = "immutabledict"
-version = "4.2.1"
-description = "Immutable wrapper around dictionaries (a fork of frozendict)"
-optional = false
-python-versions = ">=3.8"
-groups = ["main"]
-files = [
-{file = "immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373"},
-{file = "immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc"},
-]
 [[package]]
 name = "importlib-resources"
 version = "6.4.5"
@ -1444,63 +1420,6 @@ rsa = ["cryptography (>=3.0.0)"]
signals = ["blinker (>=1.4.0)"] signals = ["blinker (>=1.4.0)"]
signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
[[package]]
name = "ortools"
version = "9.14.6206"
description = "Google OR-Tools python libraries and modules"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "ortools-9.14.6206-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:6e2364edd1577cd094e7c7121ec5fb0aa462a69a78ce29cdc40fa45943ff0091"},
{file = "ortools-9.14.6206-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164b726b4d358ae68a018a52ff1999c0646d6f861b33676c2c83e2ddb60cfa13"},
{file = "ortools-9.14.6206-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ebb0e210969cc3246fe78dadf9038936a3a18edc8156e23a394e2bbcec962431"},
{file = "ortools-9.14.6206-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:174de2f04c106c7dcc5989560f2c0e065e78fba0ad0d1fd029897582f4823c3a"},
{file = "ortools-9.14.6206-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e6d994ebcf9cbdda1e20a75662967124e7e6ffd707c7f60b2db1a11f2104d384"},
{file = "ortools-9.14.6206-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5763472f8b05072c96c36c4eafadd9f6ffcdab38a81d8f0142fc408ad52a4342"},
{file = "ortools-9.14.6206-cp310-cp310-win_amd64.whl", hash = "sha256:6711516f837f06836ff9fda66fe4337b88c214f2ba6a921b84d3b05876f1fa8c"},
{file = "ortools-9.14.6206-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:8bcd8481846090585a4fac82800683555841685c49fa24578ad1e48a37918568"},
{file = "ortools-9.14.6206-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5af2bbf2fff7d922ba036e27d7ff378abecb24749380c86a77fa6208d5ba35cd"},
{file = "ortools-9.14.6206-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a6ab43490583c4bbf0fff4e51bb1c15675d5651c2e8e12ba974fd08e8c05a48f"},
{file = "ortools-9.14.6206-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9aa2c0c50a765c6a060960dcb0207bd6aeb6341f5adacb3d33e613b7e7409428"},
{file = "ortools-9.14.6206-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:64ec63fd92125499e9ca6b72700406dda161eefdfef92f04c35c5150391f89a4"},
{file = "ortools-9.14.6206-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8651008f05257471f45a919ade5027afa12ab6f7a4fdf0a8bcc18c92032f8571"},
{file = "ortools-9.14.6206-cp311-cp311-win_amd64.whl", hash = "sha256:ca60877830a631545234e83e7f6bd55830334a4d0c2b51f1669b1f2698d58b84"},
{file = "ortools-9.14.6206-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:e38c8c4a184820cbfdb812a8d484f6506cf16993ce2a95c88bc1c9d23b17c63e"},
{file = "ortools-9.14.6206-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db685073cbed9f8bfaa744f5e883f3dea57c93179b0abe1788276fd3b074fa61"},
{file = "ortools-9.14.6206-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4bfb8bffb29991834cf4bde7048ca8ee8caed73e8dd21e5ec7de99a33bbfea0"},
{file = "ortools-9.14.6206-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eb464a698837e7f90ca5f9b3d748b6ddf553198a70032bc77824d1cd88695d2b"},
{file = "ortools-9.14.6206-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8f33deaeb7c3dda8ca1d29c5b9aa9c3a4f2ca9ecf34f12a1f809bb2995f41274"},
{file = "ortools-9.14.6206-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:086e7c2dc4f23efffb20a5e20f618c7d6adb99b2d94f684cab482387da3bc434"},
{file = "ortools-9.14.6206-cp312-cp312-win_amd64.whl", hash = "sha256:17c13b0bfde17ac57789ad35243edf1318ecd5db23cf949b75ab62480599f188"},
{file = "ortools-9.14.6206-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:8d0df7eef8ba53ad235e29018389259bad2e667d9594b9c2a412ed6a5756bd4e"},
{file = "ortools-9.14.6206-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:57dfe10844ce8331634d4723040fe249263fd490407346efc314c0bc656849b5"},
{file = "ortools-9.14.6206-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5c0c2c00a6e5d5c462e76fdda7dbd40d0f9139f1df4211d34b36906696248020"},
{file = "ortools-9.14.6206-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:38044cf39952d93cbcc02f6acdbe0a9bd3628fbf17f0d7eb0374060fa028c22e"},
{file = "ortools-9.14.6206-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:98564de773d709e1e49cb3c32f6917589c314f047786d88bd5f324c0eb7be96e"},
{file = "ortools-9.14.6206-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:80528b0ac72dc3de00cbeef2ce028517a476450b5877b1cda1b8ecb9fa98505e"},
{file = "ortools-9.14.6206-cp313-cp313-win_amd64.whl", hash = "sha256:47b1b15dcb085d32c61621b790259193aefa9e4577abadf233d47fbe7d0b81ef"},
{file = "ortools-9.14.6206-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d26a0f9ed97ef9d3384a9069923585f5f974c3fde555a41f4d6381fbe7840bc4"},
{file = "ortools-9.14.6206-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d40d8141667d47405f296a9f687058c566d7816586e9a672b59e9fcec8493133"},
{file = "ortools-9.14.6206-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:aefea81ed81aa937873efc520381785ed65380e52917f492ab566f46bbb5660d"},
{file = "ortools-9.14.6206-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f044bb277db3ab6a1b958728fe1cf14ca87c3800d67d7b321d876b48269340f6"},
{file = "ortools-9.14.6206-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:070dc7cebfa0df066acb6b9a6d02339351be8f91b2352b782ee7f40412207e20"},
{file = "ortools-9.14.6206-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5eb558a03b4ada501ecdea7b89f0d3bdf2cc6752e1728759ccf27923f592a8c2"},
{file = "ortools-9.14.6206-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:646329fa74a5c48c591b7fabfd26743f6d2de4e632b3b96ec596c47bfe19177a"},
{file = "ortools-9.14.6206-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa5161924f35b8244295acd0fab2a8171bb08ef8d5cfaf1913a21274475704cc"},
{file = "ortools-9.14.6206-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e253526a026ae194aed544a0d065163f52a0c9cb606a1061c62df546877d5452"},
{file = "ortools-9.14.6206-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:dcb496ef633d884036770783f43bf8a47ff253ecdd8a8f5b95f00276ec241bfd"},
{file = "ortools-9.14.6206-cp39-cp39-win_amd64.whl", hash = "sha256:2733f635675de631fdc7b1611878ec9ee2f48a26434b7b3c07d0a0f535b92e03"},
]
[package.dependencies]
absl-py = ">=2.0.0"
immutabledict = ">=3.0.0"
numpy = ">=1.13.3"
pandas = ">=2.0.0"
protobuf = ">=6.31.1,<6.32"
typing-extensions = ">=4.12"
[[package]] [[package]]
name = "packaging" name = "packaging"
version = "24.2" version = "24.2"
@ -1743,21 +1662,23 @@ testing = ["google-api-core (>=1.31.5)"]
 [[package]]
 name = "protobuf"
-version = "6.31.1"
+version = "5.29.0"
 description = ""
 optional = false
-python-versions = ">=3.9"
+python-versions = ">=3.8"
 groups = ["main"]
 files = [
-{file = "protobuf-6.31.1-cp310-abi3-win32.whl", hash = "sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9"},
-{file = "protobuf-6.31.1-cp310-abi3-win_amd64.whl", hash = "sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447"},
-{file = "protobuf-6.31.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402"},
-{file = "protobuf-6.31.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39"},
-{file = "protobuf-6.31.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6"},
-{file = "protobuf-6.31.1-cp39-cp39-win32.whl", hash = "sha256:0414e3aa5a5f3ff423828e1e6a6e907d6c65c1d5b7e6e975793d5590bdeecc16"},
-{file = "protobuf-6.31.1-cp39-cp39-win_amd64.whl", hash = "sha256:8764cf4587791e7564051b35524b72844f845ad0bb011704c3736cce762d8fe9"},
-{file = "protobuf-6.31.1-py3-none-any.whl", hash = "sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e"},
-{file = "protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a"},
+{file = "protobuf-5.29.0-cp310-abi3-win32.whl", hash = "sha256:ea7fb379b257911c8c020688d455e8f74efd2f734b72dc1ea4b4d7e9fd1326f2"},
+{file = "protobuf-5.29.0-cp310-abi3-win_amd64.whl", hash = "sha256:34a90cf30c908f47f40ebea7811f743d360e202b6f10d40c02529ebd84afc069"},
+{file = "protobuf-5.29.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:c931c61d0cc143a2e756b1e7f8197a508de5365efd40f83c907a9febf36e6b43"},
+{file = "protobuf-5.29.0-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:85286a47caf63b34fa92fdc1fd98b649a8895db595cfa746c5286eeae890a0b1"},
+{file = "protobuf-5.29.0-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:0d10091d6d03537c3f902279fcf11e95372bdd36a79556311da0487455791b20"},
+{file = "protobuf-5.29.0-cp38-cp38-win32.whl", hash = "sha256:0cd67a1e5c2d88930aa767f702773b2d054e29957432d7c6a18f8be02a07719a"},
+{file = "protobuf-5.29.0-cp38-cp38-win_amd64.whl", hash = "sha256:e467f81fdd12ded9655cea3e9b83dc319d93b394ce810b556fb0f421d8613e86"},
+{file = "protobuf-5.29.0-cp39-cp39-win32.whl", hash = "sha256:17d128eebbd5d8aee80300aed7a43a48a25170af3337f6f1333d1fac2c6839ac"},
+{file = "protobuf-5.29.0-cp39-cp39-win_amd64.whl", hash = "sha256:6c3009e22717c6cc9e6594bb11ef9f15f669b19957ad4087214d69e08a213368"},
+{file = "protobuf-5.29.0-py3-none-any.whl", hash = "sha256:88c4af76a73183e21061881360240c0cdd3c39d263b4e8fb570aaf83348d608f"},
+{file = "protobuf-5.29.0.tar.gz", hash = "sha256:445a0c02483869ed8513a585d80020d012c6dc60075f96fa0563a724987b1001"},
 ]
[[package]] [[package]]
@ -2242,6 +2163,85 @@ files = [
[package.dependencies] [package.dependencies]
pyasn1 = ">=0.1.3" pyasn1 = ">=0.1.3"
[[package]]
name = "scipy"
version = "1.16.2"
description = "Fundamental algorithms for scientific computing in Python"
optional = false
python-versions = ">=3.11"
groups = ["main"]
files = [
{file = "scipy-1.16.2-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:6ab88ea43a57da1af33292ebd04b417e8e2eaf9d5aa05700be8d6e1b6501cd92"},
{file = "scipy-1.16.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c95e96c7305c96ede73a7389f46ccd6c659c4da5ef1b2789466baeaed3622b6e"},
{file = "scipy-1.16.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:87eb178db04ece7c698220d523c170125dbffebb7af0345e66c3554f6f60c173"},
{file = "scipy-1.16.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:4e409eac067dcee96a57fbcf424c13f428037827ec7ee3cb671ff525ca4fc34d"},
{file = "scipy-1.16.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e574be127bb760f0dad24ff6e217c80213d153058372362ccb9555a10fc5e8d2"},
{file = "scipy-1.16.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f5db5ba6188d698ba7abab982ad6973265b74bb40a1efe1821b58c87f73892b9"},
{file = "scipy-1.16.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec6e74c4e884104ae006d34110677bfe0098203a3fec2f3faf349f4cb05165e3"},
{file = "scipy-1.16.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:912f46667d2d3834bc3d57361f854226475f695eb08c08a904aadb1c936b6a88"},
{file = "scipy-1.16.2-cp311-cp311-win_amd64.whl", hash = "sha256:91e9e8a37befa5a69e9cacbe0bcb79ae5afb4a0b130fd6db6ee6cc0d491695fa"},
{file = "scipy-1.16.2-cp311-cp311-win_arm64.whl", hash = "sha256:f3bf75a6dcecab62afde4d1f973f1692be013110cad5338007927db8da73249c"},
{file = "scipy-1.16.2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:89d6c100fa5c48472047632e06f0876b3c4931aac1f4291afc81a3644316bb0d"},
{file = "scipy-1.16.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ca748936cd579d3f01928b30a17dc474550b01272d8046e3e1ee593f23620371"},
{file = "scipy-1.16.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:fac4f8ce2ddb40e2e3d0f7ec36d2a1e7f92559a2471e59aec37bd8d9de01fec0"},
{file = "scipy-1.16.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:033570f1dcefd79547a88e18bccacff025c8c647a330381064f561d43b821232"},
{file = "scipy-1.16.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ea3421209bf00c8a5ef2227de496601087d8f638a2363ee09af059bd70976dc1"},
{file = "scipy-1.16.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f66bd07ba6f84cd4a380b41d1bf3c59ea488b590a2ff96744845163309ee8e2f"},
{file = "scipy-1.16.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5e9feab931bd2aea4a23388c962df6468af3d808ddf2d40f94a81c5dc38f32ef"},
{file = "scipy-1.16.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03dfc75e52f72cf23ec2ced468645321407faad8f0fe7b1f5b49264adbc29cb1"},
{file = "scipy-1.16.2-cp312-cp312-win_amd64.whl", hash = "sha256:0ce54e07bbb394b417457409a64fd015be623f36e330ac49306433ffe04bc97e"},
{file = "scipy-1.16.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a8ffaa4ac0df81a0b94577b18ee079f13fecdb924df3328fc44a7dc5ac46851"},
{file = "scipy-1.16.2-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:84f7bf944b43e20b8a894f5fe593976926744f6c185bacfcbdfbb62736b5cc70"},
{file = "scipy-1.16.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:5c39026d12edc826a1ef2ad35ad1e6d7f087f934bb868fc43fa3049c8b8508f9"},
{file = "scipy-1.16.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e52729ffd45b68777c5319560014d6fd251294200625d9d70fd8626516fc49f5"},
{file = "scipy-1.16.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:024dd4a118cccec09ca3209b7e8e614931a6ffb804b2a601839499cb88bdf925"},
{file = "scipy-1.16.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7a5dc7ee9c33019973a470556081b0fd3c9f4c44019191039f9769183141a4d9"},
{file = "scipy-1.16.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c2275ff105e508942f99d4e3bc56b6ef5e4b3c0af970386ca56b777608ce95b7"},
{file = "scipy-1.16.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:af80196eaa84f033e48444d2e0786ec47d328ba00c71e4299b602235ffef9acb"},
{file = "scipy-1.16.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9fb1eb735fe3d6ed1f89918224e3385fbf6f9e23757cacc35f9c78d3b712dd6e"},
{file = "scipy-1.16.2-cp313-cp313-win_amd64.whl", hash = "sha256:fda714cf45ba43c9d3bae8f2585c777f64e3f89a2e073b668b32ede412d8f52c"},
{file = "scipy-1.16.2-cp313-cp313-win_arm64.whl", hash = "sha256:2f5350da923ccfd0b00e07c3e5cfb316c1c0d6c1d864c07a72d092e9f20db104"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:53d8d2ee29b925344c13bda64ab51785f016b1b9617849dac10897f0701b20c1"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:9e05e33657efb4c6a9d23bd8300101536abd99c85cca82da0bffff8d8764d08a"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:7fe65b36036357003b3ef9d37547abeefaa353b237e989c21027b8ed62b12d4f"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:6406d2ac6d40b861cccf57f49592f9779071655e9f75cd4f977fa0bdd09cb2e4"},
{file = "scipy-1.16.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ff4dc42bd321991fbf611c23fc35912d690f731c9914bf3af8f417e64aca0f21"},
{file = "scipy-1.16.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:654324826654d4d9133e10675325708fb954bc84dae6e9ad0a52e75c6b1a01d7"},
{file = "scipy-1.16.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:63870a84cd15c44e65220eaed2dac0e8f8b26bbb991456a033c1d9abfe8a94f8"},
{file = "scipy-1.16.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:fa01f0f6a3050fa6a9771a95d5faccc8e2f5a92b4a2e5440a0fa7264a2398472"},
{file = "scipy-1.16.2-cp313-cp313t-win_amd64.whl", hash = "sha256:116296e89fba96f76353a8579820c2512f6e55835d3fad7780fece04367de351"},
{file = "scipy-1.16.2-cp313-cp313t-win_arm64.whl", hash = "sha256:98e22834650be81d42982360382b43b17f7ba95e0e6993e2a4f5b9ad9283a94d"},
{file = "scipy-1.16.2-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:567e77755019bb7461513c87f02bb73fb65b11f049aaaa8ca17cfaa5a5c45d77"},
{file = "scipy-1.16.2-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:17d9bb346194e8967296621208fcdfd39b55498ef7d2f376884d5ac47cec1a70"},
{file = "scipy-1.16.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:0a17541827a9b78b777d33b623a6dcfe2ef4a25806204d08ead0768f4e529a88"},
{file = "scipy-1.16.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:d7d4c6ba016ffc0f9568d012f5f1eb77ddd99412aea121e6fa8b4c3b7cbad91f"},
{file = "scipy-1.16.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9702c4c023227785c779cba2e1d6f7635dbb5b2e0936cdd3a4ecb98d78fd41eb"},
{file = "scipy-1.16.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d1cdf0ac28948d225decdefcc45ad7dd91716c29ab56ef32f8e0d50657dffcc7"},
{file = "scipy-1.16.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:70327d6aa572a17c2941cdfb20673f82e536e91850a2e4cb0c5b858b690e1548"},
{file = "scipy-1.16.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5221c0b2a4b58aa7c4ed0387d360fd90ee9086d383bb34d9f2789fafddc8a936"},
{file = "scipy-1.16.2-cp314-cp314-win_amd64.whl", hash = "sha256:f5a85d7b2b708025af08f060a496dd261055b617d776fc05a1a1cc69e09fe9ff"},
{file = "scipy-1.16.2-cp314-cp314-win_arm64.whl", hash = "sha256:2cc73a33305b4b24556957d5857d6253ce1e2dcd67fa0ff46d87d1670b3e1e1d"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:9ea2a3fed83065d77367775d689401a703d0f697420719ee10c0780bcab594d8"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:7280d926f11ca945c3ef92ba960fa924e1465f8d07ce3a9923080363390624c4"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:8afae1756f6a1fe04636407ef7dbece33d826a5d462b74f3d0eb82deabefd831"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:5c66511f29aa8d233388e7416a3f20d5cae7a2744d5cee2ecd38c081f4e861b3"},
{file = "scipy-1.16.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efe6305aeaa0e96b0ccca5ff647a43737d9a092064a3894e46c414db84bc54ac"},
{file = "scipy-1.16.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7f3a337d9ae06a1e8d655ee9d8ecb835ea5ddcdcbd8d23012afa055ab014f374"},
{file = "scipy-1.16.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bab3605795d269067d8ce78a910220262711b753de8913d3deeaedb5dded3bb6"},
{file = "scipy-1.16.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b0348d8ddb55be2a844c518cd8cc8deeeb8aeba707cf834db5758fc89b476a2c"},
{file = "scipy-1.16.2-cp314-cp314t-win_amd64.whl", hash = "sha256:26284797e38b8a75e14ea6631d29bda11e76ceaa6ddb6fdebbfe4c4d90faf2f9"},
{file = "scipy-1.16.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d2a4472c231328d4de38d5f1f68fdd6d28a615138f842580a8a321b5845cf779"},
{file = "scipy-1.16.2.tar.gz", hash = "sha256:af029b153d243a80afb6eabe40b0a07f8e35c9adc269c019f364ad747f826a6b"},
]
[package.dependencies]
numpy = ">=1.25.2,<2.6"
[package.extras]
dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"]
doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "linkify-it-py", "matplotlib (>=3.5)", "myst-nb (>=1.2.0)", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.2.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"]
test = ["Cython", "array-api-strict (>=2.3.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest (>=8.0.0)", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
[[package]] [[package]]
name = "shellingham" name = "shellingham"
version = "1.5.4" version = "1.5.4"
@ -3008,4 +3008,4 @@ propcache = ">=0.2.1"
 [metadata]
 lock-version = "2.1"
 python-versions = "^3.11"
-content-hash = "6c2a5a5a8e6a2732bd9e94de4bac3a7c0d3e63d959d5793b23eb327c7a95f3f8"
+content-hash = "91dbae2db3aade422091b46bec01e32cabc4457814e736775fc0d020d785fae5"

@@ -30,7 +30,7 @@ google-api-python-client = "^2.169.0"
 google-auth-httplib2 = "^0.2.0"
 google-auth-oauthlib = "^1.2.2"
 aiohttp = "^3.12.14"
-ortools = "^9.14.6206"
+scipy = "^1.16.2"
 [build-system]

@@ -10,7 +10,7 @@ from src.calculation_budget_constrains.router import \
 from src.calculation_target_reliability.router import \
 router as calculation_target_reliability
 from src.calculation_time_constrains.router import \
-router as calculation_time_constrains_router, get_calculation
+router as calculation_time_constrains_router
 # from src.job.router import router as job_router
 from src.overhaul.router import router as overhaul_router
@@ -32,7 +32,7 @@ from src.equipment_sparepart.router import router as equipment_sparepart_router
 # from src.overhaul.router import router as overhaul_router
 # from src.overhaul_history.router import router as overhaul_history_router
 # from src.overhaul_activity.router import router as scope_equipment_activity_router
-from src.overhaul_scope.router import router as ovehaul_schedule_router
+# # from src.overhaul_schedule.router import router as ovehaul_schedule_router
 # from src.scope_equipment_part.router import router as scope_equipment_part_router
 # from src.calculation_target_reliability.router import router as calculation_target_reliability
 #
@@ -142,9 +142,9 @@ authenticated_api_router.include_router(
 # scope_equipment_part_router, prefix="/equipment-parts", tags=["scope_equipment_parts"]
 # )
-authenticated_api_router.include_router(
-ovehaul_schedule_router, prefix="/overhaul-schedules", tags=["overhaul_schedules"]
-)
+# authenticated_api_router.include_router(
+# ovehaul_schedule_router, prefix="/overhaul-schedules", tags=["overhaul_schedules"]
+# )
 # calculation
 calculation_router = APIRouter(prefix="/calculation", tags=["calculations"])
@@ -172,10 +172,4 @@ calculation_router.include_router(
 authenticated_api_router.include_router(calculation_router)
-api_router.include_router(
-get_calculation,
-prefix="/calculation/time-constraint",
-tags=["calculation", "time_constraint"],
-)
 api_router.include_router(authenticated_api_router)

@@ -1,6 +1,5 @@
 # app/auth/auth_bearer.py
-import json
 from typing import Annotated, Optional
 import requests
@@ -71,72 +70,15 @@ class JWTBearer(HTTPBearer):
 async def get_current_user(request: Request) -> UserBase:
 return request.state.user
-async def get_token(request: Request):
-token = request.headers.get("Authorization")
-if token:
-return token.replace("Bearer ", "")  # strip the "Bearer " prefix
-else:
-return request.cookies.get("access_token")  # fall back to the cookie
-return ""  # return the token, or None if there is none
-async def internal_key(request: Request):
-token = request.headers.get("Authorization")
-if not token:
-api_key = request.headers.get("X-Internal-Key")
-if api_key != config.API_KEY:
-raise HTTPException(
-status_code=403, detail="Invalid Key."
-)
-try:
-headers = {
-'Content-Type': 'application/json'
-}
-response = requests.post(
-f"{config.AUTH_SERVICE_API}/sign-in",
-headers=headers,
-data=json.dumps({
-"username": "ohuser",
-"password": "123456789"
-})
-)
-if not response.ok:
-print(str(response.json()))
-raise Exception("error auth")
-user_data = response.json()
-return user_data['data']['access_token']
-except Exception as e:
-raise Exception(str(e))
-else:
-try:
-response = requests.get(
-f"{config.AUTH_SERVICE_API}/verify-token",
-headers={"Authorization": f"{token}"},
-)
-if not response.ok:
-raise HTTPException(
-status_code=403, detail="Invalid token."
-)
-return token.split(" ")[1]
-except Exception as e:
-print(f"Token verification error: {str(e)}")
-return False, str(e)
+async def get_token(request: Request):
+token = request.headers.get("Authorization")
+if token:
+return token.split(" ")[1]
+return ""
 CurrentUser = Annotated[UserBase, Depends(get_current_user)]
 Token = Annotated[str, Depends(get_token)]
-InternalKey = Annotated[str, Depends(internal_key)]
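Reviewer note on the simplified get_token above: it now expects an `Authorization: Bearer <jwt>` header (a header without a space makes `token.split(" ")[1]` raise IndexError) and the cookie fallback is gone. A small illustrative consumer of the resulting Token dependency; the route path and body are hypothetical, not part of the repository:

# Illustrative only: how the simplified Token dependency is consumed by a route.
from fastapi import APIRouter
from src.auth.service import Token   # Token = Annotated[str, Depends(get_token)]

example_router = APIRouter()

@example_router.get("/token-check")  # hypothetical path, for illustration
async def token_check(token: Token):
    # get_token returns the second chunk of "Authorization: Bearer <jwt>",
    # or "" when the header is missing; no cookie fallback remains.
    return {"has_token": bool(token)}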

@@ -5,7 +5,6 @@ from fastapi.params import Query
 from src.auth.service import Token
 from src.calculation_target_reliability.service import get_simulation_results
-from src.config import TC_RBD_ID
 from src.database.core import CollectorDbSession, DbSession
 from src.models import StandardResponse
@@ -25,7 +24,7 @@ async def get_target_reliability(
 """Get all scope pagination."""
 results = await get_simulation_results(
-simulation_id = TC_RBD_ID,
+simulation_id = "default",
 token=token
 )

@@ -84,7 +84,7 @@ async def get_all_budget_constrains(
 for item in result:
 cost = item["total_cost"] or 1.0
 efficiency = item["contribution_norm"] / cost
-item["priority_score"] = item["contribution_norm"]
+item["priority_score"] = 0.7 * item["contribution_norm"] + 0.3 * efficiency
 # Choose method
 if use_optimal:
@@ -104,7 +104,7 @@ def calculate_asset_eaf_contributions(plant_result, eq_results):
 for asset in eq_results:
 node_name = asset.get("aeros_node", {}).get("node_name")
 if node_name:
-results[node_name] = asset.get("contribution_factor", 0.0)
+results[node_name] = asset.get("contribution", 0.0)
 return results
@@ -150,7 +150,7 @@ def knapsack_selection(equipments: List[dict], budget: float, scale: int = 10_00
 for i in range(n):
 cost, value = costs[i], values[i]
 for w in range(W, cost - 1, -1):
-if dp[w - cost] + value >= dp[w]: # <= FIXED HERE
+if dp[w - cost] + value > dp[w]:
 dp[w] = dp[w - cost] + value
 keep[i][w] = True
@@ -164,15 +164,5 @@
 else:
 excluded.append(equipments[i])
-# Optional: fill leftover budget with zero-priority items
-remaining_budget = budget - sum(eq["total_cost"] for eq in selected)
-if remaining_budget > 0:
-for eq in excluded[:]:
-if eq["total_cost"] <= remaining_budget:
-selected.append(eq)
-excluded.remove(eq)
-remaining_budget -= eq["total_cost"]
 return selected, excluded
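For context on the hunks above: the priority score feeding the knapsack becomes a 0.7/0.3 blend of normalized contribution and cost efficiency, the DP comparison switches from `>=` to a strict `>` (so an equally valued later item no longer displaces an earlier choice), and the pass that back-filled leftover budget with excluded items is dropped. A self-contained sketch of that selection; the integer cost scaling and the backtracking details are assumptions (the repository's `scale` default is truncated in the hunk header), not the repository's exact code:

from typing import List, Tuple

def knapsack_select(equipments: List[dict], budget: float, scale: int = 1_000) -> Tuple[List[dict], List[dict]]:
    """0/1 knapsack over priority_score within the budget (sketch under assumed scaling)."""
    costs = [max(1, int((eq["total_cost"] or 1.0) / scale)) for eq in equipments]  # discretized cost
    values = [eq["priority_score"] for eq in equipments]
    n, W = len(equipments), int(budget / scale)

    dp = [0.0] * (W + 1)
    keep = [[False] * (W + 1) for _ in range(n)]
    for i in range(n):
        cost, value = costs[i], values[i]
        for w in range(W, cost - 1, -1):
            if dp[w - cost] + value > dp[w]:   # strict ">" as in the new version
                dp[w] = dp[w - cost] + value
                keep[i][w] = True

    # Backtrack to split equipments into selected / excluded.
    selected, excluded, w = [], [], W
    for i in range(n - 1, -1, -1):
        if keep[i][w]:
            selected.append(equipments[i])
            w -= costs[i]
        else:
            excluded.append(equipments[i])
    return selected, excluded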

@@ -1,11 +1,8 @@
-import asyncio
 from typing import Dict, List, Optional
-from temporalio.client import Client
 from fastapi import APIRouter, HTTPException, status
 from fastapi.params import Query
-from src.calculation_target_reliability.utils import wait_for_workflow
-from src.config import TEMPORAL_URL, TR_RBD_ID
 from src.database.core import DbSession, CollectorDbSession
 from src.auth.service import Token
 from src.models import StandardResponse
@@ -39,9 +36,8 @@ async def get_target_reliability(
 collector_db: CollectorDbSession,
 oh_session_id: Optional[str] = Query(None),
 eaf_input: float = Query(99.8),
-duration: int = Query(17520),
-simulation_id: Optional[str] = Query(None),
-cut_hours = Query(0)
+duration: int = Query(8760),
+simulation_id: Optional[str] = Query(None)
 ):
 """Get all scope pagination."""
 if not oh_session_id:
@@ -56,43 +52,27 @@ async def get_target_reliability(
 # eaf_input=eaf_input,
 # oh_duration=duration
 # )
-if duration != 17520:
-if not simulation_id:
-raise HTTPException(
-status_code=status.HTTP_425_TOO_EARLY, # or 409 Conflict
-detail="Simulation still running. Please wait.",
-)
-else:
-temporal_client = await Client.connect(TEMPORAL_URL)
-handle = temporal_client.get_workflow_handle(f"simulation-{simulation_id}")
-desc = await handle.describe()
-status_name = desc.status.name
-if status_name in ["RUNNING", "CONTINUED_AS_NEW"]:
-raise HTTPException(
-status_code=status.HTTP_425_TOO_EARLY, # or 409 Conflict
-detail="Simulation still running. Please wait.",
-)
-else:
-simulation_id = TR_RBD_ID
+# simulation_id = await run_rbd_simulation(
+# sim_hours=duration,
+# token=token
+# )
+if not simulation_id:
+simulation_id = "default"
 results = await get_simulation_results(
 simulation_id=simulation_id,
 token=token
 )
 optimize_result = await identify_worst_eaf_contributors(
 simulation_result=results,
 target_eaf=eaf_input,
 db_session=db_session,
 oh_session_id=oh_session_id,
 collector_db=collector_db,
-simulation_id=simulation_id,
-duration=duration,
-po_duration=1200,
-cut_hours=float(cut_hours)
+simulation_id=simulation_id
 )
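The route above previously gated results on a live Temporal workflow check and now simply falls back to the "default" simulation id; the default horizon also drops from 17520 h (two years) to 8760 h (one year). For reference, the status probe the removed branch performed can be factored into a small helper; this is a sketch using the same temporalio calls, with the URL and workflow-id prefix taken from the removed lines:

# Sketch only: the workflow-status probe the removed branch performed.
from temporalio.client import Client

async def simulation_is_running(temporal_url: str, simulation_id: str) -> bool:
    client = await Client.connect(temporal_url)
    handle = client.get_workflow_handle(f"simulation-{simulation_id}")
    desc = await handle.describe()
    # describe() reports the workflow's execution status; RUNNING/CONTINUED_AS_NEW means "still busy".
    return desc.status is not None and desc.status.name in ("RUNNING", "CONTINUED_AS_NEW")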

@@ -39,8 +39,6 @@ class AssetWeight(OverhaulBase):
 num_of_failures: int
 down_time: float
 efficiency: float
-improvement_impact:float
-birbaum: float
 class MaintenanceScenario(OverhaulBase):
 location_tag: str
@@ -54,15 +52,10 @@ class MaintenanceScenario(OverhaulBase):
 class OptimizationResult(OverhaulBase):
 current_plant_eaf: float
 target_plant_eaf: float
-possible_plant_eaf:float
 eaf_gap: float
-eaf_improvement_text:str
-recommended_reduced_outage:Optional[float] = 0
-warning_message:Optional[str]
 asset_contributions: List[dict]
 optimization_success: bool = False
 simulation_id: Optional[str] = None
 # {

@@ -1,10 +1,8 @@
-import math
 from typing import Optional, List
 from dataclasses import dataclass
 from sqlalchemy import Delete, Select
 import httpx
 from src.auth.service import CurrentUser
-from src.config import RBD_SERVICE_API
 from src.contribution_util import calculate_contribution, calculate_contribution_accurate
 from src.database.core import DbSession, CollectorDbSession
 from datetime import datetime, timedelta
@@ -18,20 +16,18 @@ from .schema import AssetWeight,MaintenanceScenario,OptimizationResult
 from src.overhaul_activity.service import get_standard_scope_by_session_id
+RBD_SERVICE_API = "http://192.168.1.82:8000/rbd"
 client = httpx.AsyncClient(timeout=300.0)
 async def run_rbd_simulation(*, sim_hours: int, token):
 sim_data = {
-"SimulationName": f"Simulasi TR OH {sim_hours}",
+"SimulationName": "Simulation OH Reliability Target",
 "SchematicName": "- TJB - Unit 3 -",
 "SimSeed": 1,
 "SimDuration": sim_hours,
-"OverhaulInterval": sim_hours - 1201,
-"DurationUnit": "UHour",
-"SimNumRun": 1,
-"IsDefault": False,
-"OverhaulDuration": 1200
+"DurationUnit": "UHour",
 }
 headers = {
@ -78,111 +74,77 @@ async def get_simulation_results(*, simulation_id: str, token: str):
"plant_result": plant_data "plant_result": plant_data
} }
def calculate_asset_eaf_contributions(plant_result, eq_results, standard_scope, eaf_gap, scheduled_outage): def calculate_asset_eaf_contributions(plant_result, eq_results, standard_scope, eaf_gap):
""" """
Calculate each asset's contribution to plant EAF with realistic, fair improvement allocation. Calculate each asset's contribution to plant EAF with realistic improvement potential.
The total EAF gap is distributed among assets proportionally to their contribution potential. Ranking:
Automatically skips equipment with no unplanned downtime (only scheduled outages). 1. Highest contribution (Birnbaum Importance)
2. Tie-breaker: Contribution per downtime (efficiency)
""" """
eaf_gap_fraction = eaf_gap / 100.0 if eaf_gap > 1.0 else eaf_gap eaf_gap_fraction = eaf_gap / 100.0 if eaf_gap > 1.0 else eaf_gap
total_hours = plant_result.get("total_uptime") + plant_result.get("total_downtime") MIN_BIRNBAUM_IMPORTANCE = 0.0005
plant_operating_fraction = (total_hours - scheduled_outage) / total_hours REALISTIC_MAX_AVAILABILITY = 0.995 # 99.5%
MIN_IMPROVEMENT_PERCENT = 0.005 # 0.5%
REALISTIC_MAX_TECHNICAL = 0.995
REALISTIC_MAX_AVAILABILITY = REALISTIC_MAX_TECHNICAL * plant_operating_fraction
MIN_IMPROVEMENT_PERCENT = 0.0001
min_improvement_fraction = MIN_IMPROVEMENT_PERCENT / 100.0 min_improvement_fraction = MIN_IMPROVEMENT_PERCENT / 100.0
EPSILON = 0.001 # 1 ms or a fraction of an hour for comparison tolerance
results = [] results = []
weighted_assets = []
# Step 1: Collect eligible assets and their weights
for asset in eq_results: for asset in eq_results:
node = asset.get("aeros_node") asset_name = asset.get("aeros_node").get("node_name")
if not node:
continue
asset_name = node.get("node_name")
num_of_events = asset.get("num_events", 0)
if asset_name not in standard_scope: if asset_name not in standard_scope:
continue continue
contribution_factor = asset.get("contribution_factor", 0.0) birnbaum = asset.get("contribution", 0.0)
birbaum = asset.get("contribution", 0.0)
current_availability = asset.get("availability", 0.0) current_availability = asset.get("availability", 0.0)
downtime = asset.get("total_downtime", 0.0) downtime = asset.get("total_downtime", 0.0)
# --- NEW: Skip equipment with no failures and near-maximum availability --- # Filter 1: Importance too low
if ( if birnbaum < MIN_BIRNBAUM_IMPORTANCE:
num_of_events < 2 # no unplanned events
or contribution_factor <= 0
):
# This equipment has nothing to improve realistically
continue continue
# --- Compute realistic possible improvement ---
if REALISTIC_MAX_AVAILABILITY > current_availability:
max_possible_improvement = REALISTIC_MAX_AVAILABILITY - current_availability
else:
max_possible_improvement = 0.0 # No improvement possible
# Max possible availability improvement
max_possible_improvement = REALISTIC_MAX_AVAILABILITY - current_availability
if max_possible_improvement <= 0:
continue
# Required improvement (limited by plant gap and availability ceiling)
required_impr = min(eaf_gap_fraction, max_possible_improvement) * birnbaum
# Compute weighted importance (Birnbaum × FV) # Filter 2: Improvement too small
raw_weight = birbaum if required_impr < min_improvement_fraction:
weight = math.sqrt(max(raw_weight, 0.0)) continue
weighted_assets.append((asset, weight, 0))
# Step 2: Compute total weight
total_weight = sum(w for _, w, _ in weighted_assets) or 1.0
# Step 3: Distribute improvement proportionally to weight
for asset, weight, max_possible_improvement in weighted_assets:
node = asset.get("aeros_node")
contribution_factor = asset.get("contribution_factor", 0.0)
birbaum = asset.get("contribution", 0.0)
current_availability = asset.get("availability", 0.0)
downtime = asset.get("total_downtime", 0.0)
required_improvement = eaf_gap_fraction * (weight/total_weight)
required_improvement = min(required_improvement, max_possible_improvement)
required_improvement = max(required_improvement, min_improvement_fraction)
improvement_impact = required_improvement * contribution_factor # Contribution efficiency (secondary metric)
efficiency = birbaum / downtime if downtime > 0 else birbaum efficiency = birnbaum / downtime if downtime > 0 else birnbaum
contribution = AssetWeight( contribution = AssetWeight(
node=node, node=asset.get("aeros_node"),
availability=current_availability, availability=current_availability,
contribution=contribution_factor, contribution=birnbaum,
required_improvement=required_improvement, required_improvement=required_impr,
improvement_impact=improvement_impact,
num_of_failures=asset.get("num_events", 0), num_of_failures=asset.get("num_events", 0),
down_time=downtime, down_time=downtime,
efficiency=efficiency, efficiency= efficiency
birbaum=birbaum,
) )
results.append(contribution) results.append(contribution)
# Step 4: Sort by Birnbaum importance # Sort: 1) contribution (desc), 2) efficiency (desc)
results.sort(key=lambda x: x.birbaum, reverse=True) results.sort(key=lambda x: (x.contribution, x.efficiency), reverse=True)
return results return results
def project_eaf_improvement(asset: AssetWeight, improvement_factor: float = 0.3) -> float: def project_eaf_improvement(asset: AssetWeight, improvement_factor: float = 0.3) -> float:
""" """
Project EAF improvement after maintenance Project EAF improvement after maintenance
This is a simplified model - you should replace with your actual prediction logic This is a simplified model - you should replace with your actual prediction logic
""" """
current_downtime_pct = 100 - asset.eaf current_downtime_pct = 100 - asset.eaf
# Assume maintenance reduces downtime by improvement_factor
improved_downtime_pct = current_downtime_pct * (1 - improvement_factor) improved_downtime_pct = current_downtime_pct * (1 - improvement_factor)
projected_eaf = 100 - improved_downtime_pct projected_eaf = 100 - improved_downtime_pct
return min(projected_eaf, 99.9) # Cap at 99.9% return min(projected_eaf, 99.9) # Cap at 99.9%
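The replacement calculate_asset_eaf_contributions in the hunk above trades the proportional gap-allocation scheme for a filter-and-rank pass: assets outside the standard scope or below a minimum Birnbaum importance are skipped, the required improvement is capped by both the plant EAF gap and a 99.5% availability ceiling, and the survivors are sorted by (contribution, efficiency). A condensed, self-contained sketch of that ranking, using the thresholds and field names visible in the hunk; RankedAsset is a stand-in for the AssetWeight schema:

from dataclasses import dataclass
from typing import List

# Thresholds as they appear in the new code above.
MIN_BIRNBAUM_IMPORTANCE = 0.0005
REALISTIC_MAX_AVAILABILITY = 0.995   # 99.5% availability ceiling
MIN_IMPROVEMENT_PERCENT = 0.005      # 0.5%

@dataclass
class RankedAsset:                   # stand-in for the AssetWeight schema in the diff
    node: dict
    contribution: float
    required_improvement: float
    efficiency: float

def rank_assets(eq_results: List[dict], standard_scope: List[str], eaf_gap: float) -> List[RankedAsset]:
    """Filter and rank assets the way the new version does."""
    eaf_gap_fraction = eaf_gap / 100.0 if eaf_gap > 1.0 else eaf_gap
    min_improvement_fraction = MIN_IMPROVEMENT_PERCENT / 100.0
    ranked: List[RankedAsset] = []
    for asset in eq_results:
        node = asset.get("aeros_node") or {}
        if node.get("node_name") not in standard_scope:
            continue
        birnbaum = asset.get("contribution", 0.0)
        availability = asset.get("availability", 0.0)
        downtime = asset.get("total_downtime", 0.0)
        if birnbaum < MIN_BIRNBAUM_IMPORTANCE:                  # filter 1: importance too low
            continue
        headroom = REALISTIC_MAX_AVAILABILITY - availability
        if headroom <= 0:                                       # nothing left to gain
            continue
        required = min(eaf_gap_fraction, headroom) * birnbaum   # capped by gap and ceiling
        if required < min_improvement_fraction:                 # filter 2: improvement too small
            continue
        efficiency = birnbaum / downtime if downtime > 0 else birnbaum
        ranked.append(RankedAsset(node, birnbaum, required, efficiency))
    ranked.sort(key=lambda a: (a.contribution, a.efficiency), reverse=True)
    return ranked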
@ -195,69 +157,24 @@ async def identify_worst_eaf_contributors(
oh_session_id: str, oh_session_id: str,
collector_db: CollectorDbSession, collector_db: CollectorDbSession,
simulation_id: str, simulation_id: str,
duration: int,
po_duration: int,
cut_hours: float = 0, # new optional parameter: how many hours of planned outage user wants to cut
): ):
""" """
Identify equipment that contributes most to plant EAF reduction, Identify equipment that contributes most to plant EAF reduction
evaluate if target EAF is physically achievable, and optionally in order to reach a target EAF.
calculate the additional improvement if user cuts scheduled outage.
""" """
# Extract results
calc_result = simulation_result["calc_result"] calc_result = simulation_result["calc_result"]
plant_result = simulation_result["plant_result"] plant_result = simulation_result["plant_result"]
# Ensure list of equipment
eq_results = calc_result if isinstance(calc_result, list) else [calc_result] eq_results = calc_result if isinstance(calc_result, list) else [calc_result]
# Base parameters # Current plant EAF and gap
current_plant_eaf = plant_result.get("eaf", 0) current_plant_eaf = plant_result.get("eaf", 0)
total_hours = duration
scheduled_outage = int(po_duration)
reduced_outage = max(scheduled_outage - cut_hours, 0)
max_eaf_possible = (total_hours - reduced_outage) / total_hours * 100
# Improvement purely from outage reduction (global)
scheduled_eaf_gain = (cut_hours / total_hours) * 100 if cut_hours > 0 else 0.0
# Target feasibility check
warning_message = None
if target_eaf > max_eaf_possible:
impossible_gap = target_eaf - max_eaf_possible
required_scheduled_hours = total_hours * (1 - target_eaf / 100)
required_reduction = reduced_outage - required_scheduled_hours
# Build dynamic phrase for clarity
if cut_hours > 0:
reduction_phrase = f" even after reducing planned outage by {cut_hours}h"
else:
reduction_phrase = ""
warning_message = (
f"⚠️ Target EAF {target_eaf:.2f}% exceeds theoretical maximum {max_eaf_possible:.2f}%"
f"{reduction_phrase}.\n"
f"To achieve it, planned outage must be further reduced by approximately "
f"{required_reduction:.1f} hours (from {reduced_outage:.0f}h → {required_scheduled_hours:.0f}h)."
)
# Cap target EAF to max achievable for calculation
target_eaf = max_eaf_possible
eaf_gap = (target_eaf - current_plant_eaf) / 100.0 eaf_gap = (target_eaf - current_plant_eaf) / 100.0
if eaf_gap <= 0:
return OptimizationResult(
current_plant_eaf=current_plant_eaf,
target_plant_eaf=target_eaf,
possible_plant_eaf=current_plant_eaf,
eaf_gap=0,
warning_message=warning_message or "Target already achieved or exceeded.",
asset_contributions=[],
optimization_success=True,
simulation_id=simulation_id,
eaf_improvement_text=""
)
# Get standard scope (equipment in OH) # Get standard scope (equipment allowed for overhaul/optimization)
standard_scope = await get_standard_scope_by_session_id( standard_scope = await get_standard_scope_by_session_id(
db_session=db_session, db_session=db_session,
overhaul_session_id=oh_session_id, overhaul_session_id=oh_session_id,
@ -265,92 +182,43 @@ async def identify_worst_eaf_contributors(
) )
standard_scope_location_tags = [tag.location_tag for tag in standard_scope] standard_scope_location_tags = [tag.location_tag for tag in standard_scope]
# Compute contributions for reliability improvements # Compute contributions
asset_contributions = calculate_asset_eaf_contributions( asset_contributions = calculate_asset_eaf_contributions(
plant_result, eq_results, standard_scope_location_tags, eaf_gap, reduced_outage plant_result, eq_results, standard_scope_location_tags, eaf_gap=eaf_gap
) )
# Greedy improvement allocation project_eaf_improvement = 0.0
project_eaf_improvement_total = 0.0
selected_eq = [] selected_eq = []
# Greedy select until gap is closed
for asset in asset_contributions: for asset in asset_contributions:
if project_eaf_improvement_total >= eaf_gap: if project_eaf_improvement >= eaf_gap:
break break
if (project_eaf_improvement_total + asset.improvement_impact) <= eaf_gap: if (project_eaf_improvement + asset.required_improvement) <= eaf_gap:
selected_eq.append(asset) selected_eq.append(asset)
project_eaf_improvement_total += asset.improvement_impact project_eaf_improvement += asset.required_improvement
else: else:
# allow overshoot tolerance by skipping large ones, continue with smaller ones
continue continue
# Total EAF after improvements + optional outage cut # Build output with efficiency included
possible_eaf_plant = current_plant_eaf + project_eaf_improvement_total * 100 + scheduled_eaf_gain
possible_eaf_plant = min(possible_eaf_plant, max_eaf_possible)
selected_eq.sort(key=lambda x: x.birbaum, reverse=True)
required_cut_hours = 0
# --- 2. Optimization feasible but cannot reach target (underperformance case) ---
if possible_eaf_plant < target_eaf:
# Calculate shortfall
performance_gap = target_eaf - possible_eaf_plant
# Estimate how many scheduled outage hours must be reduced to close the remaining gap
# Each hour reduced adds (1 / total_hours) * 100 % to plant EAF
required_cut_hours = (performance_gap / 100) * total_hours
reliability_limit_msg = (
f"⚠️ Optimization was unable to reach target EAF {target_eaf:.2f}%.\n"
f"The best achievable EAF based on current reliability is "
f"{possible_eaf_plant:.2f}% (short by {performance_gap:.2f}%)."
)
# Add actionable recommendation
recommendation_msg = (
f"To achieve the target EAF, consider reducing planned outage by approximately "
f"{required_cut_hours:.1f} hours or {int(required_cut_hours/24)} days (from {reduced_outage:.0f}h → {reduced_outage - required_cut_hours:.0f}h)."
)
if warning_message:
warning_message = warning_message + "\n\n" + reliability_limit_msg + "\n" + recommendation_msg
else:
warning_message = reliability_limit_msg + "\n" + recommendation_msg
# --- EAF improvement reporting ---
eaf_improvement_points = (possible_eaf_plant - current_plant_eaf)
# Express as text for user readability
if eaf_improvement_points > 0:
improvement_text = f"{eaf_improvement_points:.6f} percentage points increase"
else:
improvement_text = "No measurable improvement achieved"
# Build result
return OptimizationResult( return OptimizationResult(
current_plant_eaf=current_plant_eaf, current_plant_eaf=current_plant_eaf,
target_plant_eaf=target_eaf, target_plant_eaf=target_eaf,
possible_plant_eaf=possible_eaf_plant,
eaf_gap=eaf_gap, eaf_gap=eaf_gap,
warning_message=warning_message, # numeric
eaf_improvement_text=improvement_text,
recommended_reduced_outage=required_cut_hours,
asset_contributions=[ asset_contributions=[
{ {
"node": asset.node, "node": asset.node,
"availability": asset.availability, "availability": asset.availability,
"contribution": asset.contribution, "contribution": asset.contribution,
"sensitivy": asset.birbaum,
"required_improvement": asset.required_improvement, "required_improvement": asset.required_improvement,
"system_impact": asset.improvement_impact,
"num_of_failures": asset.num_of_failures, "num_of_failures": asset.num_of_failures,
"down_time": asset.down_time, "down_time": asset.down_time,
"efficiency": asset.efficiency, "efficiency": asset.efficiency,
} }
for asset in selected_eq for asset in selected_eq
], ],
outage_reduction_hours=cut_hours, optimization_success=(current_plant_eaf + project_eaf_improvement) >= target_eaf,
optimization_success=(current_plant_eaf + project_eaf_improvement_total * 100 + scheduled_eaf_gain)
>= target_eaf,
simulation_id=simulation_id, simulation_id=simulation_id,
) )
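Continuing the RankedAsset sketch above: the slimmed-down identify_worst_eaf_contributors then closes the gap greedily, walking the ranked assets and adding each one while the accumulated required improvement still fits inside the EAF gap. A minimal sketch of that loop (names follow the hunk; the gap is a fraction, e.g. 0.012 for 1.2 points):

from typing import List, Tuple

def greedy_close_gap(ranked: List[RankedAsset], eaf_gap: float) -> Tuple[List[RankedAsset], float]:
    """Greedy selection as in the new identify_worst_eaf_contributors."""
    selected: List[RankedAsset] = []
    achieved = 0.0
    for asset in ranked:
        if achieved >= eaf_gap:
            break                                   # gap already covered
        if achieved + asset.required_improvement <= eaf_gap:
            selected.append(asset)
            achieved += asset.required_improvement
        # assets that would overshoot the gap are skipped; smaller ones may still fit
    return selected, achieved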

@ -1,10 +1,6 @@
import asyncio
from datetime import datetime, timedelta from datetime import datetime, timedelta
import random import random
from typing import List, Optional from typing import List, Optional
from temporalio.client import Client
from src.config import TEMPORAL_URL, TR_RBD_ID
def generate_down_periods(start_date: datetime, end_date: datetime, def generate_down_periods(start_date: datetime, end_date: datetime,
num_periods: Optional[int] = None, min_duration: int = 3, num_periods: Optional[int] = None, min_duration: int = 3,
@ -56,36 +52,3 @@ def generate_down_periods(start_date: datetime, end_date: datetime,
down_periods.append((period_start, period_end)) down_periods.append((period_start, period_end))
return sorted(down_periods) return sorted(down_periods)
async def wait_for_workflow(simulation_id, max_retries=3):
workflow_id = f"simulation-{simulation_id}" # use returned ID
retries = 0
temporal_client = await Client.connect(TEMPORAL_URL)
while True:
try:
handle = temporal_client.get_workflow_handle(workflow_id=workflow_id)
desc = await handle.describe()
status = desc.status.name
if status not in ["RUNNING", "CONTINUED_AS_NEW"]:
print(f"✅ Workflow {workflow_id} finished with status: {status}")
break
print(f"⏳ Workflow {workflow_id} still {status}, checking again in 10s...")
except Exception as e:
retries += 1
if retries > max_retries:
print(f"⚠️ Workflow {workflow_id} not found after {max_retries} retries, treating as done. Error: {e}")
break
else:
print(f"⚠️ Workflow {workflow_id} not found (retry {retries}/{max_retries}), waiting 10s before retry...")
await asyncio.sleep(10)
continue
retries = 0 # reset retries if describe() worked
await asyncio.sleep(30)
return simulation_id
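The removed wait_for_workflow helper above polled describe() every 30 s with retry bookkeeping. If a blocking wait is ever needed again, temporalio also allows awaiting the handle directly, which is simpler; a sketch under the same "simulation-<id>" workflow-id convention, not the repository's code:

# Sketch of an alternative to the removed polling helper.
from temporalio.client import Client

async def wait_for_simulation(temporal_url: str, simulation_id: str) -> str:
    client = await Client.connect(temporal_url)
    handle = client.get_workflow_handle(f"simulation-{simulation_id}")
    await handle.result()      # returns when the workflow completes, raises if it failed
    return simulation_id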

@ -7,7 +7,6 @@ from sqlalchemy import Select, func, select
from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload
from src.auth.service import Token from src.auth.service import Token
from src.config import TC_RBD_ID
from src.database.core import DbSession from src.database.core import DbSession
from src.overhaul_scope.service import get_all from src.overhaul_scope.service import get_all
from src.standard_scope.model import StandardScope from src.standard_scope.model import StandardScope
@ -21,7 +20,8 @@ from .service import (create_calculation_result_service, create_param_and_data,
get_avg_cost_by_asset, get_avg_cost_by_asset,
get_calculation_by_reference_and_parameter, get_calculation_by_reference_and_parameter,
get_calculation_data_by_id, get_calculation_result, get_calculation_data_by_id, get_calculation_result,
run_simulation_with_spareparts) get_corrective_cost_time_chart,
get_overhaul_cost_by_time_chart, run_simulation, run_simulation_with_spareparts)
from src.database.core import CollectorDbSession from src.database.core import CollectorDbSession
@ -86,25 +86,22 @@ async def create_calculation(
db_session: DbSession, db_session: DbSession,
collector_db_session: CollectorDbSession, collector_db_session: CollectorDbSession,
calculation_time_constrains_in: CalculationTimeConstrainsParametersCreate, calculation_time_constrains_in: CalculationTimeConstrainsParametersCreate,
created_by: str, created_by: str
simulation_id
): ):
calculation_data = await create_param_and_data( calculation_data = await create_param_and_data(
db_session=db_session, db_session=db_session,
calculation_param_in=calculation_time_constrains_in, calculation_param_in=calculation_time_constrains_in,
created_by=created_by, created_by=created_by,
) )
rbd_simulation_id = simulation_id or TC_RBD_ID
# results = await create_calculation_result_service( # results = await create_calculation_result_service(
# db_session=db_session, calculation=calculation_data, token=token # db_session=db_session, calculation=calculation_data, token=token
# ) # )
results = await run_simulation_with_spareparts( results = await run_simulation_with_spareparts(
db_session=db_session, calculation=calculation_data, token=token, collector_db_session=collector_db_session, simulation_id=rbd_simulation_id db_session=db_session, calculation=calculation_data, token=token, collector_db_session=collector_db_session
) )
return results return results["id"]
async def get_or_create_scope_equipment_calculation( async def get_or_create_scope_equipment_calculation(

@ -69,10 +69,6 @@ class CalculationData(Base, DefaultMixin, IdentityMixin):
optimum_oh_day = Column(Integer, nullable=True) optimum_oh_day = Column(Integer, nullable=True)
max_interval = Column(Integer, nullable=True) max_interval = Column(Integer, nullable=True)
rbd_simulation_id = Column(UUID(as_uuid=True), nullable=True)
optimum_analysis = Column(JSON, nullable=True)
session = relationship("OverhaulScope", lazy="raise") session = relationship("OverhaulScope", lazy="raise")
@ -82,9 +78,7 @@ class CalculationData(Base, DefaultMixin, IdentityMixin):
"CalculationEquipmentResult", lazy="raise", viewonly=True "CalculationEquipmentResult", lazy="raise", viewonly=True
) )
results = relationship("CalculationResult", lazy="raise", viewonly=True) results = relationship("CalculationResult", lazy="raise", viewonly=True)
@classmethod @classmethod
async def create_with_param( async def create_with_param(
@ -158,7 +152,6 @@ class CalculationEquipmentResult(Base, DefaultMixin):
optimum_day = Column(Integer, default=1) optimum_day = Column(Integer, default=1)
is_included = Column(Boolean, default=True) is_included = Column(Boolean, default=True)
procurement_details = Column(JSON, nullable=True) procurement_details = Column(JSON, nullable=True)
is_initial = Column(Boolean, default=True)
master_equipment = relationship( master_equipment = relationship(
"MasterEquipment", "MasterEquipment",

@ -2,11 +2,8 @@ from typing import List, Optional, Union
from fastapi import APIRouter from fastapi import APIRouter
from fastapi.params import Query from fastapi.params import Query
import requests
from src import config from src.auth.service import CurrentUser, Token
from src.auth.service import CurrentUser, InternalKey, Token
from src.config import DEFAULT_TC_ID
from src.database.core import DbSession from src.database.core import DbSession
from src.models import StandardResponse from src.models import StandardResponse
@ -24,11 +21,10 @@ from .service import (bulk_update_equipment, get_calculation_result,
from src.database.core import CollectorDbSession from src.database.core import CollectorDbSession
router = APIRouter() router = APIRouter()
get_calculation = APIRouter()
@router.post( @router.post(
"", response_model=StandardResponse[Union[dict, CalculationTimeConstrainsRead]] "", response_model=StandardResponse[Union[str, CalculationTimeConstrainsRead]]
) )
async def create_calculation_time_constrains( async def create_calculation_time_constrains(
token: Token, token: Token,
@ -38,7 +34,6 @@ async def create_calculation_time_constrains(
calculation_time_constrains_in: CalculationTimeConstrainsParametersCreate, calculation_time_constrains_in: CalculationTimeConstrainsParametersCreate,
scope_calculation_id: Optional[str] = Query(None), scope_calculation_id: Optional[str] = Query(None),
with_results: Optional[int] = Query(0), with_results: Optional[int] = Query(0),
simulation_id = Query(None)
): ):
"""Save calculation time constrains Here""" """Save calculation time constrains Here"""
@ -55,10 +50,9 @@ async def create_calculation_time_constrains(
collector_db_session=collector_db_session, collector_db_session=collector_db_session,
calculation_time_constrains_in=calculation_time_constrains_in, calculation_time_constrains_in=calculation_time_constrains_in,
created_by=current_user.name, created_by=current_user.name,
simulation_id=simulation_id
) )
return StandardResponse(data=results, message="Data created successfully") return StandardResponse(data=str(results), message="Data created successfully")
@router.get( @router.get(
@ -85,20 +79,13 @@ async def get_calculation_parameters(
) )
@get_calculation.get( @router.get(
"/{calculation_id}", response_model=StandardResponse[CalculationTimeConstrainsRead] "/{calculation_id}", response_model=StandardResponse[CalculationTimeConstrainsRead]
) )
async def get_calculation_results(db_session: DbSession, calculation_id, token:InternalKey, include_risk_cost:int = Query(1, alias="risk_cost")): async def get_calculation_results(db_session: DbSession, calculation_id):
if calculation_id == 'default':
calculation_id = DEFAULT_TC_ID
results = await get_calculation_result( results = await get_calculation_result(
db_session=db_session, calculation_id=calculation_id, token=token, include_risk_cost=include_risk_cost db_session=db_session, calculation_id=calculation_id
) )
requests.post(f"{config.AUTH_SERVICE_API}/sign-out", headers={
"Authorization": f"Bearer {token}"
})
return StandardResponse( return StandardResponse(
data=results, data=results,
@ -147,9 +134,6 @@ async def update_selected_equipment(
calculation_id, calculation_id,
calculation_time_constrains_in: List[CalculationSelectedEquipmentUpdate], calculation_time_constrains_in: List[CalculationSelectedEquipmentUpdate],
): ):
if calculation_id == 'default':
calculation_id = "3b9a73a2-bde6-418c-9e2f-19046f501a05"
results = await bulk_update_equipment( results = await bulk_update_equipment(
db=db_session, db=db_session,
selected_equipments=calculation_time_constrains_in, selected_equipments=calculation_time_constrains_in,

File diff suppressed because it is too large

@ -1,10 +1,11 @@
import datetime import datetime
import json import json
import numpy as np
import pandas as pd import pandas as pd
import requests import requests
from src.config import RBD_SERVICE_API from src.config import REALIBILITY_SERVICE_API
def get_months_between(start_date: datetime.datetime, end_date: datetime.datetime) -> int: def get_months_between(start_date: datetime.datetime, end_date: datetime.datetime) -> int:
""" """
@ -15,42 +16,39 @@ def get_months_between(start_date: datetime.datetime, end_date: datetime.datetim
return months return months
def create_time_series_data(chart_data, max_hours=None): def create_time_series_data(chart_data, max_hours=24096):
# Filter out ON_OH # Filter out data points with currentEvent = "ON_OH"
filtered_data = [d for d in chart_data if d["currentEvent"] != "ON_OH"] filtered_data = [data for data in chart_data if data['currentEvent'] != 'ON_OH']
sorted_data = sorted(filtered_data, key=lambda x: x["cumulativeTime"])
# Sort filtered data by cumulative time
sorted_data = sorted(filtered_data, key=lambda x: x['cumulativeTime'])
if not sorted_data: if not sorted_data:
return [] return []
hourly_data = [] hourly_data = []
current_state_index = 0 current_state_index = 0
current_flow_rate = sorted_data[0]["flowRate"] current_flow_rate = sorted_data[0]['flowRate']
current_eq_status = sorted_data[0]["currentEQStatus"] current_eq_status = sorted_data[0]['currentEQStatus']
# Determine maximum bound (either given or from data) for hour in range(1, max_hours + 1):
last_time = int(sorted_data[-1]["cumulativeTime"]) # Check if we need to advance to the next state
if max_hours is None: while (current_state_index < len(sorted_data) - 1 and
max_hours = last_time hour >= int(sorted_data[current_state_index + 1]['cumulativeTime'])):
for hour in range(0, max_hours + 1): # start from 0
# Advance state if needed
while (current_state_index < len(sorted_data) - 1 and
hour >= sorted_data[current_state_index + 1]["cumulativeTime"]):
current_state_index += 1 current_state_index += 1
current_flow_rate = sorted_data[current_state_index]["flowRate"] current_flow_rate = sorted_data[current_state_index]['flowRate']
current_eq_status = sorted_data[current_state_index]["currentEQStatus"] current_eq_status = sorted_data[current_state_index]['currentEQStatus']
# Add hourly data point
hourly_data.append({ hourly_data.append({
"cumulativeTime": hour, 'cumulativeTime': hour,
"flowRate": current_flow_rate, 'flowRate': current_flow_rate,
"currentEQStatus": current_eq_status 'currentEQStatus': current_eq_status
}) })
return hourly_data return hourly_data
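# Illustrative sketch of calling create_time_series_data; the sample chart_data records below
# are invented. ON_OH events are filtered out and the remaining state changes are expanded
# into one record per hour.
_sample_chart_data = [
    {"currentEvent": "RUN", "cumulativeTime": 0, "flowRate": 500, "currentEQStatus": "Svc"},
    {"currentEvent": "TRIP", "cumulativeTime": 5, "flowRate": 0, "currentEQStatus": "OoS"},
    {"currentEvent": "ON_OH", "cumulativeTime": 8, "flowRate": 0, "currentEQStatus": "OoS"},
]
_hourly = create_time_series_data(_sample_chart_data, max_hours=10)
# Each record looks like {"cumulativeTime": h, "flowRate": ..., "currentEQStatus": ...}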
def calculate_failures_per_month(hourly_data): def calculate_failures_per_month(hourly_data):
""" """
Calculate the cumulative number of failures up to each month from hourly data. Calculate the cumulative number of failures up to each month from hourly data.
@ -99,110 +97,96 @@ def calculate_failures_per_month(hourly_data):
return result return result
import pandas as pd def analyze_monthly_metrics(timestamp_outs):
import datetime """
Analyze time series data to calculate monthly metrics:
import datetime 1. Failure count per month
import pandas as pd 2. Cumulative failure count each month
3. Total out-of-service time per month
async def plant_simulation_metrics(simulation_id: str, location_tag: str, max_interval, token, last_oh_date, use_location_tag: int = 1): 4. Average flow rate per month
"""Get failure predictions for equipment from simulation service""" """
calc_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/{simulation_id}/{location_tag}"
try:
response = requests.get(
calc_result_url,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {token}",
},
timeout=30
)
response.raise_for_status()
prediction_data = response.json()['data']
except (requests.RequestException, ValueError) as e:
raise Exception(str(e))
return prediction_data
def analyze_monthly_metrics(timestamp_outs, start_date, max_flow_rate: float = 550): # Check if timestamp_outs is None or empty
if not timestamp_outs: if timestamp_outs is None or not timestamp_outs:
# Return empty results with zero values
return {} return {}
# Convert to DataFrame for easier manipulation
df = pd.DataFrame(timestamp_outs) df = pd.DataFrame(timestamp_outs)
# Check if DataFrame is empty after creation
if df.empty:
return {}
# Check if required columns exist
required_columns = ['cumulativeTime', 'currentEQStatus', 'flowRate'] required_columns = ['cumulativeTime', 'currentEQStatus', 'flowRate']
if not all(col in df.columns for col in required_columns): missing_columns = [col for col in required_columns if col not in df.columns]
if missing_columns:
return {} return {}
start_oh = datetime.datetime(start_date.year, start_date.month, start_date.day)
# Actual datetime from cumulative hours # Assuming the simulation starts from a reference date (you can modify this)
df['datetime'] = df['cumulativeTime'].apply(lambda x: start_oh + datetime.timedelta(hours=x)) # For this example, I'll use January 1, 2024 as the start date
start_date = datetime.datetime(2025, 10, 22)
# Convert cumulative hours to actual datetime
df['datetime'] = df['cumulativeTime'].apply(
lambda x: start_date + datetime.timedelta(hours=x)
)
# Extract month-year for grouping
df['month_year'] = df['datetime'].dt.to_period('M') df['month_year'] = df['datetime'].dt.to_period('M')
# Duration until next timestamp # Calculate time duration for each record (difference between consecutive cumulative times)
df['duration_hours'] = df['cumulativeTime'].shift(-1) - df['cumulativeTime'] df['duration_hours'] = df['cumulativeTime'].diff().fillna(df['cumulativeTime'].iloc[0])
df['duration_hours'] = df['duration_hours'].fillna(0)
# Initialize results dictionary
# Failure detection
df['status_change'] = df['currentEQStatus'].shift() != df['currentEQStatus']
df['failure'] = (df['currentEQStatus'] == 'OoS') & df['status_change']
# Cumulative tracking
df['cumulative_failures'] = df['failure'].cumsum()
df['cumulative_oos'] = (df['duration_hours'] * (df['currentEQStatus'] == 'OoS')).cumsum()
# Derating calculation
# Derating = capacity reduction below max but not outage
df['derating'] = (max_flow_rate - df['flowRate']).clip(lower=0)
df['is_derated'] = (df['currentEQStatus'] == 'Svc') & (df['derating'] > 0)
# Equivalent Derated Hours (EFDH) → sum of derating * hours, then normalized by max capacity
df['derated_mwh'] = df['derating'] * df['duration_hours']
df['derated_hours_equivalent'] = df['derated_mwh'] / max_flow_rate
monthly_results = {} monthly_results = {}
for month_period, group in df.groupby('month_year', sort=True): # Track cumulative failures across all months
cumulative_failures = 0
cummulative_oos = 0
# Group by month-year and ensure chronological order
for month_period, group in df.groupby('month_year'):
month_str = str(month_period) month_str = str(month_period)
monthly_results[month_str] = {} monthly_results[month_str] = {}
# Failures # 1. Count failures per month
monthly_results[month_str]['failures_count'] = int(group['failure'].sum()) # A failure is when currentEQStatus changes from "Svc" to "OoS"
monthly_results[month_str]['cumulative_failures'] = int(group['cumulative_failures'].max()) status_changes = group['currentEQStatus'].shift() != group['currentEQStatus']
failures = ((group['currentEQStatus'] == 'OoS') & status_changes).sum()
# OOS hours monthly_results[month_str]['failures_count'] = int(failures)
oos_time = group.loc[group['currentEQStatus'] == 'OoS', 'duration_hours'].sum()
# 2. Add failures to cumulative count
cumulative_failures += failures
monthly_results[month_str]['cumulative_failures'] = int(cumulative_failures)
# 3. Total out-of-service time per month (in hours)
oos_time = group[group['currentEQStatus'] == 'OoS']['duration_hours'].sum()
monthly_results[month_str]['total_oos_hours'] = float(oos_time) monthly_results[month_str]['total_oos_hours'] = float(oos_time)
monthly_results[month_str]['cummulative_oos'] = float(group['cumulative_oos'].max())
cummulative_oos += oos_time
# Flow rate (weighted average) monthly_results[month_str]['cummulative_oos'] = float(cummulative_oos)
# 4. Average flow rate per month (weighted by duration)
# Calculate weighted average flow rate
total_flow_time = (group['flowRate'] * group['duration_hours']).sum() total_flow_time = (group['flowRate'] * group['duration_hours']).sum()
total_time = group['duration_hours'].sum() total_time = group['duration_hours'].sum()
avg_flow_rate = total_flow_time / total_time if total_time > 0 else 0 avg_flow_rate = total_flow_time / total_time if total_time > 0 else 0
monthly_results[month_str]['avg_flow_rate'] = float(avg_flow_rate) monthly_results[month_str]['avg_flow_rate'] = float(avg_flow_rate)
# Extra metrics # Additional useful metrics
monthly_results[month_str]['total_hours'] = float(total_time) monthly_results[month_str]['total_hours'] = float(total_time)
service_hours = group.loc[group['currentEQStatus'] == 'Svc', 'duration_hours'].sum() monthly_results[month_str]['service_hours'] = float(
monthly_results[month_str]['service_hours'] = float(service_hours) group[group['currentEQStatus'] == 'Svc']['duration_hours'].sum()
)
monthly_results[month_str]['availability_percentage'] = float( monthly_results[month_str]['availability_percentage'] = float(
(service_hours / total_time * 100) if total_time > 0 else 0 (monthly_results[month_str]['service_hours'] / total_time * 100) if total_time > 0 else 0
) )
# Derating metrics
derating_hours = group.loc[group['is_derated'], 'duration_hours'].sum()
derated_mwh = group['derated_mwh'].sum()
equivalent_derated_hours = group['derated_hours_equivalent'].sum()
monthly_results[month_str]['derating_hours'] = float(derating_hours)
monthly_results[month_str]['derated_mwh'] = float(derated_mwh)
monthly_results[month_str]['equivalent_derated_hours'] = float(equivalent_derated_hours)
return monthly_results return monthly_results
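# Small illustrative call of analyze_monthly_metrics, assuming the variant that takes
# (timestamp_outs, start_date, max_flow_rate); the sample records are invented.
_sample_timestamps = [
    {"cumulativeTime": 0, "currentEQStatus": "Svc", "flowRate": 550},
    {"cumulativeTime": 100, "currentEQStatus": "OoS", "flowRate": 0},
    {"cumulativeTime": 110, "currentEQStatus": "Svc", "flowRate": 520},
]
_metrics = analyze_monthly_metrics(_sample_timestamps, datetime.datetime(2025, 1, 1), max_flow_rate=550)
# Results are keyed by month period, e.g. _metrics["2025-01"]["failures_count"] == 1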
def calculate_risk_cost_per_failure(monthly_results, birnbaum_importance, energy_price): def calculate_risk_cost_per_failure(monthly_results, birnbaum_importance, energy_price):
""" """
Calculate risk cost per failure for each month based on: Calculate risk cost per failure for each month based on:
@ -293,10 +277,272 @@ def get_monthly_risk_analysis(timestamp_outs, birnbaum_importance, energy_price)
'risk_cost_array': risk_analysis['risk_cost_per_failure_array'] 'risk_cost_array': risk_analysis['risk_cost_per_failure_array']
} }
# Usage example:
# birnbaum_importance = 0.85 # Example value def fetch_reliability(location_tags):
# energy_price = 100 # Example: $100 per unit url = f"{REALIBILITY_SERVICE_API}/asset/batch"
# resp = requests.get(url, json={"location_tags": location_tags})
# results = get_monthly_risk_analysis(timestamp_outs, birnbaum_importance, energy_price) resp.raise_for_status()
# risk_cost_array = results['risk_cost_array'] return resp.json().get("data", [])
# print("Risk cost per failure each month:", risk_cost_array)
import math
from scipy.stats import lognorm, norm
def get_reliability(distribution: str, params: dict, t: float) -> float:
d = (distribution or "").lower()
if d in ["weibull_2p", "weibull_3p"]:
eta = params.get("eta"); beta = params.get("beta"); gamma_ = params.get("gamma", 0)
if eta is None or beta is None: return 1.0
if t <= gamma_: return 1.0
return math.exp(-((t - gamma_) / eta) ** beta)
elif d in ["exponential", "exponential_2p"]:
lam = params.get("lambda") or params.get("Lambda")
if lam is None: return 1.0
return math.exp(-lam * t)
elif "lognormal" in d:
mu = params.get("mu"); sigma = params.get("sigma"); gamma_ = params.get("gamma", 0)
if mu is None or sigma is None: return 1.0
return 1 - lognorm.cdf(max(t-gamma_,0), s=sigma, scale=math.exp(mu))
elif "normal" in d:
mu = params.get("mu"); sigma = params.get("sigma")
if mu is None or sigma is None: return 1.0
return 1 - norm.cdf(t, loc=mu, scale=sigma)
elif "nhpp" in d:
eta = params.get("eta")
beta = params.get("beta")
lam = params.get("lambda", 1)
if eta is None or beta is None:
return 1.0
if t <= 0:
return 1.0 # at time 0, survival = 1
return math.exp(-(t / eta) ** beta)
else:
return 1.0
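# Hedged usage sketch for get_reliability; the Weibull parameters below are made-up examples.
# For a 2-parameter Weibull the survival function is R(t) = exp(-((t - gamma) / eta) ** beta).
_r = get_reliability("weibull_2p", {"eta": 5000, "beta": 1.8}, t=2000)
# _r is the probability of surviving past t = 2000 hours (about 0.83 for these numbers)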
import numpy as np
def failures_per_month(distribution, params, mttr, design_flow_rate=100,
population=1, months=24, hours_per_month=720,
mode="expected", runs=1):
"""
Calculate monthly failures, cumulative failures, downtime, and avg flowrate.
- mode="expected": returns smooth fractional expected values.
- mode="simulate": returns integer values per run (stochastic).
- runs: number of Monte Carlo runs (only used if simulate).
- If simulate with runs>1, returns P50 (median) summary across runs.
"""
all_runs = []
for r in range(runs):
results = []
cumulative = 0
total_oos_hours = 0
for m in range(1, months+1):
t_start = (m-1) * hours_per_month
t_end = m * hours_per_month
R_start = get_reliability(distribution, params, t_start)
R_end = get_reliability(distribution, params, t_end)
# Probability of failure in this month
prob_failure = max(0.0, R_start - R_end)
if mode == "expected":
failures = population * prob_failure # fractional
else: # simulate
failures = np.random.binomial(population, prob_failure)
cumulative += failures
# Downtime (failures × MTTR)
oos_hours = failures * mttr
total_oos_hours += oos_hours
service_hours = hours_per_month - oos_hours
if service_hours < 0:
service_hours = 0
# Availability = service / total
availability = service_hours / hours_per_month
# Avg flowrate scaled
avg_flowrate = design_flow_rate * availability
results.append({
"month": m,
"failures": failures,
"cumulative_failures": cumulative,
"oos_hours": oos_hours,
"total_oos_hours": total_oos_hours,
"service_hours": service_hours,
"availability": availability,
"avg_flowrate": avg_flowrate
})
all_runs.append(results)
# === OUTPUTS ===
if mode == "expected" or runs == 1:
return all_runs[0] # smooth or single trajectory
# === Summarize multiple runs (return only P50 for each field) ===
summary = []
keys = ["failures", "cumulative_failures", "oos_hours",
"total_oos_hours", "service_hours", "availability", "avg_flowrate"]
total_oos_hours = 0
cumulative = 0
for m in range(months):
row = {"month": m+1}
for key in keys:
values = [r[m][key] for r in all_runs]
if key == 'failures':
failures = float(np.percentile(values, 90)) # P50 median
oos_hours = failures * mttr
total_oos_hours += oos_hours
service_hours = hours_per_month - oos_hours
availability = service_hours / hours_per_month
avg_flowrate = design_flow_rate * availability
cumulative += failures
summary.append({
"month": m,
"failures": failures,
"cumulative_failures": cumulative,
"oos_hours": oos_hours,
"total_oos_hours": total_oos_hours,
"service_hours": service_hours,
"availability": availability,
"avg_flowrate": avg_flowrate
})
return summary
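# Illustrative call of failures_per_month in "expected" mode; the distribution parameters,
# mttr and design_flow_rate below are arbitrary example values.
_expected = failures_per_month("weibull_2p", {"eta": 5000, "beta": 1.8},
                               mttr=24, design_flow_rate=550, months=12)
# Each entry holds fractional expected failures plus derived availability and flowrate,
# e.g. _expected[0]["month"] == 1 and 0 <= _expected[0]["availability"] <= 1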
import pandas as pd
def get_reliability_data(location_tags, months=24, design_flow_rate=100):
# 1. Fetch parameters
data = fetch_reliability(location_tags)
all_results = []
for asset in data:
distribution = asset.get("distribution")
params = asset.get("parameters", {})
mttr = 3  # assumed mean time to repair, in hours
tag = asset.get("location_tag")
# 2. Predict monthly
results = failures_per_month(distribution, params, mttr, design_flow_rate, months=months)
# 3. Store with location_tag
for row in results:
row["location_tag"] = tag
all_results.append(row)
return all_results
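# Sketch of how get_reliability_data might be consumed; _reliability_frame is a hypothetical
# helper and the underlying call hits the reliability service, so it is only illustrative.
def _reliability_frame(location_tags, months=24):
    rows = get_reliability_data(location_tags, months=months)
    # One row per (location_tag, month); a DataFrame makes pivoting or plotting easier.
    return pd.DataFrame(rows)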
import numpy as np
import math
def sample_failure_time(distribution, params):
"""Draw one failure time from the reliability distribution."""
d = (distribution or "").lower()
u = np.random.rand()
if d in ["weibull_2p", "weibull_3p"]:
eta = params.get("eta"); beta = params.get("beta"); gamma_ = params.get("gamma", 0)
if eta is None or beta is None: return np.inf
return gamma_ + eta * (-math.log(1-u))**(1/beta)
elif "exponential" in d or "exponential_2p" in d:
lam = params.get("lambda") or params.get("Lambda")
if lam is None: return np.inf
return -math.log(1-u) / lam
elif "lognormal" in d:
mu = params.get("mu"); sigma = params.get("sigma"); gamma_ = params.get("gamma", 0)
if mu is None or sigma is None: return np.inf
return gamma_ + np.random.lognormal(mean=mu, sigma=sigma)
elif "normal" in d:
mu = params.get("mu"); sigma = params.get("sigma")
if mu is None or sigma is None: return np.inf
return max(0, np.random.normal(mu, sigma))
else:
return np.inf
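# Quick self-check sketch for sample_failure_time (example parameters only): the empirical mean
# of many Weibull draws should approach the analytic mean eta * gamma(1 + 1/beta).
_draws = [sample_failure_time("weibull_2p", {"eta": 5000, "beta": 1.8}) for _ in range(5000)]
_empirical_mean = float(np.mean(_draws))
_analytic_mean = 5000 * math.gamma(1 + 1 / 1.8)
# The two means should agree within a few percent for this sample size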
def simulate_failures(distribution, params, mttr, design_flow_rate=100,
population=1, months=24, hours_per_month=720,
runs=1000):
"""
Simulate failures over a given horizon using renewal process.
Always in stochastic mode, results aggregated to P50 across runs.
"""
horizon = months * hours_per_month
all_runs = []
for r in range(runs):
results = []
failures_by_month = [0] * months
for _ in range(population):
# First failure
t = sample_failure_time(distribution, params)
while t < horizon:
month_idx = int(t // hours_per_month)
if month_idx < months:
failures_by_month[month_idx] += 1
# Renewal: after repair (mttr), draw new TTF
t += mttr + sample_failure_time(distribution, params)
# Build results for this run
cumulative = 0
total_oos_hours = 0
for m in range(months):
failures = failures_by_month[m]
cumulative += failures
oos_hours = failures * mttr
total_oos_hours += oos_hours
service_hours = max(0, hours_per_month - oos_hours)
availability = service_hours / hours_per_month
avg_flowrate = design_flow_rate * availability
results.append({
"month": m+1,
"failures": failures,
"cumulative_failures": cumulative,
"oos_hours": oos_hours,
"total_oos_hours": total_oos_hours,
"service_hours": service_hours,
"availability": availability,
"avg_flowrate": avg_flowrate
})
all_runs.append(results)
# === Aggregate to P50 ===
summary = []
for m in range(months):
row = {"month": m+1}
for key in ["failures", "cumulative_failures", "oos_hours",
"total_oos_hours", "service_hours", "availability", "avg_flowrate"]:
values = [r[m][key] for r in all_runs]
row[key] = float(np.percentile(values, 50)) # median
summary.append(row)
return summary
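# Illustrative call of simulate_failures (all parameter values are made up): a renewal
# simulation for a small population, summarised to the median across 200 runs.
_p50 = simulate_failures("weibull_2p", {"eta": 5000, "beta": 1.8},
                         mttr=24, design_flow_rate=550,
                         population=2, months=12, runs=200)
# _p50[m]["failures"] is the median failure count for month m + 1 across the 200 runs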

@ -83,12 +83,3 @@ MAXIMO_API_KEY = config("MAXIMO_API_KEY", default="keys")
AUTH_SERVICE_API = config("AUTH_SERVICE_API", default="http://192.168.1.82:8000/auth") AUTH_SERVICE_API = config("AUTH_SERVICE_API", default="http://192.168.1.82:8000/auth")
REALIBILITY_SERVICE_API = config("REALIBILITY_SERVICE_API", default="http://192.168.1.82:8000/reliability") REALIBILITY_SERVICE_API = config("REALIBILITY_SERVICE_API", default="http://192.168.1.82:8000/reliability")
RBD_SERVICE_API = config("RBD_SERVICE_API", default="http://192.168.1.82:8000/rbd")
TEMPORAL_URL = config("TEMPORAL_URL", default="http://192.168.1.86:7233")
API_KEY = config("API_KEY", default="0KFvcB7zWENyKVjoma9FKZNofVSViEshYr59zEQNGaYjyUP34gCJKDuqHuk9VfvE")
TR_RBD_ID = config("TR_RBD_ID", default="f04f365e-25d8-4036-87c2-ba1bfe1f9229")
TC_RBD_ID = config("TC_RBD_ID", default="f8523cb0-dc3c-4edb-bcf1-eea7b62582f1")
DEFAULT_TC_ID = config("DEFAULT_TC_ID", default="44f483f3-bfe4-4094-a59f-b97a10f2fea6")

@ -5,7 +5,7 @@ from sqlalchemy import Delete, Select, and_, text
from sqlalchemy.orm import selectinload from sqlalchemy.orm import selectinload
from src.auth.service import CurrentUser from src.auth.service import CurrentUser
from src.database.core import CollectorDbSession, DbSession from src.database.core import DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate from src.database.service import CommonParameters, search_filter_sort_paginate
from .model import ScopeEquipmentPart from .model import ScopeEquipmentPart
@ -16,203 +16,139 @@ from .schema import ScopeEquipmentActivityCreate, ScopeEquipmentActivityUpdate
# result = await db_session.get(ScopeEquipmentActivity, scope_equipment_activity_id) # result = await db_session.get(ScopeEquipmentActivity, scope_equipment_activity_id)
# return result # return result
from typing import Optional, List, Dict, Any
from sqlalchemy.ext.asyncio import AsyncSession as DbSession
from sqlalchemy.sql import text
import logging
logger = logging.getLogger(__name__)
# async def get_all(
# db_session: CollectorDbSession,
# location_tag: Optional[str] = None,
# start_year: int = 2023,
# end_year: Optional[int] = None,
# parent_wonum: Optional[str] = None
# ) -> List[Dict[str, Any]]:
# """
# Retrieve overhaul spare parts consumption data.
# Handles missing data, null parent WO, and query safety.
# Args:
# db_session: Async SQLAlchemy session
# location_tag: Optional location filter
# start_year: Year to start analysis (default 2023)
# end_year: Optional year to end analysis (default start_year + 1)
# parent_wonum: Parent work order number (required for context)
# Returns:
# List of dictionaries with spare part usage per overhaul WO.
# """
# # --- 1. Basic validation ---
# if not parent_wonum:
# logger.warning("Parent WO number not provided. Returning empty result.")
# return []
# if start_year < 1900 or (end_year and end_year < start_year):
# raise ValueError("Invalid year range provided.")
# if end_year is None:
# end_year = start_year + 1
# # --- 2. Build SQL safely ---
# base_query = """
# WITH filtered_wo AS (
# SELECT wonum, location_tag
# FROM public.wo_max
# WHERE worktype = 'OH'
# AND xx_parent = :parent_wonum
# """
# params = {
# "parent_wonum": parent_wonum,
# }
# if location_tag:
# base_query += " AND location_tag = :location_tag"
# params["location_tag"] = location_tag
# base_query += """
# ),
# filtered_materials AS (
# SELECT wonum, itemnum, itemqty, inv_curbaltotal, inv_avgcost
# FROM public.wo_max_material
# WHERE wonum IN (SELECT wonum FROM filtered_wo)
# )
# SELECT
# fwo.location_tag AS location_tag,
# fm.itemnum,
# spl.description AS sparepart_name,
# COALESCE(SUM(fm.itemqty), 0) AS parts_consumed_in_oh,
# COALESCE(AVG(fm.inv_avgcost), 0) AS avgcost,
# COALESCE(AVG(fm.inv_curbaltotal), 0) AS inv_curbaltotal
# FROM filtered_wo fwo
# INNER JOIN filtered_materials fm ON fwo.wonum = fm.wonum
# LEFT JOIN public.maximo_sparepart_pr_po_line spl ON fm.itemnum = spl.item_num
# GROUP BY fwo.location_tag, fm.itemnum, spl.description
# ORDER BY fwo.location_tag, fm.itemnum;
# """
# # --- 3. Execute query ---
# try:
# result = await db_session.execute(text(base_query), params)
# rows = result.fetchall()
# # Handle "no data found"
# if not rows:
# logger.info(f"No spare part data found for parent WO {parent_wonum}.")
# return []
# # --- 4. Map results cleanly ---
# equipment_parts = []
# for row in rows:
# try:
# equipment_parts.append({
# "location_tag": row.location_tag,
# "itemnum": row.itemnum,
# "sparepart_name": row.sparepart_name or "-",
# "parts_consumed_in_oh": float(row.parts_consumed_in_oh or 0),
# "avgcost": float(row.avgcost or 0),
# "inv_curbaltotal": float(row.inv_curbaltotal or 0)
# })
# except Exception as parse_err:
# logger.error(f"Failed to parse row {row}: {parse_err}")
# continue # Skip malformed rows
# return equipment_parts
# except Exception as e:
# logger.exception(f"Database query failed: {e}")
# raise RuntimeError("Failed to fetch overhaul spare parts data.") from e
from typing import List, Dict, Any, Optional
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.sql import text
import random
def create_dummy_parts(assetnum: str, count: int = 5):
"""
Create a list of dummy ScopeEquipmentPart objects with random stock values.
Args:
assetnum (str): The base asset number to generate dummy parts for.
count (int): The number of parts to create. Default is 5.
Returns:
List[ScopeEquipmentPart]: A list of dummy ScopeEquipmentPart objects.
"""
parts = []
for i in range(1, count + 1):
# Generate a unique part asset number
part_assetnum = f"{assetnum}_PART_{i}"
stock = random.randint(1, 100) # Random stock value between 1 and 100
parts.append({"assetnum": part_assetnum, "stock": stock})
return parts
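# Simple usage sketch for create_dummy_parts; "BFPT-3A" is just an example asset number.
_dummy_parts = create_dummy_parts("BFPT-3A", count=3)
# e.g. [{"assetnum": "BFPT-3A_PART_1", "stock": 42}, ...] with random stock values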
from sqlalchemy import text
from typing import Optional, List, Dict, Any
from datetime import datetime
async def get_all( async def get_all(
db_session: AsyncSession, db_session: DbSession,
location_tag: Optional[str] = None, location_tag: Optional[str] = None,
start_year: int = 2023, start_year: int = 2023,
end_year: Optional[int] = None end_year: Optional[int] = None
) -> List[Dict[str, Any]]: ) -> List[Dict[str, Any]]:
""" """
Get overhaul spare parts consumption data with optimized query. Get overhaul spare parts consumption data with optimized query
Args: Args:
db_session: SQLAlchemy async database session db_session: Database session
location_tag: Optional filter for location (asset_location) location_tag: Optional location filter
start_year: Starting year (default: 2023) start_year: Year to start analysis (default: 2023)
end_year: Ending year (default: start_year + 1) end_year: Year to end analysis (default: start_year + 1)
Returns: Returns:
List of dictionaries with spare parts consumption data List of dictionaries containing spare parts consumption data
""" """
# Set default end year
# Set end year if not provided
if end_year is None: if end_year is None:
end_year = start_year + 1 end_year = start_year + 1
# Build query dynamically # Build dynamic query
query_str = """ base_query = """
WITH filtered_wo AS ( WITH filtered_wo AS (
SELECT DISTINCT wonum, asset_location, asset_unit SELECT wonum, asset_location
FROM public.wo_maximo ma FROM public.wo_staging_maximo_2
WHERE ma.xx_parent IN ('155026', '155027', '155029', '155030') WHERE worktype = 'OH'
""" """
params = {} params = {}
# Optional filter for location # Add location filter to CTE if provided
if location_tag: if location_tag:
query_str += " AND asset_location = :location_tag" base_query += " AND asset_location = :location_tag"
params["location_tag"] = location_tag params["location_tag"] = location_tag
query_str += """ base_query += """
), ),
filtered_materials AS ( filtered_transactions AS (
SELECT SELECT wonum, itemnum, curbal
mat.wonum, FROM public.maximo_material_use_transactions
mat.itemnum, WHERE issuetype = 'ISSUE'
mat.itemqty, AND wonum IN (SELECT wonum FROM filtered_wo)
mat.inv_curbaltotal AS inv_curbaltotal,
mat.inv_avgcost AS inv_avgcost
FROM public.wo_maximo_material AS mat
WHERE mat.wonum IN (SELECT wonum FROM filtered_wo)
) )
SELECT SELECT
fwo.asset_location AS location_tag, fwo.asset_location AS location_tag,
ft.itemnum, ft.itemnum,
COALESCE(spl.description, 'Unknown') AS sparepart_name, spl.description AS sparepart_name,
AVG(ft.itemqty) AS total_parts_used, COUNT(*) AS parts_consumed_in_oh,
COALESCE(AVG(ft.inv_avgcost), 0) AS avg_cost, MIN(ft.curbal) AS min_remaining_balance,
COALESCE(AVG(ft.inv_curbaltotal), 0) AS avg_inventory_balance MAX(mi.curbaltotal) AS inv_curbaltotal
FROM filtered_wo AS fwo FROM filtered_wo fwo
INNER JOIN filtered_materials AS ft INNER JOIN filtered_transactions ft ON fwo.wonum = ft.wonum
ON fwo.wonum = ft.wonum INNER JOIN public.maximo_inventory mi ON ft.itemnum = mi.itemnum
LEFT JOIN public.maximo_sparepart_pr_po_line AS spl LEFT JOIN public.maximo_sparepart_pr_po_line spl ON ft.itemnum = spl.item_num
ON ft.itemnum = spl.item_num
GROUP BY fwo.asset_location, ft.itemnum, spl.description GROUP BY fwo.asset_location, ft.itemnum, spl.description
ORDER BY fwo.asset_location, ft.itemnum; ORDER BY fwo.asset_location, ft.itemnum
""" """
query = text(base_query)
try: try:
result = await db_session.execute(text(query_str), params) results = await db_session.execute(query, params)
rows = result.fetchall()
equipment_parts = [] equipment_parts = []
for row in rows: for row in results:
equipment_parts.append({ equipment_parts.append({
"location_tag": row.location_tag, "location_tag": row.location_tag,
"itemnum": row.itemnum, "itemnum": row.itemnum,
"sparepart_name": row.sparepart_name, "sparepart_name": row.sparepart_name,
"parts_consumed_in_oh": float(row.total_parts_used or 0), "parts_consumed_in_oh": row.parts_consumed_in_oh,
"avg_cost": float(row.avg_cost or 0), "min_remaining_balance": float(row.min_remaining_balance),
"inv_curbaltotal": float(row.avg_inventory_balance or 0), "inv_curbaltotal": float(row.inv_curbaltotal)
}) })
return equipment_parts return equipment_parts
except Exception as e: except Exception as e:
print(f"[get_all] Database query error: {e}") # Log the error appropriately in your application
print(f"Database query error: {e}")
raise raise
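# Hedged sketch of calling get_all from an async context; _example_get_all is a hypothetical
# wrapper and "BFPT-3A" is a placeholder location tag.
async def _example_get_all(db_session):
    parts = await get_all(db_session, location_tag="BFPT-3A", start_year=2023)
    # Each entry carries location_tag, itemnum, sparepart_name and the aggregated columns.
    return parts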
# async def create(*, db_session: DbSession, scope_equipment_activty_in: ScopeEquipmentActivityCreate):
# activity = ScopeEquipmentActivity(
# **scope_equipment_activty_in.model_dump())
# db_session.add(activity)
# await db_session.commit()
# return activity
# async def update(*, db_session: DbSession, activity: ScopeEquipmentActivity, scope_equipment_activty_in: ScopeEquipmentActivityUpdate):
# """Updates a document."""
# data = scope_equipment_activty_in.model_dump()
# update_data = scope_equipment_activty_in.model_dump(exclude_defaults=True)
# for field in data:
# if field in update_data:
# setattr(activity, field, update_data[field])
# await db_session.commit()
# return activity
# async def delete(*, db_session: DbSession, scope_equipment_activity_id: str):
# """Deletes a document."""
# activity = await db_session.get(ScopeEquipmentActivity, scope_equipment_activity_id)
# await db_session.delete(activity)
# await db_session.commit()

@ -46,7 +46,7 @@ app = FastAPI(
) )
app.state.limiter = limiter app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler) app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
app.add_middleware(GZipMiddleware, minimum_size=1000) app.add_middleware(GZipMiddleware, minimum_size=2000)
# credentials: "include", # credentials: "include",

@ -1,5 +1,5 @@
from datetime import datetime from datetime import datetime
from sqlalchemy import select, func, cast, Numeric, text from sqlalchemy import select, func, cast, Numeric
from sqlalchemy.orm import Session from sqlalchemy.orm import Session
from sqlalchemy import and_ from sqlalchemy import and_
from sqlalchemy.sql import not_ from sqlalchemy.sql import not_
@ -8,246 +8,72 @@ from src.database.core import CollectorDbSession
async def get_cm_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime): async def get_cm_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime):
query = text("""WITH part_costs AS ( query = select(
SELECT WorkOrderData.location,
mu.wonum, (func.sum(WorkOrderData.total_cost_max).cast(Numeric) / func.count(WorkOrderData.wonum)).label('avg_cost')
SUM(mu.itemqty * COALESCE(inv.avgcost, po.unit_cost, 0)) AS parts_total_cost ).where(
FROM maximo_workorder_materials mu and_(
LEFT JOIN maximo_inventory inv # WorkOrderData.wo_start >= last_oh_date,
ON mu.itemnum = inv.itemnum # WorkOrderData.wo_start <= upcoming_oh_date,
LEFT JOIN ( WorkOrderData.worktype.in_(['CM', 'EM', 'PROACTIVE']),
SELECT item_num, AVG(unit_cost) AS unit_cost WorkOrderData.system_tag.in_(['HPB', 'AH', 'APC', 'SCR', 'CL', 'DM', 'CRH', 'ASH', 'BAD', 'DS', 'WTP',
FROM maximo_sparepart_pr_po_line 'MT', 'SUP', 'DCS', 'FF', 'EG', 'AI', 'SPS', 'EVM', 'SCW', 'KLH', 'CH',
GROUP BY item_num 'TUR', 'LOT', 'HRH', 'ESP', 'CAE', 'GMC', 'BFT', 'LSH', 'CHB', 'BSS',
) po 'LOS', 'LPB', 'SAC', 'CP', 'EHS', 'RO', 'GG', 'MS', 'CW', 'SO', 'ATT',
ON mu.itemnum = po.item_num 'AFG', 'EHB', 'RP', 'FO', 'PC', 'APE', 'AF', 'DMW', 'BRS', 'GEN', 'ABS',
GROUP BY mu.wonum 'CHA', 'TR', 'H2', 'BDW', 'LOM', 'ACR', 'AL', 'FW', 'COND', 'CCCW', 'IA',
), 'GSS', 'BOL', 'SSB', 'CO', 'OA', 'CTH-UPD', 'AS', 'DP']),
wo_costs AS ( WorkOrderData.reportdate.is_not(None),
SELECT WorkOrderData.actstart.is_not(None),
w.wonum, WorkOrderData.actfinish.is_not(None),
w.asset_location, WorkOrderData.unit.in_([3, 0]),
(COALESCE(w.mat_cost_max, 0) + COALESCE(pc.parts_total_cost, 0)) AS total_wo_cost WorkOrderData.reportdate >= datetime.strptime('2015-01-01', '%Y-%m-%d'),
FROM wo_staging_maximo_2 w not_(WorkOrderData.wonum.like('T%'))
LEFT JOIN part_costs pc
ON w.wonum = pc.wonum
WHERE
w.worktype IN ('CM', 'EM', 'PROACTIVE')
AND w.asset_system IN (
'HPB','AH','APC','SCR','CL','DM','CRH','ASH','BAD','DS','WTP',
'MT','SUP','DCS','FF','EG','AI','SPS','EVM','SCW','KLH','CH',
'TUR','LOT','HRH','ESP','CAE','GMC','BFT','LSH','CHB','BSS',
'LOS','LPB','SAC','CP','EHS','RO','GG','MS','CW','SO','ATT',
'AFG','EHB','RP','FO','PC','APE','AF','DMW','BRS','GEN','ABS',
'CHA','TR','H2','BDW','LOM','ACR','AL','FW','COND','CCCW','IA',
'GSS','BOL','SSB','CO','OA','CTH-UPD','AS','DP'
) )
AND w.reportdate IS NOT NULL ).group_by(
AND w.actstart IS NOT NULL WorkOrderData.location
AND w.actfinish IS NOT NULL ).order_by(
AND w.asset_unit IN ('3','00') func.count(WorkOrderData.wonum).desc()
AND w.reportdate >= '2015-01-01' )
AND w.wonum NOT LIKE 'T%' result = await collector_db.execute(query)
), data = result.all()
-- find max cost per location
location_max AS (
SELECT asset_location, MAX(total_wo_cost) AS max_cost
FROM wo_costs
WHERE total_wo_cost > 0
GROUP BY asset_location
),
-- filter WO costs to only reasonable range (e.g. >0 and >=10% of max)
filtered_wo AS (
SELECT w.*
FROM wo_costs w
JOIN location_max lm ON w.asset_location = lm.asset_location
WHERE w.total_wo_cost > 0
)
SELECT
asset_location,
SUM(total_wo_cost)::numeric / COUNT(wonum) AS avg_cost
FROM filtered_wo
GROUP BY asset_location
ORDER BY avg_cost DESC;
""")
results = await collector_db.execute(query)
data = []
for row in results:
data.append({
"location_tag": row.asset_location,
"avg_cost": row.avg_cost
})
return { return {
item["location_tag"]: item["avg_cost"] for item in data data.location: data.avg_cost for data in data
} }
# async def get_oh_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime):
# query = text("""
# WITH target_wo AS (
# -- Get work orders under a specific parent(s)
# SELECT
# wonum,
# xx_parent,
# assetnum,
# location_tag AS asset_location,
# actmatcost,
# actservcost,
# reportdate
# FROM public.wo_maxim
# WHERE xx_parent = ANY(:parent_nums)
# ),
# part_costs AS (
# -- Calculate parts cost per WO if actmatcost = 0
# SELECT
# wm.wonum,
# SUM(
# wm.itemqty *
# COALESCE(wm.inv_avgcost, po.unit_cost, 0)
# ) AS parts_total_cost
# FROM public.wo_maxim_material wm
# LEFT JOIN (
# SELECT item_num, AVG(unit_cost) AS unit_cost
# FROM public.maximo_sparepart_pr_po_line
# GROUP BY item_num
# ) po ON wm.itemnum = po.item_num
# WHERE wm.itemnum IS NOT NULL
# GROUP BY wm.wonum
# ),
# wo_costs AS (
# SELECT
# w.wonum,
# w.asset_location,
# CASE
# WHEN COALESCE(w.actmatcost, 0) > 0 THEN COALESCE(w.actmatcost, 0)
# ELSE COALESCE(pc.parts_total_cost, 0)
# END AS material_cost,
# COALESCE(w.actservcost, 0) AS service_cost
# FROM target_wo w
# LEFT JOIN part_costs pc ON w.wonum = pc.wonum
# )
# SELECT
# asset_location,
# ROUND(SUM(material_cost + service_cost)::numeric / COUNT(wonum), 2) AS avg_cost,
# COUNT(wonum) AS total_wo_count
# FROM wo_costs
# GROUP BY asset_location
# ORDER BY total_wo_count DESC;
# """)
# parent_nums = []
# result = await collector_db.execute(query, {"parent_nums": parent_nums})
# data = []
# for row in result:
# data.append({
# "location_tag": row.asset_location,
# "avg_cost": float(row.avg_cost or 0.0),
# "total_wo_count": row.total_wo_count,
# })
# return {item["location_tag"]: item["avg_cost"] for item in data}
async def get_oh_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime): async def get_oh_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime):
# query = text(""" query = select(
# WITH part_costs AS ( WorkOrderData.location,
# SELECT (func.sum(WorkOrderData.total_cost_max).cast(Numeric) / func.count(WorkOrderData.wonum)).label('avg_cost')
# wm.wonum, ).where(
# SUM(wm.itemqty * COALESCE(wm.inv_avgcost, po.unit_cost, 0)) AS parts_total_cost and_(
# FROM public.wo_maxim_material wm # WorkOrderData.wo_start >= last_oh_date,
# LEFT JOIN ( # WorkOrderData.wo_start <= upcoming_oh_date,
# SELECT item_num, AVG(unit_cost) AS unit_cost WorkOrderData.worktype.in_(['OH']),
# FROM public.maximo_sparepart_pr_po_line WorkOrderData.system_tag.in_(['HPB', 'AH', 'APC', 'SCR', 'CL', 'DM', 'CRH', 'ASH', 'BAD', 'DS', 'WTP',
# GROUP BY item_num 'MT', 'SUP', 'DCS', 'FF', 'EG', 'AI', 'SPS', 'EVM', 'SCW', 'KLH', 'CH',
# ) po ON wm.itemnum = po.item_num 'TUR', 'LOT', 'HRH', 'ESP', 'CAE', 'GMC', 'BFT', 'LSH', 'CHB', 'BSS',
# WHERE wm.itemnum IS NOT NULL 'LOS', 'LPB', 'SAC', 'CP', 'EHS', 'RO', 'GG', 'MS', 'CW', 'SO', 'ATT',
# GROUP BY wm.wonum 'AFG', 'EHB', 'RP', 'FO', 'PC', 'APE', 'AF', 'DMW', 'BRS', 'GEN', 'ABS',
# ), 'CHA', 'TR', 'H2', 'BDW', 'LOM', 'ACR', 'AL', 'FW', 'COND', 'CCCW', 'IA',
# wo_costs AS ( 'GSS', 'BOL', 'SSB', 'CO', 'OA', 'CTH-UPD', 'AS', 'DP']),
# SELECT WorkOrderData.reportdate.is_not(None),
# w.wonum, WorkOrderData.actstart.is_not(None),
# w.asset_location, WorkOrderData.actfinish.is_not(None),
# -- Use mat_cost_max if parts_total_cost = 0 WorkOrderData.unit.in_([3, 0]),
# CASE WorkOrderData.reportdate >= datetime.strptime('2015-01-01', '%Y-%m-%d'),
# WHEN COALESCE(pc.parts_total_cost, 0) = 0 THEN COALESCE(w.mat_cost_max , 0) not_(WorkOrderData.wonum.like('T%'))
# ELSE COALESCE(pc.parts_total_cost, 0) )
# END AS total_wo_cost ).group_by(
# FROM wo_staging_maximo_2 w WorkOrderData.location
# LEFT JOIN part_costs pc ).order_by(
# ON w.wonum = pc.wonum func.count(WorkOrderData.wonum).desc()
# WHERE
# w.worktype = 'OH'
# AND w.reportdate IS NOT NULL
# AND w.actstart IS NOT NULL
# AND w.actfinish IS NOT NULL
# AND w.asset_unit IN ('3', '00')
# AND w.wonum NOT LIKE 'T%'
# )
# SELECT
# asset_location,
# AVG(total_wo_cost) AS avg_cost
# FROM wo_costs
# GROUP BY asset_location
# ORDER BY COUNT(wonum) DESC;
# """)
query = text("""
WITH part_costs AS (
SELECT
wm.wonum,
SUM(wm.itemqty * COALESCE(inv.avgcost, po.unit_cost, 0)) AS parts_total_cost
FROM public.maximo_workorder_materials wm
JOIN public.maximo_inventory AS inv on inv.itemnum = wm.itemnum
LEFT JOIN (
SELECT item_num, AVG(unit_cost) AS unit_cost
FROM public.maximo_sparepart_pr_po_line
GROUP BY item_num
) po ON wm.itemnum = po.item_num
WHERE wm.itemnum IS NOT NULL
GROUP BY wm.wonum
),
wo_costs AS (
SELECT
w.wonum,
w.asset_location,
-- Use mat_cost_max if parts_total_cost = 0
CASE
WHEN COALESCE(pc.parts_total_cost, 0) = 0 THEN COALESCE(w.mat_cost_max , 0)
ELSE COALESCE(pc.parts_total_cost, 0)
END AS total_wo_cost
FROM wo_staging_maximo_2 w
LEFT JOIN part_costs pc
ON w.wonum = pc.wonum
WHERE
w.worktype = 'OH'
AND w.reportdate IS NOT NULL
AND w.actstart IS NOT NULL
AND w.actfinish IS NOT NULL
AND w.asset_unit IN ('3', '00')
AND w.wonum NOT LIKE 'T%'
) )
SELECT
asset_location,
AVG(total_wo_cost) AS avg_cost
FROM wo_costs
GROUP BY asset_location
ORDER BY COUNT(wonum) DESC;
""")
result = await collector_db.execute(query) result = await collector_db.execute(query)
data = [] data = result.all()
for row in result:
data.append({
"location_tag": row.asset_location,
"avg_cost": row.avg_cost
})
return { return {
item["location_tag"]: item["avg_cost"] for item in data data.location: data.avg_cost for data in data
} }

@ -3,7 +3,7 @@ from typing import List
from fastapi import APIRouter, HTTPException, status from fastapi import APIRouter, HTTPException, status
from src.auth.service import Token from src.auth.service import Token
from src.database.core import CollectorDbSession, DbSession from src.database.core import DbSession
from src.models import StandardResponse from src.models import StandardResponse
from src.overhaul.service import (get_overhaul_critical_parts, from src.overhaul.service import (get_overhaul_critical_parts,
get_overhaul_overview, get_overhaul_overview,
@ -18,11 +18,11 @@ router = APIRouter()
@router.get("", response_model=StandardResponse[OverhaulRead]) @router.get("", response_model=StandardResponse[OverhaulRead])
async def get_overhaul(db_session: DbSession, token:Token, collector_db_session:CollectorDbSession): async def get_overhaul(db_session: DbSession, token:Token):
"""Get all scope pagination.""" """Get all scope pagination."""
overview = await get_overhaul_overview(db_session=db_session) overview = await get_overhaul_overview(db_session=db_session)
schedules = await get_overhaul_schedules(db_session=db_session) schedules = await get_overhaul_schedules(db_session=db_session)
criticalParts = await get_overhaul_critical_parts(db_session=db_session, session_id=overview["overhaul"]["id"], token=token, collector_db_session=collector_db_session) criticalParts = await get_overhaul_critical_parts(db_session=db_session, session_id=overview["overhaul"]["id"], token=token)
systemComponents = get_overhaul_system_components() systemComponents = get_overhaul_system_components()
return StandardResponse( return StandardResponse(

@ -6,7 +6,6 @@ from sqlalchemy import Delete, Select
from src.auth.service import CurrentUser from src.auth.service import CurrentUser
from src.calculation_target_reliability.service import RBD_SERVICE_API from src.calculation_target_reliability.service import RBD_SERVICE_API
from src.config import TC_RBD_ID
from src.database.core import DbSession from src.database.core import DbSession
from src.contribution_util import calculate_contribution from src.contribution_util import calculate_contribution
from src.overhaul_activity.service import get_standard_scope_by_session_id from src.overhaul_activity.service import get_standard_scope_by_session_id
@ -29,9 +28,9 @@ async def get_simulation_results(*, simulation_id: str, token: str):
"Content-Type": "application/json" "Content-Type": "application/json"
} }
calc_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/{simulation_id}?nodetype=RegularNode" calc_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/default?nodetype=RegularNode"
# plot_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/plot/{simulation_id}?nodetype=RegularNode" # plot_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/plot/{simulation_id}?nodetype=RegularNode"
calc_plant_result = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/{simulation_id}/plant" calc_plant_result = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/default/plant"
async with httpx.AsyncClient(timeout=300.0) as client: async with httpx.AsyncClient(timeout=300.0) as client:
calc_task = client.get(calc_result_url, headers=headers) calc_task = client.get(calc_result_url, headers=headers)
@ -55,17 +54,16 @@ async def get_simulation_results(*, simulation_id: str, token: str):
"plant_result": plant_data "plant_result": plant_data
} }
async def get_overhaul_critical_parts(db_session, session_id, token, collector_db_session): async def get_overhaul_critical_parts(db_session, session_id, token):
"""Get all overhaul critical parts.""" """Get all overhaul critical parts."""
equipments = await get_standard_scope_by_session_id( equipments, _ = await get_by_oh_session_id(
db_session=db_session, db_session=db_session,
overhaul_session_id=session_id, oh_session_id=session_id,
collector_db=collector_db_session
) )
criticality_simulation = await get_simulation_results( criticality_simulation = await get_simulation_results(
simulation_id = TC_RBD_ID, simulation_id="default",
token=token token=token
) )
@ -81,7 +79,7 @@ async def get_overhaul_critical_parts(db_session, session_id, token, collector_d
{ {
"id": equipment.id, "id": equipment.id,
"location_tag": equipment.location_tag, "location_tag": equipment.location_tag,
"name": equipment.equipment_name, "name": equipment.master_equipment.name,
"matrix": rbd_simulation.get(equipment.location_tag) "matrix": rbd_simulation.get(equipment.location_tag)
} for equipment in equipments } for equipment in equipments
@ -105,7 +103,7 @@ async def get_overhaul_critical_parts(db_session, session_id, token, collector_d
)[:10] )[:10]
return { return {
"availability" :availability_result, "availability" : availability_result,
"criticality": criticality_result "criticality": criticality_result
} }

@ -136,6 +136,9 @@ async def get_all(
# page = common.get("page", 1) # page = common.get("page", 1)
# items_per_page = common.get("items_per_page", 10) # items_per_page = common.get("items_per_page", 10)
# Sort by overhaul_cost descending
results.sort(key=lambda x: x.overhaul_cost, reverse=True)
# Build response data # Build response data
data = { data = {
"items": results, "items": results,

@ -1,17 +0,0 @@
from sqlalchemy import Column, String
from src.database.core import Base
from src.models import DefaultMixin
class OverhaulGantt(Base, DefaultMixin):
__tablename__ = "oh_ms_monitoring_spreadsheet"
spreadsheet_id = Column(String, nullable=True)
spreadsheet_link = Column(String, nullable=True)

@ -1,15 +1,11 @@
import re
from typing import List, Optional from typing import List, Optional
from fastapi import APIRouter, HTTPException, status from fastapi import APIRouter, HTTPException, status
from sqlalchemy import select
from src.auth.service import CurrentUser from src.auth.service import CurrentUser
from src.database.core import DbSession from src.database.core import DbSession
from src.database.service import CommonParameters from src.database.service import CommonParameters
from src.models import StandardResponse from src.models import StandardResponse
from src.overhaul_gantt.model import OverhaulGantt
from src.overhaul_gantt.schema import OverhaulGanttIn
# from .schema import (OverhaulScheduleCreate, OverhaulSchedulePagination, OverhaulScheduleUpdate) # from .schema import (OverhaulScheduleCreate, OverhaulSchedulePagination, OverhaulScheduleUpdate)
from .service import get_gantt_performance_chart from .service import get_gantt_performance_chart
@ -18,93 +14,18 @@ router = APIRouter()
@router.get( @router.get(
"", response_model=StandardResponse[dict] "", response_model=StandardResponse[list]
) )
async def get_gantt_performance(db_session: DbSession): async def get_gantt_performance():
"""Get all scope pagination.""" """Get all scope pagination."""
# return # return
query = select(OverhaulGantt).limit(1) results = await get_gantt_performance_chart()
data = (await db_session.execute(query)).scalar_one_or_none()
results, gantt_data = await get_gantt_performance_chart(spreadsheet_id=data.spreadsheet_id)
return StandardResponse(
data={
"chart_data": results,
"gantt_data": gantt_data
},
message="Data retrieved successfully",
)
@router.get(
"/spreadsheet", response_model=StandardResponse[dict]
)
async def get_gantt_spreadsheet(db_session: DbSession):
"""Get all scope pagination."""
# return
query = select(OverhaulGantt).limit(1)
data = (await db_session.execute(query)).scalar_one_or_none()
result = {
"spreadsheet_id": None,
"spreadsheet_link": None
}
if data:
result = {
"spreadsheet_id": data.spreadsheet_id,
"spreadsheet_link": data.spreadsheet_link
}
return StandardResponse( return StandardResponse(
data=result, data=results,
message="Data retrieved successfully", message="Data retrieved successfully",
) )
@router.post(
"/spreadsheet", response_model=StandardResponse[dict]
)
async def update_gantt_spreadsheet(db_session: DbSession, spreadsheet_in: OverhaulGanttIn):
"""Get all scope pagination."""
# return
match = re.search(r"/d/([a-zA-Z0-9-_]+)", spreadsheet_in.spreadsheet_link)
if not match:
raise ValueError("Invalid Google Sheets URL")
spreadsheet_id = match.group(1)
query = select(OverhaulGantt).limit(1)
data = (await db_session.execute(query)).scalar_one_or_none()
if data:
data.spreadsheet_link = spreadsheet_in.spreadsheet_link
data.spreadsheet_id = spreadsheet_id
else:
spreadsheet = OverhaulGantt(
spreadsheet_id=spreadsheet_id,
spreadsheet_link=spreadsheet_in.spreadsheet_link
)
db_session.add(spreadsheet)
await db_session.commit()
if data:
result = {
"spreadsheet_id": spreadsheet_id
}
return StandardResponse(
data=result,
message="Data retrieved successfully",
)
# @router.post("", response_model=StandardResponse[None]) # @router.post("", response_model=StandardResponse[None])

@ -9,12 +9,8 @@
# from src.scope_equipment_job.schema import ScopeEquipmentJobRead # from src.scope_equipment_job.schema import ScopeEquipmentJobRead
# from src.job.schema import ActivityMasterRead # from src.job.schema import ActivityMasterRead
from pydantic import Field # class OverhaulScheduleBase(DefultBase):
from src.models import DefultBase # pass
class OverhaulGanttIn(DefultBase):
spreadsheet_link: str = Field(...)
# class OverhaulScheduleCreate(OverhaulScheduleBase): # class OverhaulScheduleCreate(OverhaulScheduleBase):

@ -6,7 +6,7 @@ from sqlalchemy.orm import selectinload
# from .model import OverhaulSchedule # from .model import OverhaulSchedule
# from .schema import OverhaulScheduleCreate, OverhaulScheduleUpdate # from .schema import OverhaulScheduleCreate, OverhaulScheduleUpdate
from .utils import fetch_all_sections, get_google_creds, get_spreatsheed_service, process_spreadsheet_data from .utils import get_google_creds, get_spreatsheed_service, process_spreadsheet_data
# async def get_all(*, common): # async def get_all(*, common):
# """Returns all documents.""" # """Returns all documents."""
@ -53,60 +53,20 @@ from .utils import fetch_all_sections, get_google_creds, get_spreatsheed_service
async def get_gantt_performance_chart(*, spreadsheet_id = "1gZXuwA97zU1v4QBv56wKeiqadc6skHUucGKYG8qVFRk"): async def get_gantt_performance_chart(*, spreadsheet_id = "1gZXuwA97zU1v4QBv56wKeiqadc6skHUucGKYG8qVFRk"):
creds = get_google_creds() creds = get_google_creds()
RANGE_NAME = "'SUMMARY'!K34:AZ38" # Or just "2024 schedule" RANGE_NAME = "'2024 kurva s'!N79:BJ83" # Or just "2024 schedule"
GANTT_DATA_NAME = "ACTUAL PROGRESS"
try: try:
service = get_spreatsheed_service(creds) service = get_spreatsheed_service(creds)
sheet = service.spreadsheets() sheet = service.spreadsheets()
response = sheet.values().get(spreadsheetId=spreadsheet_id, range=RANGE_NAME).execute()
response = sheet.values().get(
spreadsheetId=spreadsheet_id,
range=RANGE_NAME
).execute()
values = response.get("values", []) values = response.get("values", [])
keys = ['day', 'time', 'plan', 'actual', 'gap']
if len(values) < 5: transposed = list(zip(*values))
raise Exception("Spreadsheet format invalid: expected DAY, DATE, PLAN and ACTUAL rows.") results = [dict(zip(keys, result)) for result in transposed]
# Extract rows
day_row = values[0][1:]
date_row = values[1][1:]
plan_row = values[3][1:]
actual_row = values[4][1:]
total_days = len(day_row)
# PAD rows so lengths match day count
date_row += [""] * (total_days - len(date_row))
plan_row += [""] * (total_days - len(plan_row))
actual_row += [""] * (total_days - len(actual_row))
results = []
for i in range(total_days):
day = day_row[i]
date = date_row[i]
plan = plan_row[i]
actual = actual_row[i] if actual_row[i] else "0%" # <-- FIX HERE
results.append({
"day": day,
"date": date,
"plan": plan,
"actual": actual
})
except Exception as e: except Exception as e:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=e)
processed_data = process_spreadsheet_data(results) processed_data = process_spreadsheet_data(results)
gantt_data = fetch_all_sections(service=service, spreadsheet_id=spreadsheet_id, sheet_name=GANTT_DATA_NAME)
return processed_data, gantt_data
return processed_data

@ -22,176 +22,44 @@ def process_spreadsheet_data(rows):
processed_data = [] processed_data = []
for row in rows: for row in rows:
processed_row = convert_spreadsheet_data(row) processed_row = convert_spreadsheet_data(row)
processed_data.append(processed_row) if processed_row else None processed_data.append(processed_row)
return processed_data return processed_data
from datetime import datetime def convert_spreadsheet_data(data):
from datetime import datetime
def convert_spreadsheet_data(data, default_year=None):
"""
Convert spreadsheet row into structured data.
Expected keys: day, date, plan, actual
"""
# Skip header or invalid rows
if not data.get("day") or not data["day"].isdigit():
return None
result = {} result = {}
# Convert day # Convert day to integer
result["day"] = int(data["day"]) result['day'] = int(data['day'])
# Determine default year # Convert time to a datetime object
if default_year is None: from datetime import datetime
default_year = datetime.now().year # Assuming Indonesian format with month names
# Replace Indonesian month names with English if needed
date_str = data.get("date", "").strip() month_mapping = {
'Januari': 'January', 'Februari': 'February', 'Maret': 'March',
# ---------- DATE HANDLING ---------- 'April': 'April', 'Mei': 'May', 'Juni': 'June',
# Accept formats like: "Nov 20", "Dec 3", "Jan 1" 'Juli': 'July', 'Agustus': 'August', 'September': 'September',
parsed_date = None 'Oktober': 'October', 'November': 'November', 'Desember': 'December'
if date_str:
try:
parsed_date = datetime.strptime(f"{date_str} {default_year}", "%b %d %Y")
except ValueError:
try:
parsed_date = datetime.strptime(f"{date_str} {default_year}", "%B %d %Y")
except:
parsed_date = None
# YEAR ROLLOVER (Dec → Jan next year)
if parsed_date and parsed_date.month == 1 and "Dec" in data.get("date", ""):
parsed_date = parsed_date.replace(year=default_year + 1)
result["date"] = parsed_date
# ---------- PERCENT HANDLING ----------
def parse_percent(value):
if not value:
return 0.0
v = value.strip().replace(",", ".").replace("%", "")
try:
return float(v) / 100.0
except:
return 0.0
result["plan"] = parse_percent(data.get("plan", "0"))
result["actual"] = parse_percent(data.get("actual", "0"))
# Gap calculation
result["gap"] = result["actual"] - result["plan"]
return result
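# Illustrative call of convert_spreadsheet_data, assuming the variant that takes
# (data, default_year) with day/date/plan/actual keys; the row values are invented.
_row = convert_spreadsheet_data(
    {"day": "3", "date": "Nov 20", "plan": "45%", "actual": "42,5%"}, default_year=2025
)
# _row includes day=3, date=datetime(2025, 11, 20), plan=0.45, actual=0.425 and a gap of about -0.025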
def fetch_all_sections(service, spreadsheet_id, sheet_name):
# Fetch a wide range covering columns A through M
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id,
range=f"{sheet_name}!A5:M5000"
).execute()
values = result.get("values", [])
if not values:
raise ValueError("No data found in sheet")
data = []
current_section = None
current_subsystem = None
for row in values:
# Pad missing columns to avoid index errors
row += [""] * (13 - len(row))
colA, colB, colC, colD, colE, colF, colG, colH, colI, colJ, colK, colL, colM = row
# Detect a SECTION — bold blue rows in Column C
if colC and not colD and not colE:
current_section = colC.strip()
current_subsystem = None
continue
# Detect a SUBSYSTEM — indented header in Column D
if colD and not colE:
current_subsystem = colD.strip()
continue
# Detect a TASK — Column E populated
if colE:
task = colE.strip()
pic = colF.strip()
start_date = indo_formatted_date(colG.strip())
finish_date = indo_formatted_date(colH.strip())
duration = colI.strip()
plan = colK.strip()
actual = colL.strip()
gap = colM.strip()
data.append({
"system": current_section,
"subsystem": current_subsystem,
"task": task,
"PIC": pic,
"start_date": start_date,
"end_date": finish_date,
"duration": int(duration),
"plan": plan,
"actual": actual,
"gap": gap
})
return data
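For context, a hedged sketch of how fetch_all_sections might be wired up; the credentials file, spreadsheet ID, and sheet name are placeholders, and the client construction assumes the standard google-api-python-client Sheets v4 service implied by the values().get() call above.

from google.oauth2.service_account import Credentials
from googleapiclient.discovery import build

creds = Credentials.from_service_account_file(
    "service-account.json",  # placeholder path
    scopes=["https://www.googleapis.com/auth/spreadsheets.readonly"],
)
service = build("sheets", "v4", credentials=creds)
tasks = fetch_all_sections(service, "SPREADSHEET_ID", "Sheet1")
# Each item carries: system, subsystem, task, PIC, start_date, end_date,
# duration, plan, actual, gap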
def indo_formatted_date(date_str, base_year=2025):
"""
Convert short date like 'Nov 20', '30-Dec', 'Jan 1'
    into: 'Kamis, November 20, 2025'
If month is January, year becomes 2026.
"""
# Month mappings
eng_to_indo_month = {
"Jan": "Januari", "Feb": "Februari", "Mar": "Maret", "Apr": "April",
"May": "Mei", "Jun": "Juni", "Jul": "Juli", "Aug": "Agustus",
"Sep": "September", "Oct": "Oktober", "Nov": "November", "Dec": "Desember"
} }
indo_days = { time_str = data['time']
0: "Senin", for indo, eng in month_mapping.items():
1: "Selasa", time_str = time_str.replace(indo, eng)
2: "Rabu",
3: "Kamis", # Format: "Sabtu, Juli 13, 2024" -> "Saturday, July 13, 2024"
4: "Jumat", # Removing the day of week to simplify parsing
5: "Sabtu", time_str = time_str.split(', ', 1)[1] # Remove "Sabtu, "
6: "Minggu" result['time'] = datetime.strptime(time_str, '%B %d, %Y')
}
# Convert percentage strings to floats
# Normalize formats ("30-Dec" → "Dec 30") # Handling format like "0,12%" -> 0.12
if "-" in date_str: for key in ['plan', 'actual', 'gap']:
d, m = date_str.split("-") # Replace comma with dot (European to US decimal notation)
date_str = f"{m} {d}" value = data[key].replace(',', '.')
# Remove percentage sign
# Parse using English abbreviation value = value.rstrip('%')
try: # Convert to float
dt = datetime.strptime(f"{date_str} {base_year}", "%b %d %Y") result[key] = float(value) / 100 # Divide by 100 to get the actual decimal value
    except ValueError:
return None
# Handle year rollover (Jan -> next year) return result
if dt.month == 1:
dt = dt.replace(year=base_year + 1)
# Convert to Indonesian components
day_name = indo_days[dt.weekday()]
month_name = eng_to_indo_month[dt.strftime("%b")]
return f"{day_name}, {month_name} {dt.day}, {dt.year}"

@ -161,7 +161,7 @@ async def get_overview_overhaul(*, db_session: DbSession):
) )
) )
ongoing_result = await db_session.execute(ongoing_query.options(selectinload(OverhaulScope.maintenance_type))) ongoing_result = await db_session.execute(ongoing_query.options(selectinload(OverhaulScope.maintenance_type)))
ongoing_overhaul = ongoing_result.scalar_one_or_none() ongoing_overhaul = ongoing_result.scalars().first()
# 2. If no ongoing overhaul, get the closest scheduled overhaul # 2. If no ongoing overhaul, get the closest scheduled overhaul
if ongoing_overhaul is None: if ongoing_overhaul is None:
@ -202,9 +202,6 @@ async def get_overview_overhaul(*, db_session: DbSession):
) )
results = await db_session.execute(equipments) results = await db_session.execute(equipments)
#Remaining days based on status
remaining_days = (selected_overhaul.start_date - current_date).days if selected_overhaul.status == "Upcoming" else (selected_overhaul.end_date - current_date).days
return { return {
"status": selected_overhaul.status, "status": selected_overhaul.status,
@ -215,7 +212,7 @@ async def get_overview_overhaul(*, db_session: DbSession):
"end_date": selected_overhaul.end_date, "end_date": selected_overhaul.end_date,
"duration_oh": selected_overhaul.duration_oh, "duration_oh": selected_overhaul.duration_oh,
"crew_number": selected_overhaul.crew_number, "crew_number": selected_overhaul.crew_number,
"remaining_days": remaining_days, "remaining_days": (selected_overhaul.start_date - current_date).days,
"equipment_count": len(results.scalars().all()), "equipment_count": len(results.scalars().all()),
}, },
} }
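A self-contained illustration of the remaining_days rule from the left-hand version above: for an upcoming overhaul it counts down to start_date, otherwise to end_date. The stub class and dates are made up.

from dataclasses import dataclass
from datetime import date

@dataclass
class OverhaulStub:          # mimics only the attributes the rule reads
    status: str
    start_date: date
    end_date: date

def remaining_days(oh, current_date):
    return (oh.start_date - current_date).days if oh.status == "Upcoming" else (oh.end_date - current_date).days

today = date(2025, 1, 1)
print(remaining_days(OverhaulStub("Upcoming", date(2025, 1, 12), date(2025, 2, 20)), today))  # 11
print(remaining_days(OverhaulStub("Ongoing", date(2024, 12, 1), date(2025, 1, 20)), today))   # 19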

@ -33,11 +33,3 @@ class MasterSparepartProcurement(Base, DefaultMixin):
eta_requisition = Column(Date, nullable=False) eta_requisition = Column(Date, nullable=False)
eta_ordered = Column(Date, nullable=True) eta_ordered = Column(Date, nullable=True)
eta_received = Column(Date, nullable=True) eta_received = Column(Date, nullable=True)
class SparepartRemark(Base, DefaultMixin):
__tablename__ = "oh_ms_sparepart_remark"
itemnum = Column(String, nullable=False)
remark = Column(String, nullable=False)
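A short sketch of how the oh_ms_sparepart_remark rows are consumed later in this diff: loaded once and indexed by itemnum. The helper name is hypothetical; db_session is assumed to be the usual async session.

from sqlalchemy import select

async def load_remark_map(db_session):
    rows = (await db_session.execute(select(SparepartRemark))).scalars().all()
    return {r.itemnum: r.remark for r in rows}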

@ -4,18 +4,17 @@ from src.database.core import CollectorDbSession
from src.database.service import (CommonParameters, DbSession, from src.database.service import (CommonParameters, DbSession,
search_filter_sort_paginate) search_filter_sort_paginate)
from src.models import StandardResponse from src.models import StandardResponse
from src.sparepart.schema import SparepartRemark
from .service import create_remark, get_spareparts_paginated from .service import get_all
router = APIRouter() router = APIRouter()
@router.get("", response_model=StandardResponse[list]) @router.get("", response_model=StandardResponse[list])
async def get_sparepart(collector_db_session:CollectorDbSession, db_session: DbSession): async def get_sparepart(collector_db_session:CollectorDbSession):
"""Get all scope activity pagination.""" """Get all scope activity pagination."""
# return # return
data = await get_spareparts_paginated(db_session=db_session, collector_db_session=collector_db_session) data = await get_all(collector_db_session)
@ -23,17 +22,6 @@ async def get_sparepart(collector_db_session:CollectorDbSession, db_session: DbS
data=data, data=data,
message="Data retrieved successfully", message="Data retrieved successfully",
) )
@router.post("", response_model=StandardResponse[SparepartRemark])
async def create_remark_route(collector_db_session:CollectorDbSession, db_session: DbSession, remark_in:SparepartRemark):
sparepart_remark = await create_remark(db_session=db_session, collector_db_session=collector_db_session, remark_in=remark_in)
return StandardResponse(
data=sparepart_remark,
message="Remark Created successfully"
)
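A hedged client-side example for the remark endpoint above; the base URL and router prefix are not visible in this diff, so the path below is a placeholder, and the payload fields follow the SparepartRemark schema (itemnum, remark).

import httpx

payload = {"itemnum": "SP-001", "remark": "Long lead time - order early"}  # illustrative values
resp = httpx.post("http://localhost:8000/sparepart", json=payload)         # placeholder URL/path
print(resp.json())  # StandardResponse wrapping the created or updated remark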
# @router.post("", response_model=StandardResponse[ActivityMasterCreate]) # @router.post("", response_model=StandardResponse[ActivityMasterCreate])

@ -1,6 +1,4 @@
from dataclasses import dataclass from datetime import datetime
from datetime import date, datetime
from enum import Enum
from typing import Any, Dict, List, Optional from typing import Any, Dict, List, Optional
from uuid import UUID from uuid import UUID
@ -37,48 +35,41 @@ class ActivityMasterRead(ActivityMaster):
class ActivityMasterPagination(Pagination): class ActivityMasterPagination(Pagination):
items: List[ActivityMasterRead] = [] items: List[ActivityMasterRead] = []
class ProcurementStatus(Enum):
PLANNED = "planned" # {
ORDERED = "ordered" # "overview": {
RECEIVED = "received" # "totalEquipment": 30,
CANCELLED = "cancelled" # "nextSchedule": {
# "date": "2025-01-12",
@dataclass # "Overhaul": "B",
class SparepartRequirement: # "equipmentCount": 30
"""Sparepart requirement for equipment overhaul""" # }
sparepart_id: str # },
quantity_required: int # "criticalParts": [
lead_time: int # "Boiler feed pump",
sparepart_name: str # "Boiler reheater system",
unit_cost: float # "Drum Level (Right) Root Valve A",
avg_cost: float # "BCP A Discharge Valve",
remark:str # "BFPT A EXH Press HI Root VLV"
# ],
@dataclass # "schedules": [
class SparepartStock: # {
"""Current sparepart stock information""" # "date": "2025-01-12",
sparepart_id: str # "Overhaul": "B",
sparepart_name: str # "status": "upcoming"
current_stock: int # }
unit_cost: float # // ... other scheduled overhauls
location: str # ],
remark:str # "systemComponents": {
# "boiler": {
@dataclass # "status": "operational",
class ProcurementRecord: # "lastOverhaul": "2024-06-15"
"""Purchase Order/Purchase Request record""" # },
po_pr_id: str # "turbine": {
sparepart_id: str # "hpt": { "status": "operational" },
sparepart_name: str # "ipt": { "status": "operational" },
quantity: int # "lpt": { "status": "operational" }
unit_cost: float # }
total_cost: float # // ... other major components
order_date: date # }
expected_delivery_date: date # }
status: ProcurementStatus
po_vendor_delivery_date: date
class SparepartRemark(DefultBase):
itemnum: str
remark:str
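Hypothetical instances showing the field layout of the dataclasses as defined above (the variant that still carries avg_cost and remark); all values are made up.

req = SparepartRequirement(
    sparepart_id="SP-001",
    quantity_required=4,
    lead_time=3,                       # months
    sparepart_name="Boiler feed pump seal",
    unit_cost=1200.0,
    avg_cost=1150.0,
    remark="Critical for OH",
)
stock = SparepartStock(
    sparepart_id="SP-001",
    sparepart_name="Boiler feed pump seal",
    current_stock=2,
    unit_cost=1150.0,
    location="WH-01",
    remark="",
)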

@ -9,402 +9,147 @@ import numpy as np
from dataclasses import dataclass from dataclasses import dataclass
from enum import Enum from enum import Enum
from sqlalchemy import Delete, Select, select, text from sqlalchemy import Delete, Select, text
from sqlalchemy.orm import joinedload, selectinload from sqlalchemy.orm import joinedload, selectinload
from src.auth.service import CurrentUser from src.auth.service import CurrentUser
from src.database.core import DbSession from src.database.core import DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate from src.database.service import CommonParameters, search_filter_sort_paginate
from src.logging import setup_logging from src.logging import setup_logging
from src.overhaul_activity.service import get_standard_scope_by_session_id from src.overhaul_scope.service import get as get_scope
from src.overhaul_scope.service import get as get_scope, get_overview_overhaul
from src.overhaul_scope.service import get_prev_oh from src.overhaul_scope.service import get_prev_oh
from src.sparepart.model import SparepartRemark
from src.sparepart.schema import ProcurementRecord, ProcurementStatus, SparepartRequirement, SparepartStock
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
setup_logging(logger=log) setup_logging(logger=log)
from sqlalchemy import text async def get_all(db_session: DbSession):
import math
from sqlalchemy import text
# async def get_spareparts_paginated(
# *,
# db_session,
# collector_db_session,
# ):
# """
# Get spare parts for work orders under specific parent WO(s),
# including inventory and PR/PO data.
# """
# # Normalize parent_num to array for SQL ANY()
# # parent_nums = parent_num if isinstance(parent_num, (list, tuple)) else [parent_num]
# parent_nums = []
# data_query = text("""
# WITH selected_wo AS (
# SELECT
# wonum,
# xx_parent,
# location_tag,
# assetnum,
# siteid,
# reportdate
# FROM public.wo_maxim
# WHERE xx_parent = ANY(:parent_nums)
# ),
# wo_materials AS (
# SELECT
# wm.wonum,
# wm.itemnum,
# wm.itemqty,
# wm.inv_itemnum,
# wm.inv_location,
# wm.inv_curbaltotal,
# wm.inv_avgcost,
# sw.location_tag
# FROM public.wo_maxim_material wm
# JOIN selected_wo sw ON wm.wonum = sw.wonum
# ),
# -- PR Lines
# pr_lines AS (
# SELECT
# pl.item_num,
# h.num AS pr_number,
# h.issue_date AS pr_issue_date,
# h.status AS pr_status,
# pl.qty_ordered AS pr_qty_ordered,
# pl.qty_requested AS pr_qty_requested
# FROM public.maximo_sparepart_pr_po h
# JOIN public.maximo_sparepart_pr_po_line pl
# ON h.num = pl.num
# WHERE h.type = 'PR'
# AND EXTRACT(YEAR FROM h.issue_date) >= 2019
# ),
# -- PO Lines
# po_lines AS (
# SELECT
# pl.item_num,
# h.num AS po_number,
# h.estimated_arrival_date AS po_estimated_arrival_date,
# h.vendeliverydate AS po_vendeliverydate,
# h.receipts AS po_receipt,
# h.status AS po_status,
# pl.qty_ordered AS po_qty_ordered,
# pl.qty_received AS po_qty_received
# FROM public.maximo_sparepart_pr_po h
# JOIN public.maximo_sparepart_pr_po_line pl
# ON h.num = pl.num
# WHERE h.type = 'PO'
# AND (h.receipts = 'NONE')
# AND (h.status IS NOT NULL)
# ),
# -- Item Descriptions
# item_descriptions AS (
# SELECT DISTINCT
# item_num,
# FIRST_VALUE(description) OVER (
# PARTITION BY item_num
# ORDER BY created_at DESC NULLS LAST
# ) AS description
# FROM public.maximo_sparepart_pr_po_line
# WHERE description IS NOT NULL
# ),
# -- Unified PR/PO data
# pr_po_unified AS (
# SELECT
# pr.item_num,
# pr.pr_number,
# pr.pr_issue_date,
# pr.pr_qty_ordered,
# pr.pr_status,
# po.po_number,
# COALESCE(po.po_qty_ordered, 0) AS po_qty_ordered,
# COALESCE(po.po_qty_received, 0) AS po_qty_received,
# po.po_estimated_arrival_date,
# po.po_vendeliverydate,
# po.po_receipt,
# po.po_status,
# CASE WHEN po.po_number IS NOT NULL THEN 'YES' ELSE 'NO' END AS po_exists
# FROM pr_lines pr
# LEFT JOIN po_lines po
# ON pr.item_num = po.item_num
# AND pr.pr_number = po.po_number
# ),
# -- Aggregate PR/PO info
# pr_po_agg AS (
# SELECT
# item_num,
# SUM(COALESCE(pr_qty_ordered, 0)) AS total_pr_qty,
# SUM(COALESCE(po_qty_ordered, 0)) AS total_po_qty,
# SUM(COALESCE(po_qty_received, 0)) AS total_po_received,
# JSON_AGG(
# JSON_BUILD_OBJECT(
# 'pr_number', pr_number,
# 'pr_issue_date', pr_issue_date,
# 'pr_qty_requested', pr_qty_ordered,
# 'pr_status', pr_status,
# 'po_exists', po_exists,
# 'po_qty_ordered', po_qty_ordered,
# 'po_qty_received', po_qty_received,
# 'po_estimated_arrival_date', po_estimated_arrival_date,
# 'po_vendeliverydate', po_vendeliverydate,
# 'po_receipt', po_receipt,
# 'po_status', po_status
# )
# ORDER BY pr_issue_date DESC
# ) AS pr_po_details
# FROM pr_po_unified
# GROUP BY item_num
# )
# SELECT
# wm.itemnum,
# COALESCE(id.description, 'No description available') AS item_description,
# SUM(wm.itemqty) AS total_required_for_oh,
# COALESCE(MAX(wm.inv_curbaltotal), 0) AS current_balance_total,
# COALESCE(ap.total_pr_qty, 0) AS total_pr_qty,
# COALESCE(ap.total_po_qty, 0) AS total_po_qty,
# COALESCE(ap.total_po_received, 0) AS total_po_received,
# ap.pr_po_details
# FROM wo_materials wm
# LEFT JOIN item_descriptions id
# ON wm.itemnum = id.item_num
# LEFT JOIN pr_po_agg ap
# ON wm.itemnum = ap.item_num
# GROUP BY
# wm.itemnum, id.description,
# ap.total_pr_qty, ap.total_po_qty, ap.total_po_received, ap.pr_po_details
# ORDER BY wm.itemnum;
# """)
# rows = await collector_db_session.execute(data_query, {"parent_nums": parent_nums})
# spare_parts = []
# for row in rows:
# spare_parts.append({
# "item_num": row.itemnum,
# "description": row.item_description,
# "current_balance_total": float(row.current_balance_total or 0.0),
# "total_required_for_oh": float(row.total_required_for_oh or 0.0),
# "total_pr_qty": row.total_pr_qty,
# "total_po_qty": row.total_po_qty,
# "total_po_received": row.total_po_received,
# "pr_po_details": row.pr_po_details,
# })
# return spare_parts
async def get_spareparts_paginated(*, db_session, collector_db_session):
""" """
Get paginated spare parts with usage, inventory, and PR/PO information. Get all spare parts with their latest PR and PO information.
Uses two queries: one for data, one for total count.
Args: Args:
db_session: SQLAlchemy database session db_session: SQLAlchemy database session
page (int): Page number (1-based) assetnum: Optional asset number filter (not used in this query but kept for compatibility)
items_per_page (int): Number of items per page
Returns:
List of dictionaries containing spare part information
""" """
# calculate limit/offset # Define the SQL query
# limit = items_per_page query = text("""
# offset = (page - 1) * items_per_page WITH latest_prs AS (
SELECT DISTINCT ON (pl.item_num)
# wo_materials AS ( pl.item_num,
# SELECT h.num as pr_number,
# wm.wonum, h.issue_date as pr_issue_date,
# wm.itemnum, h.status as pr_status,
# wm.itemqty, pl.qty_ordered as pr_qty_ordered,
# wm.inv_itemnum, pl.description,
# wm.inv_location, pl.unit_cost,
# wm.inv_curbaltotal, pl.line_cost
# wm.inv_avgcost, FROM public.maximo_sparepart_pr_po h
# sw.asset_location as location_tag JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
# FROM public.wo_maxim_material wm WHERE h.type = 'PR'
# JOIN oh_workorders sw ON wm.wonum = sw.wonum AND h.issue_date IS NOT NULL
# ), AND h.num LIKE 'K%'
ORDER BY pl.item_num, h.issue_date DESC
# ----------------------------- )
# Query #1: Fetch paginated rows SELECT DISTINCT ON (pr.item_num)
# ----------------------------- pr.item_num,
data_query = text(""" pr.line_cost,
WITH oh_workorders AS ( pr.unit_cost,
SELECT DISTINCT wonum, asset_location, asset_unit pr.description,
FROM public.wo_maximo ma COALESCE(i.curbaltotal, 0) as current_balance_total,
WHERE ma.xx_parent IN ('155026', '155027', '155029', '155030') pr.pr_number,
), pr.pr_issue_date,
wo_materials AS ( pr.pr_qty_ordered,
SELECT CASE
wm.wonum, WHEN po.po_number IS NOT NULL THEN 'YES'
wm.itemnum, ELSE 'NO'
wm.itemqty, END as po_exists,
wm.inv_location AS inv_location, COALESCE(po.qty_received, 0) as po_qty_received,
wm.inv_curbaltotal AS inv_curbaltotal, COALESCE(po.qty_ordered, 0) as po_qty_ordered,
wm.inv_avgcost AS inv_avgcost, po.estimated_arrival_date as po_estimated_arrival_date
sw.asset_location as location_tag FROM latest_prs pr
FROM public.wo_maximo_material wm LEFT JOIN public.maximo_inventory i ON pr.item_num = i.itemnum
JOIN oh_workorders sw ON wm.wonum = sw.wonum LEFT JOIN LATERAL (
),
location_sparepart_stats AS (
SELECT location_tag, itemnum,
COUNT(DISTINCT wonum) as total_wo_count,
SUM(itemqty) as total_qty_used,
AVG(itemqty) as avg_qty_per_wo,
MIN(itemqty) as min_qty_used,
MAX(itemqty) as max_qty_used
FROM wo_materials
GROUP BY location_tag, itemnum
HAVING SUM(itemqty) > 0
),
pr_lines AS (
SELECT
pl.item_num,
h.num as pr_number,
h.issue_date as pr_issue_date,
h.status as pr_status,
pl.qty_ordered as pr_qty_ordered,
pl.qty_requested as pr_qty_requested
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
WHERE h.type = 'PR' AND EXTRACT(YEAR FROM h.issue_date) >= 2023
),
item_descriptions AS (
SELECT DISTINCT
item_num,
FIRST_VALUE(description) OVER (
PARTITION BY item_num
ORDER BY created_at DESC NULLS LAST
) as description
FROM public.maximo_sparepart_pr_po_line
WHERE description IS NOT NULL
),
po_lines AS (
SELECT SELECT
pl.item_num,
h.num as po_number, h.num as po_number,
h.estimated_arrival_date as po_estimated_arrival_date, pl.qty_received,
h.vendeliverydate as po_vendeliverydate, pl.qty_ordered,
h.receipts as po_receipt, h.estimated_arrival_date
h.status as po_status,
pl.qty_ordered as po_qty_ordered,
pl.qty_received as po_qty_received
FROM public.maximo_sparepart_pr_po h FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
WHERE h.type = 'PO' WHERE h.type = 'PO'
AND (h.receipts = 'NONE') AND h.num = pr.pr_number
AND (h.status IS NOT NULL) AND pl.item_num = pr.item_num
), LIMIT 1
pr_po_unified AS ( ) po ON true
SELECT ORDER BY pr.item_num;
pr.item_num,
pr.pr_number,
pr.pr_issue_date,
pr.pr_qty_ordered,
pr.pr_status,
po.po_number,
COALESCE(po.po_qty_ordered,0) as po_qty_ordered,
COALESCE(po.po_qty_received,0) as po_qty_received,
po.po_estimated_arrival_date,
po.po_vendeliverydate,
po.po_receipt,
po.po_status,
CASE WHEN po.po_number IS NOT NULL THEN 'YES' ELSE 'NO' END as po_exists
FROM pr_lines pr
LEFT JOIN po_lines po
ON pr.item_num = po.item_num
AND pr.pr_number = po.po_number
),
pr_po_agg AS (
SELECT
item_num,
SUM(COALESCE(pr_qty_ordered,0)) as total_pr_qty,
SUM(COALESCE(po_qty_ordered,0)) as total_po_qty,
SUM(COALESCE(po_qty_received,0)) as total_po_received,
JSON_AGG(
JSON_BUILD_OBJECT(
'pr_number', pr_number,
'pr_issue_date', pr_issue_date,
'pr_qty_requested', pr_qty_ordered,
'pr_status', pr_status,
'po_exists', po_exists,
'po_qty_ordered', po_qty_ordered,
'po_qty_received', po_qty_received,
'po_estimated_arrival_date', po_estimated_arrival_date,
'po_vendeliverydate', po_vendeliverydate,
'po_receipt', po_receipt,
'po_status', po_status
) ORDER BY pr_issue_date DESC
) as pr_po_details
FROM pr_po_unified
GROUP BY item_num
),
inv_summary AS (
SELECT
itemnum,
MAX(inv_curbaltotal) AS total_curbaltotal,
AVG(inv_avgcost) AS avg_cost
FROM wo_materials
GROUP BY itemnum
)
SELECT
lss.itemnum,
COALESCE(id.description, 'No description available') as item_description,
lss.total_wo_count,
lss.total_qty_used,
ROUND(CAST(lss.avg_qty_per_wo AS NUMERIC), 2) as avg_qty_per_wo,
lss.min_qty_used,
lss.max_qty_used,
COALESCE(i.total_curbaltotal,0) as current_balance_total,
COALESCE(ap.total_pr_qty,0) as total_pr_qty,
COALESCE(ap.total_po_qty,0) as total_po_qty,
COALESCE(ap.total_po_received,0) as total_po_received,
ap.pr_po_details
FROM location_sparepart_stats lss
LEFT JOIN item_descriptions id ON lss.itemnum = id.item_num
LEFT JOIN inv_summary i ON lss.itemnum = i.itemnum
LEFT JOIN pr_po_agg ap ON lss.itemnum = ap.item_num
ORDER BY lss.location_tag, lss.itemnum;
""") """)
overhaul = await get_overview_overhaul(db_session=db_session) # Execute the query
result = await db_session.execute(query)
standard_overhaul = await get_standard_scope_by_session_id(db_session=db_session, collector_db=collector_db_session, overhaul_session_id=overhaul['overhaul']['id'])
asset_locations = [eq.location_tag for eq in standard_overhaul]
rows = await collector_db_session.execute(
data_query,
{"asset_locations": asset_locations}
)
sparepart_remark = (await db_session.execute(
select(SparepartRemark)
)).scalars().all()
sparepart_remark_dict = {item.itemnum: item.remark for item in sparepart_remark}
# Fetch all results and convert to list of dictionaries
spare_parts = [] spare_parts = []
for row in rows: for row in result:
spare_parts.append({ spare_parts.append({
"item_num": row.itemnum, "item_num": row.item_num,
"description": row.item_description, "description": row.description,
"remark": sparepart_remark_dict.get(row.itemnum, ""), "line_cost": row.line_cost,
"current_balance_total": float(row.current_balance_total) if row.current_balance_total else 0.0, "unit_cost": row.unit_cost,
"total_required_for_oh": float(row.avg_qty_per_wo), "current_balance_total": float(row.current_balance_total) if row.current_balance_total is not None else 0.0,
"total_pr_qty": row.total_pr_qty, "pr_number": row.pr_number,
"total_po_qty": row.total_po_qty, "pr_issue_date": row.pr_issue_date,
"total_po_received": row.total_po_received, "pr_qty_ordered": float(row.pr_qty_ordered) if row.pr_qty_ordered is not None else 0.0,
"pr_po_details": row.pr_po_details "po_exists": row.po_exists,
"po_qty_received": float(row.po_qty_received) if row.po_qty_received is not None else 0.0,
"po_qty_ordered": float(row.po_qty_ordered) if row.po_qty_ordered is not None else 0.0,
"po_estimated_arrival_date": row.po_estimated_arrival_date
}) })
return spare_parts return spare_parts
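For orientation, one element of the list returned by the left-hand get_spareparts_paginated looks roughly as follows; the keys mirror the append() call above and all values are illustrative.

example_item = {
    "item_num": "SP-001",
    "description": "Boiler feed pump seal",
    "remark": "",
    "current_balance_total": 2.0,
    "total_required_for_oh": 4.0,
    "total_pr_qty": 4,
    "total_po_qty": 4,
    "total_po_received": 0,
    "pr_po_details": [...],  # JSON_AGG of the per-PR/PO detail objects
}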
class ProcurementStatus(Enum):
PLANNED = "planned"
ORDERED = "ordered"
RECEIVED = "received"
CANCELLED = "cancelled"
@dataclass
class SparepartRequirement:
"""Sparepart requirement for equipment overhaul"""
sparepart_id: str
quantity_required: int
lead_time: int
sparepart_name: str
unit_cost: float
@dataclass
class SparepartStock:
"""Current sparepart stock information"""
sparepart_id: str
sparepart_name: str
current_stock: int
unit_cost: float
location: str
@dataclass
class ProcurementRecord:
"""Purchase Order/Purchase Request record"""
po_pr_id: str
sparepart_id: str
sparepart_name: str
quantity: int
unit_cost: float
total_cost: float
order_date: date
expected_delivery_date: date
status: ProcurementStatus
po_vendor_delivery_date: date
class SparepartManager: class SparepartManager:
"""Manages sparepart availability and procurement for overhaul optimization""" """Manages sparepart availability and procurement for overhaul optimization"""
@ -534,8 +279,7 @@ class SparepartManager:
sparepart_id = requirement.sparepart_id sparepart_id = requirement.sparepart_id
needed_quantity = requirement.quantity_required needed_quantity = requirement.quantity_required
sparepart_name = requirement.sparepart_name sparepart_name = requirement.sparepart_name
sparepart_remark= requirement.remark unit_cost = requirement.unit_cost
unit_cost = requirement.avg_cost if requirement.avg_cost > 0 else requirement.unit_cost
current_stock = adjusted_stocks.get(sparepart_id, 0) current_stock = adjusted_stocks.get(sparepart_id, 0)
@ -563,8 +307,7 @@ class SparepartManager:
'status': order.status.value, 'status': order.status.value,
'months_until_delivery': self._calculate_months_until_delivery(order.expected_delivery_date, target_month), 'months_until_delivery': self._calculate_months_until_delivery(order.expected_delivery_date, target_month),
'is_on_time': self._is_delivery_on_time(order.expected_delivery_date, target_month), 'is_on_time': self._is_delivery_on_time(order.expected_delivery_date, target_month),
'usage': 'covers_requirement', 'usage': 'covers_requirement'
'remark': sparepart_remark
} }
pr_po_summary['existing_orders'].append(order_info) pr_po_summary['existing_orders'].append(order_info)
pr_po_summary['total_existing_value'] += order.total_cost pr_po_summary['total_existing_value'] += order.total_cost
@ -576,7 +319,6 @@ class SparepartManager:
missing_parts.append({ missing_parts.append({
'sparepart_id': sparepart_id, 'sparepart_id': sparepart_id,
'sparepart_name': sparepart_name, 'sparepart_name': sparepart_name,
'remark': sparepart_remark,
'required': needed_quantity, 'required': needed_quantity,
'current_stock': current_stock, 'current_stock': current_stock,
'ordered_quantity': total_ordered_quantity, 'ordered_quantity': total_ordered_quantity,
@ -608,7 +350,6 @@ class SparepartManager:
new_order = { new_order = {
'sparepart_id': sparepart_id, 'sparepart_id': sparepart_id,
'sparepart_name': sparepart_name, 'sparepart_name': sparepart_name,
'remark': sparepart_remark,
'quantity_needed': shortage, 'quantity_needed': shortage,
'unit_cost': unit_cost, 'unit_cost': unit_cost,
'total_cost': procurement_cost, 'total_cost': procurement_cost,
@ -915,7 +656,7 @@ class SparepartManager:
# Integration functions for database operations # Integration functions for database operations
async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session, app_db_session, analysis_window_months = None) -> SparepartManager: async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session) -> SparepartManager:
"""Load sparepart data from database""" """Load sparepart data from database"""
# You'll need to implement these queries based on your database schema # You'll need to implement these queries based on your database schema
# Get scope dates for analysis window # Get scope dates for analysis window
@ -923,40 +664,31 @@ async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session, app_db_s
# prev_oh_scope = await get_prev_oh(db_session=db_session, overhaul_session=scope) # prev_oh_scope = await get_prev_oh(db_session=db_session, overhaul_session=scope)
analysis_start_date = prev_oh_scope.end_date analysis_start_date = prev_oh_scope.end_date
analysis_window_months = int(((scope.start_date - prev_oh_scope.end_date).days / 30) * 1.2) if not analysis_window_months else analysis_window_months analysis_window_months = int(((scope.start_date - prev_oh_scope.end_date).days / 30) * 1.5)
sparepart_manager = SparepartManager(analysis_start_date, analysis_window_months) sparepart_manager = SparepartManager(analysis_start_date, analysis_window_months)
start_date = prev_oh_scope.end_date
end_date = scope.start_date
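A worked example of the analysis-window arithmetic above, with hypothetical overhaul dates; the two multipliers correspond to the left-hand (1.2) and right-hand (1.5) versions of the formula.

from datetime import date

prev_end = date(2022, 10, 1)    # prev_oh_scope.end_date (hypothetical)
next_start = date(2025, 1, 12)  # scope.start_date (hypothetical)
gap_days = (next_start - prev_end).days   # 834
int((gap_days / 30) * 1.2)                # 33 months of history
int((gap_days / 30) * 1.5)                # 41 months of history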
# Load sparepart stocks # Load sparepart stocks
# Example query - adjust based on your schema # Example query - adjust based on your schema
query = text("""SELECT query = text("""
wm.inv_itemnum AS itemnum, SELECT
wm.inv_itemsetid AS itemsetid, mi.id,
wm.inv_location AS location, mi.itemnum,
MAX(wm.inv_curbaltotal) AS curbaltotal, mi.itemsetid,
AVG(wm.inv_avgcost) AS avgcost, mi."location",
COALESCE(mspl.description, 'No description available') AS description mi.curbaltotal,
FROM public.wo_maximo_material wm mi.avgcost,
LEFT JOIN public.maximo_sparepart_pr_po_line mspl mspl.description
ON wm.inv_itemnum = mspl.item_num FROM public.maximo_inventory mi
WHERE wm.inv_itemnum IS NOT NULL LEFT JOIN public.maximo_sparepart_pr_po_line mspl
GROUP BY wm.inv_itemnum, wm.inv_itemsetid, wm.inv_location, mspl.description ON mi.itemnum = mspl.item_num
""") """)
log.info("Fetch sparepart") log.info("Fetch sparepart")
sparepart_stocks_query = await db_session.execute(query) sparepart_stocks_query = await db_session.execute(query)
sparepart_remark = (await app_db_session.execute(
select(SparepartRemark)
)).scalars().all()
sparepart_remark_dict = {item.itemnum: item.remark for item in sparepart_remark}
for stock_record in sparepart_stocks_query: for stock_record in sparepart_stocks_query:
stock = SparepartStock( stock = SparepartStock(
sparepart_id=stock_record.itemnum, sparepart_id=stock_record.itemnum,
remark=sparepart_remark_dict.get(stock_record.itemnum),
sparepart_name=stock_record.description, sparepart_name=stock_record.description,
current_stock=stock_record.curbaltotal, current_stock=stock_record.curbaltotal,
unit_cost=stock_record.avgcost, unit_cost=stock_record.avgcost,
@ -964,186 +696,27 @@ async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session, app_db_s
) )
sparepart_manager.add_sparepart_stock(stock) sparepart_manager.add_sparepart_stock(stock)
# parent_nums = [] # Load equipment sparepart requirements
# query = text("""
# WITH target_wo AS (
# -- Work orders from the given parent(s)
# SELECT
# wonum,
# xx_parent,
# location_tag AS asset_location
# FROM public.wo_maxim
# WHERE xx_parent = ANY(:parent_nums)
# ),
# target_materials AS (
# -- Materials directly linked to target WOs (new requirement data)
# SELECT
# tw.asset_location,
# wm.itemnum,
# wm.inv_avgcost
# SUM(wm.itemqty) AS total_qty_required
# FROM public.wo_maxim_material wm
# JOIN target_wo tw ON wm.wonum = tw.wonum
# WHERE wm.itemnum IS NOT NULL
# GROUP BY tw.asset_location, wm.itemnum
# ),
# -- Historical OH work orders (for lead time reference)
# oh_workorders AS (
# SELECT DISTINCT
# wonum,
# asset_location
# FROM public.wo_staging_maximo_2
# WHERE worktype = 'OH'
# AND asset_location IS NOT NULL
# AND asset_unit IN ('3', '00')
# ),
# sparepart_usage AS (
# SELECT
# oh.asset_location,
# mwm.itemnum,
# mwm.itemqty,
# mwm.wonum
# FROM oh_workorders oh
# INNER JOIN public.wo_maxim_material mwm
# ON oh.wonum = mwm.wonum
# ),
# location_sparepart_stats AS (
# SELECT
# asset_location,
# itemnum,
# COUNT(DISTINCT wonum) as total_wo_count,
# SUM(itemqty) as total_qty_used,
# AVG(itemqty) as avg_qty_per_wo
# FROM sparepart_usage
# GROUP BY asset_location, itemnum
# ),
# pr_po_combined AS (
# SELECT
# mspl.item_num,
# mspl.num,
# mspl.unit_cost,
# mspl.qty_ordered,
# MAX(CASE WHEN mspo.type = 'PR' THEN mspo.issue_date END) as issue_date,
# MAX(CASE WHEN mspo.type = 'PO' THEN mspo.vendeliverydate END) as vendeliverydate,
# MAX(CASE WHEN mspo.type = 'PO' THEN mspo.estimated_arrival_date END) as estimated_arrival_date
# FROM public.maximo_sparepart_pr_po_line mspl
# INNER JOIN public.maximo_sparepart_pr_po mspo
# ON mspl.num = mspo.num
# WHERE mspo.type IN ('PR', 'PO')
# GROUP BY mspl.item_num, mspl.num, mspl.unit_cost, mspl.qty_ordered
# ),
# leadtime_stats AS (
# SELECT
# item_num,
# ROUND(CAST(AVG(
# EXTRACT(EPOCH FROM (
# COALESCE(vendeliverydate, estimated_arrival_date) - issue_date
# )) / 86400 / 30.44
# ) AS NUMERIC), 1) as avg_leadtime_months,
# ROUND(CAST(MIN(
# EXTRACT(EPOCH FROM (
# COALESCE(vendeliverydate, estimated_arrival_date) - issue_date
# )) / 86400 / 30.44
# ) AS NUMERIC), 1) as min_leadtime_months,
# ROUND(CAST(MAX(
# EXTRACT(EPOCH FROM (
# COALESCE(vendeliverydate, estimated_arrival_date) - issue_date
# )) / 86400 / 30.44
# ) AS NUMERIC), 1) as max_leadtime_months,
# COUNT(*) as leadtime_sample_size,
# COUNT(CASE WHEN vendeliverydate IS NOT NULL THEN 1 END) as vendelivery_count,
# COUNT(CASE WHEN vendeliverydate IS NULL AND estimated_arrival_date IS NOT NULL THEN 1 END) as estimated_only_count
# FROM pr_po_combined
# WHERE issue_date IS NOT NULL
# AND COALESCE(vendeliverydate, estimated_arrival_date) IS NOT NULL
# AND COALESCE(vendeliverydate, estimated_arrival_date) > issue_date
# GROUP BY item_num
# ),
# cost_stats AS (
# SELECT
# item_num,
# ROUND(CAST(AVG(unit_cost) AS NUMERIC), 2) as avg_unit_cost,
# ROUND(CAST(MIN(unit_cost) AS NUMERIC), 2) as min_unit_cost,
# ROUND(CAST(MAX(unit_cost) AS NUMERIC), 2) as max_unit_cost,
# COUNT(*) as cost_sample_size,
# ROUND(CAST(AVG(unit_cost * qty_ordered) AS NUMERIC), 2) as avg_order_value,
# ROUND(CAST(SUM(unit_cost * qty_ordered) AS NUMERIC), 2) as total_value_ordered
# FROM pr_po_combined
# WHERE unit_cost IS NOT NULL AND unit_cost > 0
# GROUP BY item_num
# ),
# item_descriptions AS (
# SELECT DISTINCT
# item_num,
# FIRST_VALUE(description) OVER (
# PARTITION BY item_num
# ORDER BY created_at DESC NULLS LAST
# ) as description
# FROM public.maximo_sparepart_pr_po_line
# WHERE description IS NOT NULL
# )
# SELECT
# tr.asset_location,
# tr.itemnum,
# COALESCE(id.description, 'No description available') as item_description,
# tr.total_qty_required AS total_required_for_oh,
# tr.inv_avgcost,
# COALESCE(lt.avg_leadtime_months, 0) as avg_leadtime_months,
# COALESCE(cs.avg_unit_cost, 0) as avg_unit_cost,
# ROUND(CAST(COALESCE(tr.total_qty_required * cs.avg_unit_cost, 0) AS NUMERIC), 2) as estimated_cost_for_oh
# FROM target_materials tr
# LEFT JOIN item_descriptions id ON tr.itemnum = id.item_num
# LEFT JOIN leadtime_stats lt ON tr.itemnum = lt.item_num
# LEFT JOIN cost_stats cs ON tr.itemnum = cs.item_num
# ORDER BY tr.asset_location, tr.itemnum;
# """)
# equipment_requirements_query = await db_session.execute(query, {"parent_nums": parent_nums})
# equipment_requirements = defaultdict(list)
# for req_record in equipment_requirements_query:
# requirement = SparepartRequirement(
# sparepart_id=req_record.itemnum,
# quantity_required=float(req_record.total_required_for_oh or 0.0),
# lead_time=float(req_record.avg_leadtime_months or 0.0),
# sparepart_name=req_record.item_description,
# unit_cost=float(req_record.avg_unit_cost or 0.0),
# avg_cost=float(req_record.inv_avgcost or 0.0),
# )
# equipment_requirements[req_record.asset_location].append(requirement)
# for equipment_tag, requirements in equipment_requirements.items():
# sparepart_manager.add_equipment_requirements(equipment_tag, requirements)
# Load equipment sparepart requirements
# You'll need to create this table/relationship # You'll need to create this table/relationship
query = text("""WITH oh_workorders AS ( query = text("""WITH oh_workorders AS (
-- First, get all OH work orders -- First, get all OH work orders
SELECT DISTINCT SELECT DISTINCT
wonum, wonum,
asset_location asset_location
FROM public.wo_maximo ma FROM public.wo_staging_maximo_2
WHERE worktype = 'OH' AND asset_location IS NOT NULL and asset_unit IN ('3', '00') AND EXTRACT(YEAR FROM reportdate) >= 2019 WHERE worktype = 'OH' AND asset_location IS NOT NULL
), ),
current_oh as ( sparepart_usage AS (
SELECT DISTINCT wonum, asset_location, asset_unit -- Get sparepart usage for OH work orders
FROM public.wo_maximo ma SELECT
WHERE ma.xx_parent IN ('155026', '155027', '155029', '155030') oh.asset_location,
), mwm.itemnum,
sparepart_usage AS ( mwm.itemqty,
SELECT mwm.wonum
oh.asset_location, FROM oh_workorders oh
mwm.itemnum, INNER JOIN public.maximo_workorder_materials mwm
mwm.itemqty, ON oh.wonum = mwm.wonum
mwm.wonum, ),
mwm.inv_avgcost
FROM current_oh oh
INNER JOIN public.wo_maximo_material mwm
ON oh.wonum = mwm.wonum
),
location_sparepart_stats AS ( location_sparepart_stats AS (
-- Calculate average usage per sparepart per location -- Calculate average usage per sparepart per location
SELECT SELECT
@ -1228,12 +801,6 @@ item_descriptions AS (
) as description ) as description
FROM public.maximo_sparepart_pr_po_line FROM public.maximo_sparepart_pr_po_line
WHERE description IS NOT NULL WHERE description IS NOT NULL
),
item_inventory as (
SELECT
itemnum,
avgcost
FROM public.maximo_inventory
) )
SELECT SELECT
lss.asset_location, lss.asset_location,
@ -1244,7 +811,6 @@ SELECT
ROUND(CAST(lss.avg_qty_per_wo AS NUMERIC), 2) as avg_qty_per_wo, ROUND(CAST(lss.avg_qty_per_wo AS NUMERIC), 2) as avg_qty_per_wo,
lss.min_qty_used, lss.min_qty_used,
lss.max_qty_used, lss.max_qty_used,
iin.inv_avgcost,
-- Lead time metrics -- Lead time metrics
COALESCE(lt.avg_leadtime_months, 0) as avg_leadtime_months, COALESCE(lt.avg_leadtime_months, 0) as avg_leadtime_months,
COALESCE(lt.min_leadtime_months, 0) as min_leadtime_months, COALESCE(lt.min_leadtime_months, 0) as min_leadtime_months,
@ -1265,7 +831,6 @@ FROM location_sparepart_stats lss
LEFT JOIN item_descriptions id ON lss.itemnum = id.item_num LEFT JOIN item_descriptions id ON lss.itemnum = id.item_num
LEFT JOIN leadtime_stats lt ON lss.itemnum = lt.item_num LEFT JOIN leadtime_stats lt ON lss.itemnum = lt.item_num
LEFT JOIN cost_stats cs ON lss.itemnum = cs.item_num LEFT JOIN cost_stats cs ON lss.itemnum = cs.item_num
LEFT JOIN sparepart_usage iin ON lss.itemnum = iin.itemnum
ORDER BY lss.asset_location, lss.itemnum;""") ORDER BY lss.asset_location, lss.itemnum;""")
equipment_requirements_query = await db_session.execute(query) equipment_requirements_query = await db_session.execute(query)
@ -1277,9 +842,7 @@ ORDER BY lss.asset_location, lss.itemnum;""")
quantity_required=float(req_record.avg_qty_per_wo), quantity_required=float(req_record.avg_qty_per_wo),
lead_time=float(req_record.avg_leadtime_months), lead_time=float(req_record.avg_leadtime_months),
sparepart_name=req_record.item_description, sparepart_name=req_record.item_description,
unit_cost=float(req_record.avg_unit_cost), unit_cost=float(req_record.avg_unit_cost)
avg_cost=float(req_record.inv_avgcost or 0),
remark=sparepart_remark_dict.get(req_record.itemnum, "")
) )
equipment_requirements[req_record.asset_location].append(requirement) equipment_requirements[req_record.asset_location].append(requirement)
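The grouping step above follows the usual defaultdict(list) pattern (shown explicitly in the commented-out block earlier): requirements are bucketed per asset_location before being handed to SparepartManager.add_equipment_requirements. A stripped-down illustration with placeholder values:

from collections import defaultdict

equipment_requirements = defaultdict(list)
for asset_location, itemnum in [("BLR-01", "SP-001"), ("BLR-01", "SP-002"), ("TRB-02", "SP-003")]:
    equipment_requirements[asset_location].append(itemnum)
# -> {"BLR-01": ["SP-001", "SP-002"], "TRB-02": ["SP-003"]}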
@ -1291,65 +854,54 @@ ORDER BY lss.asset_location, lss.itemnum;""")
# Load procurement records (PO/PR) # Load procurement records (PO/PR)
query = text(""" query = text("""
WITH active_pos AS ( WITH active_pos AS (
-- Get all POs that are NOT complete (not in inventory yet) and NOT closed -- Get all POs that are NOT complete (not in inventory yet) and NOT closed
SELECT SELECT
pl.item_num, pl.item_num,
h.num as po_number, h.num as po_number,
pl.qty_received, pl.qty_received,
pl.qty_ordered, pl.qty_ordered,
h.estimated_arrival_date, h.estimated_arrival_date,
h.vendeliverydate, h.vendeliverydate,
h.receipts as po_receipts, h.receipts as po_receipts,
h.status as po_status, h.status as po_status,
pl.description, pl.description,
pl.unit_cost, pl.unit_cost,
pl.line_cost pl.line_cost
FROM public.maximo_sparepart_pr_po h FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
ON h.num = pl.num WHERE h.type = 'PO'
WHERE h.type = 'PO' -- Exclude POs where receipts = 'COMPLETE'
-- Exclude POs where receipts = 'COMPLETE' AND (h.receipts IS NULL OR h.receipts != 'COMPLETE')
AND (h.receipts IS NULL OR h.receipts != 'COMPLETE') -- Exclude closed POs
-- Exclude closed POs AND (h.status IS NULL OR h.status != 'CLOSE')
AND (h.status IS NULL OR h.status = 'APPR') ),
), po_with_pr_date AS (
po_with_pr_date AS ( -- Join with PR to get the issue_date
-- Force join with PR to ensure every PO has a PR SELECT
SELECT po.*,
po.*, pr.issue_date as pr_issue_date
pr.issue_date as pr_issue_date FROM active_pos po
FROM active_pos po LEFT JOIN public.maximo_sparepart_pr_po pr
INNER JOIN public.maximo_sparepart_pr_po pr ON pr.num = po.po_number
ON pr.num = po.po_number AND pr.type = 'PR'
AND pr.type = 'PR' )
),
item_inventory AS (
SELECT SELECT
itemnum, po.item_num,
MAX(inv_curbaltotal) AS current_balance_total, po.description,
AVG(inv_avgcost) AS avg_cost po.line_cost,
FROM public.wo_maximo_material po.unit_cost,
WHERE inv_itemnum IS NOT NULL COALESCE(i.curbaltotal, 0) as current_balance_total,
GROUP BY itemnum po.po_number,
) po.pr_issue_date,
SELECT po.po_status,
po.item_num, po.po_receipts,
po.description, COALESCE(po.qty_received, 0) as po_qty_received,
po.line_cost, COALESCE(po.qty_ordered, 0) as po_qty_ordered,
po.unit_cost, po.estimated_arrival_date as po_estimated_arrival_date,
COALESCE(i.current_balance_total, 0) as current_balance_total, po.vendeliverydate as po_vendor_delivery_date
po.po_number, FROM po_with_pr_date po
po.pr_issue_date, LEFT JOIN public.maximo_inventory i ON po.item_num = i.itemnum
po.po_status, ORDER BY po.item_num, po.pr_issue_date DESC;
po.po_receipts,
COALESCE(po.qty_received, 0) as po_qty_received,
COALESCE(po.qty_ordered, 0) as po_qty_ordered,
po.estimated_arrival_date as po_estimated_arrival_date,
po.vendeliverydate as po_vendor_delivery_date
FROM po_with_pr_date po
LEFT JOIN item_inventory i
ON po.item_num = i.itemnum
ORDER BY po.item_num, po.pr_issue_date DESC;
""") """)
# Execute the query # Execute the query
@ -1397,29 +949,4 @@ ORDER BY po.item_num, po.pr_issue_date DESC;
async def create_remark(*, db_session, collector_db_session, remark_in):
# Step 1: Check if remark already exists for this itemnum
result = await db_session.execute(
select(SparepartRemark).where(SparepartRemark.itemnum == remark_in.itemnum)
)
existing_remark = result.scalar_one_or_none()
    # Step 2: If a remark already exists for this itemnum, update it in place
    if existing_remark:
existing_remark.remark = remark_in.remark
await db_session.commit()
await db_session.refresh(existing_remark)
return existing_remark
    # Step 3: If it doesn't exist, create a new one
new_remark = SparepartRemark(
itemnum=remark_in.itemnum,
remark=remark_in.remark,
)
db_session.add(new_remark)
await db_session.commit()
await db_session.refresh(new_remark)
return new_remark
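As an aside, the check-then-insert flow in create_remark could also be expressed as a single PostgreSQL upsert. This is an alternative sketch only, not the implementation above, and it assumes a unique constraint on oh_ms_sparepart_remark.itemnum that the model shown here does not declare.

from sqlalchemy.dialects.postgresql import insert

async def upsert_remark(db_session, itemnum: str, remark: str):
    stmt = (
        insert(SparepartRemark)
        .values(itemnum=itemnum, remark=remark)
        .on_conflict_do_update(index_elements=["itemnum"], set_={"remark": remark})
    )
    await db_session.execute(stmt)
    await db_session.commit()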

@ -37,9 +37,8 @@ class ScopeEquipmentRead(ScopeEquipmentBase):
master_equipment: Optional[MasterEquipmentBase] = Field(None) master_equipment: Optional[MasterEquipmentBase] = Field(None)
class ScopeEquipmentPagination(DefultBase): class ScopeEquipmentPagination(Pagination):
items: List[ScopeEquipmentRead] = [] items: List[ScopeEquipmentRead] = []
total: int
class MasterEquipmentRead(DefultBase): class MasterEquipmentRead(DefultBase):
assetnum: Optional[str] = Field(None, title="Asset Number") assetnum: Optional[str] = Field(None, title="Asset Number")

@ -60,15 +60,8 @@ async def get_all(*, common, oh_scope: Optional[str] = None):
# ).distinct() # ).distinct()
) )
results = await common['db_session'].execute(query) results = await search_filter_sort_paginate(model=query, **common)
return results
items = results.scalars().all()
return {
"items": items,
"total": len(items)
}
async def get_by_oh_session_id(*, db_session: DbSession, oh_session_id: UUID): async def get_by_oh_session_id(*, db_session: DbSession, oh_session_id: UUID):
