Compare commits

...

100 Commits

Author SHA1 Message Date
Cizz22 3c4c534930 fix gantt chart position 3 weeks ago
Cizz22 5510885c44 fix minor 1 month ago
CIzz22 bbcc1d61fe Update 'src/auth/service.py' 2 months ago
Cizz22 3c4bf9f02a fff 2 months ago
Cizz22 bded149abc fix 2 months ago
Cizz22 2f13925aff fix 2 months ago
Cizz22 d969059f22 fix 2 months ago
Cizz22 5325679ba7 fix 2 months ago
Cizz22 8459a49e34 fix 2 months ago
Cizz22 6293b8ba48 fix 2 months ago
Cizz22 d43c4d3e4d fix 2 months ago
Cizz22 f27e61293e fix 2 months ago
Cizz22 69d3bf28b3 minor fix 2 months ago
Cizz22 f78553e819 fix 2 months ago
Cizz22 af08469f7e fix 2 months ago
Cizz22 72eb207956 fix 3 months ago
Cizz22 e4e3342b03 fix 3 months ago
Cizz22 c1a0b25b31 fix 3 months ago
Cizz22 521910cae5 add remark to sparepart get data 3 months ago
Cizz22 2ad33fcbe4 add remark 3 months ago
Cizz22 5fba3682dc fixxx 3 months ago
Cizz22 c62ed8f2b9 fix 3 months ago
Cizz22 1a5d3c2ec8 fix 3 months ago
Cizz22 8c7b676d86 fix 3 months ago
Cizz22 0602fb73bf fixx 3 months ago
Cizz22 1a1c7a2c0b fix 3 months ago
Cizz22 a222a39230 fix 3 months ago
Cizz22 27a0e7e864 fff 3 months ago
Cizz22 5554ed11b7 gic 3 months ago
Cizz22 fd97475ca5 fix 3 months ago
Cizz22 7a9d5aacab fix query 3 months ago
Cizz22 110d7cc7ee fix 3 months ago
Cizz22 d52b43fcf3 fix 3 months ago
Cizz22 59458283f0 fix 3 months ago
Cizz22 4fd5237fce fix 3 months ago
Cizz22 7c6ce3d3b0 fix 3 months ago
Cizz22 78dfbd3227 fix 3 months ago
Cizz22 95196776a7 fix 3 months ago
Cizz22 a5627d5a6e fix 3 months ago
Cizz22 48dd7b5963 fix 3 months ago
Cizz22 bf6a2a4e27 fix 3 months ago
Cizz22 4d0251e429 fix 3 months ago
Cizz22 506c6b7978 fix 3 months ago
Cizz22 f475c7ae58 fix 3 months ago
Cizz22 42c3ef22af fix 3 months ago
Cizz22 ca5b274c52 sim 3 months ago
Cizz22 67abe04ab1 fix 3 months ago
Cizz22 07c3491573 fix 3 months ago
Cizz22 e9bddd82d8 f 3 months ago
Cizz22 a8d6cebb10 fix 3 months ago
Cizz22 303c03592e fix 3 months ago
Cizz22 b5639d14df fix 3 months ago
Cizz22 d1ded0730b fix token 3 months ago
Cizz22 d71f3212d5 fix 3 months ago
Cizz22 2204bac92b add statistic 3 months ago
Cizz22 1252a153cc fix 3 months ago
Cizz22 b4d02daf3e fix 3 months ago
Cizz22 8822c24d56 fix 3 months ago
Cizz22 7bbec54477 change k for exp risk cost 3 months ago
Cizz22 b5a601ebbe fix minor config 3 months ago
Cizz22 e7ab60c0aa fix 3 months ago
Cizz22 22bc48eef2 fix 3 months ago
Cizz22 1409f486ad fix 3 months ago
Cizz22 bed6367688 fix 3 months ago
Cizz22 13c2873459 gzip 3 months ago
Cizz22 b39e51af8c fix 3 months ago
Cizz22 701e6dcddd fix 3 months ago
Cizz22 078a03caa0 TR fix 3 months ago
Cizz22 32276ef053 fix 3 months ago
Cizz22 50a22fd83a fix 3 months ago
Cizz22 82e4a466f0 fix spareparts 3 months ago
Cizz22 681ab03438 fix 3 months ago
Cizz22 c499fcd7d4 fix 3 months ago
Cizz22 648794d841 fix 3 months ago
Cizz22 1702893189 fix 3 months ago
Cizz22 43a3aba7bf fix 3 months ago
Cizz22 509c9b6a34 fox 3 months ago
Cizz22 5453556ae5 fix 3 months ago
Cizz22 fc5b999a63 fix oh 3 months ago
Cizz22 51dd9cbbae fix 3 months ago
Cizz22 49fbda2340 fix 3 months ago
Cizz22 e1592de91f fix 3 months ago
Cizz22 bb1b875f09 fix 3 months ago
Cizz22 77496b247a ffix 3 months ago
Cizz22 80e1dc87b3 fi 3 months ago
Cizz22 baa5f1e264 a 3 months ago
Cizz22 568c824b34 fix 3 months ago
Cizz22 433fe2f4b5 fix 3 months ago
Cizz22 509e8d48c2 fix 3 months ago
Cizz22 0cde56d64a fix 3 months ago
Cizz22 e7581a8983 fix 3 months ago
Cizz22 d651441a8a fix 3 months ago
Cizz22 ecce361805 fix 3 months ago
Cizz22 f1dce9210b minor config 3 months ago
Cizz22 84254862f2 fantt 3 months ago
Cizz22 c2f8fb9f22 fix 3 months ago
Cizz22 74b084b417 fix 3 months ago
Cizz22 05992bd4e2 fix 3 months ago
Cizz22 1aa9e1e9aa add rqeuirement sparepart for oh 3 months ago
Cizz22 b7c2dc6ed2 fix schedule api 3 months ago

20
.env

@ -9,15 +9,15 @@ DATABASE_CREDENTIAL_USER=postgres
DATABASE_CREDENTIAL_PASSWORD=postgres
DATABASE_NAME=digital_twin
# COLLECTOR_HOSTNAME=192.168.1.82
# COLLECTOR_PORT=1111
# COLLECTOR_CREDENTIAL_USER=digital_twin
# COLLECTOR_CREDENTIAL_PASSWORD=Pr0jec7@D!g!tTwiN
# COLLECTOR_NAME=digital_twin
COLLECTOR_HOSTNAME=192.168.1.82
COLLECTOR_PORT=1111
COLLECTOR_CREDENTIAL_USER=digital_twin
COLLECTOR_CREDENTIAL_PASSWORD=Pr0jec7@D!g!tTwiN
COLLECTOR_NAME=digital_twin
COLLECTOR_HOSTNAME=192.168.1.86
COLLECTOR_PORT=5432
COLLECTOR_CREDENTIAL_USER=postgres
COLLECTOR_CREDENTIAL_PASSWORD=postgres
COLLECTOR_NAME=digital_twin
# COLLECTOR_HOSTNAME=192.168.1.86
# COLLECTOR_PORT=5432
# COLLECTOR_CREDENTIAL_USER=postgres
# COLLECTOR_CREDENTIAL_PASSWORD=postgres
# COLLECTOR_NAME=digital_twin

10
Jenkinsfile vendored

@ -61,6 +61,16 @@ pipeline {
"""
}
}
// stage('Watchtower Deployment') {
// steps {
// sh """
// # Push both tags
// docker push ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:${GIT_COMMIT_HASH}
// docker push ${DOCKER_HUB_USERNAME}/${IMAGE_NAME}:latest
// """
// }
// }
// stage('Deploy') {
// steps {

113
poetry.lock generated

@ -1,4 +1,16 @@
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.
[[package]]
name = "absl-py"
version = "2.3.1"
description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "absl_py-2.3.1-py3-none-any.whl", hash = "sha256:eeecf07f0c2a93ace0772c92e596ace6d3d3996c042b2128459aaae2a76de11d"},
{file = "absl_py-2.3.1.tar.gz", hash = "sha256:a97820526f7fbfd2ec1bce83f3f25e3a14840dac0d8e02a0b71cd75db3f77fc9"},
]
[[package]]
name = "aiohappyeyeballs"
@ -686,8 +698,8 @@ files = [
google-auth = ">=2.14.1,<3.0.0"
googleapis-common-protos = ">=1.56.2,<2.0.0"
proto-plus = [
{version = ">=1.22.3,<2.0.0"},
{version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
{version = ">=1.22.3,<2.0.0", markers = "python_version < \"3.13\""},
]
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
requests = ">=2.18.0,<3.0.0"
@ -1030,6 +1042,18 @@ files = [
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "immutabledict"
version = "4.2.1"
description = "Immutable wrapper around dictionaries (a fork of frozendict)"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373"},
{file = "immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc"},
]
[[package]]
name = "importlib-resources"
version = "6.4.5"
@ -1420,6 +1444,63 @@ rsa = ["cryptography (>=3.0.0)"]
signals = ["blinker (>=1.4.0)"]
signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
[[package]]
name = "ortools"
version = "9.14.6206"
description = "Google OR-Tools python libraries and modules"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "ortools-9.14.6206-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:6e2364edd1577cd094e7c7121ec5fb0aa462a69a78ce29cdc40fa45943ff0091"},
{file = "ortools-9.14.6206-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164b726b4d358ae68a018a52ff1999c0646d6f861b33676c2c83e2ddb60cfa13"},
{file = "ortools-9.14.6206-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ebb0e210969cc3246fe78dadf9038936a3a18edc8156e23a394e2bbcec962431"},
{file = "ortools-9.14.6206-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:174de2f04c106c7dcc5989560f2c0e065e78fba0ad0d1fd029897582f4823c3a"},
{file = "ortools-9.14.6206-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e6d994ebcf9cbdda1e20a75662967124e7e6ffd707c7f60b2db1a11f2104d384"},
{file = "ortools-9.14.6206-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5763472f8b05072c96c36c4eafadd9f6ffcdab38a81d8f0142fc408ad52a4342"},
{file = "ortools-9.14.6206-cp310-cp310-win_amd64.whl", hash = "sha256:6711516f837f06836ff9fda66fe4337b88c214f2ba6a921b84d3b05876f1fa8c"},
{file = "ortools-9.14.6206-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:8bcd8481846090585a4fac82800683555841685c49fa24578ad1e48a37918568"},
{file = "ortools-9.14.6206-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5af2bbf2fff7d922ba036e27d7ff378abecb24749380c86a77fa6208d5ba35cd"},
{file = "ortools-9.14.6206-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a6ab43490583c4bbf0fff4e51bb1c15675d5651c2e8e12ba974fd08e8c05a48f"},
{file = "ortools-9.14.6206-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9aa2c0c50a765c6a060960dcb0207bd6aeb6341f5adacb3d33e613b7e7409428"},
{file = "ortools-9.14.6206-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:64ec63fd92125499e9ca6b72700406dda161eefdfef92f04c35c5150391f89a4"},
{file = "ortools-9.14.6206-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8651008f05257471f45a919ade5027afa12ab6f7a4fdf0a8bcc18c92032f8571"},
{file = "ortools-9.14.6206-cp311-cp311-win_amd64.whl", hash = "sha256:ca60877830a631545234e83e7f6bd55830334a4d0c2b51f1669b1f2698d58b84"},
{file = "ortools-9.14.6206-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:e38c8c4a184820cbfdb812a8d484f6506cf16993ce2a95c88bc1c9d23b17c63e"},
{file = "ortools-9.14.6206-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db685073cbed9f8bfaa744f5e883f3dea57c93179b0abe1788276fd3b074fa61"},
{file = "ortools-9.14.6206-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4bfb8bffb29991834cf4bde7048ca8ee8caed73e8dd21e5ec7de99a33bbfea0"},
{file = "ortools-9.14.6206-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eb464a698837e7f90ca5f9b3d748b6ddf553198a70032bc77824d1cd88695d2b"},
{file = "ortools-9.14.6206-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8f33deaeb7c3dda8ca1d29c5b9aa9c3a4f2ca9ecf34f12a1f809bb2995f41274"},
{file = "ortools-9.14.6206-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:086e7c2dc4f23efffb20a5e20f618c7d6adb99b2d94f684cab482387da3bc434"},
{file = "ortools-9.14.6206-cp312-cp312-win_amd64.whl", hash = "sha256:17c13b0bfde17ac57789ad35243edf1318ecd5db23cf949b75ab62480599f188"},
{file = "ortools-9.14.6206-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:8d0df7eef8ba53ad235e29018389259bad2e667d9594b9c2a412ed6a5756bd4e"},
{file = "ortools-9.14.6206-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:57dfe10844ce8331634d4723040fe249263fd490407346efc314c0bc656849b5"},
{file = "ortools-9.14.6206-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5c0c2c00a6e5d5c462e76fdda7dbd40d0f9139f1df4211d34b36906696248020"},
{file = "ortools-9.14.6206-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:38044cf39952d93cbcc02f6acdbe0a9bd3628fbf17f0d7eb0374060fa028c22e"},
{file = "ortools-9.14.6206-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:98564de773d709e1e49cb3c32f6917589c314f047786d88bd5f324c0eb7be96e"},
{file = "ortools-9.14.6206-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:80528b0ac72dc3de00cbeef2ce028517a476450b5877b1cda1b8ecb9fa98505e"},
{file = "ortools-9.14.6206-cp313-cp313-win_amd64.whl", hash = "sha256:47b1b15dcb085d32c61621b790259193aefa9e4577abadf233d47fbe7d0b81ef"},
{file = "ortools-9.14.6206-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d26a0f9ed97ef9d3384a9069923585f5f974c3fde555a41f4d6381fbe7840bc4"},
{file = "ortools-9.14.6206-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d40d8141667d47405f296a9f687058c566d7816586e9a672b59e9fcec8493133"},
{file = "ortools-9.14.6206-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:aefea81ed81aa937873efc520381785ed65380e52917f492ab566f46bbb5660d"},
{file = "ortools-9.14.6206-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f044bb277db3ab6a1b958728fe1cf14ca87c3800d67d7b321d876b48269340f6"},
{file = "ortools-9.14.6206-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:070dc7cebfa0df066acb6b9a6d02339351be8f91b2352b782ee7f40412207e20"},
{file = "ortools-9.14.6206-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5eb558a03b4ada501ecdea7b89f0d3bdf2cc6752e1728759ccf27923f592a8c2"},
{file = "ortools-9.14.6206-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:646329fa74a5c48c591b7fabfd26743f6d2de4e632b3b96ec596c47bfe19177a"},
{file = "ortools-9.14.6206-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa5161924f35b8244295acd0fab2a8171bb08ef8d5cfaf1913a21274475704cc"},
{file = "ortools-9.14.6206-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e253526a026ae194aed544a0d065163f52a0c9cb606a1061c62df546877d5452"},
{file = "ortools-9.14.6206-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:dcb496ef633d884036770783f43bf8a47ff253ecdd8a8f5b95f00276ec241bfd"},
{file = "ortools-9.14.6206-cp39-cp39-win_amd64.whl", hash = "sha256:2733f635675de631fdc7b1611878ec9ee2f48a26434b7b3c07d0a0f535b92e03"},
]
[package.dependencies]
absl-py = ">=2.0.0"
immutabledict = ">=3.0.0"
numpy = ">=1.13.3"
pandas = ">=2.0.0"
protobuf = ">=6.31.1,<6.32"
typing-extensions = ">=4.12"
[[package]]
name = "packaging"
version = "24.2"
@ -1486,8 +1567,8 @@ files = [
[package.dependencies]
numpy = [
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
{version = ">=1.26.0", markers = "python_version >= \"3.12\""},
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
]
python-dateutil = ">=2.8.2"
pytz = ">=2020.1"
@ -1662,23 +1743,21 @@ testing = ["google-api-core (>=1.31.5)"]
[[package]]
name = "protobuf"
version = "5.29.0"
version = "6.31.1"
description = ""
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "protobuf-5.29.0-cp310-abi3-win32.whl", hash = "sha256:ea7fb379b257911c8c020688d455e8f74efd2f734b72dc1ea4b4d7e9fd1326f2"},
{file = "protobuf-5.29.0-cp310-abi3-win_amd64.whl", hash = "sha256:34a90cf30c908f47f40ebea7811f743d360e202b6f10d40c02529ebd84afc069"},
{file = "protobuf-5.29.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:c931c61d0cc143a2e756b1e7f8197a508de5365efd40f83c907a9febf36e6b43"},
{file = "protobuf-5.29.0-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:85286a47caf63b34fa92fdc1fd98b649a8895db595cfa746c5286eeae890a0b1"},
{file = "protobuf-5.29.0-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:0d10091d6d03537c3f902279fcf11e95372bdd36a79556311da0487455791b20"},
{file = "protobuf-5.29.0-cp38-cp38-win32.whl", hash = "sha256:0cd67a1e5c2d88930aa767f702773b2d054e29957432d7c6a18f8be02a07719a"},
{file = "protobuf-5.29.0-cp38-cp38-win_amd64.whl", hash = "sha256:e467f81fdd12ded9655cea3e9b83dc319d93b394ce810b556fb0f421d8613e86"},
{file = "protobuf-5.29.0-cp39-cp39-win32.whl", hash = "sha256:17d128eebbd5d8aee80300aed7a43a48a25170af3337f6f1333d1fac2c6839ac"},
{file = "protobuf-5.29.0-cp39-cp39-win_amd64.whl", hash = "sha256:6c3009e22717c6cc9e6594bb11ef9f15f669b19957ad4087214d69e08a213368"},
{file = "protobuf-5.29.0-py3-none-any.whl", hash = "sha256:88c4af76a73183e21061881360240c0cdd3c39d263b4e8fb570aaf83348d608f"},
{file = "protobuf-5.29.0.tar.gz", hash = "sha256:445a0c02483869ed8513a585d80020d012c6dc60075f96fa0563a724987b1001"},
{file = "protobuf-6.31.1-cp310-abi3-win32.whl", hash = "sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9"},
{file = "protobuf-6.31.1-cp310-abi3-win_amd64.whl", hash = "sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447"},
{file = "protobuf-6.31.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402"},
{file = "protobuf-6.31.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39"},
{file = "protobuf-6.31.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6"},
{file = "protobuf-6.31.1-cp39-cp39-win32.whl", hash = "sha256:0414e3aa5a5f3ff423828e1e6a6e907d6c65c1d5b7e6e975793d5590bdeecc16"},
{file = "protobuf-6.31.1-cp39-cp39-win_amd64.whl", hash = "sha256:8764cf4587791e7564051b35524b72844f845ad0bb011704c3736cce762d8fe9"},
{file = "protobuf-6.31.1-py3-none-any.whl", hash = "sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e"},
{file = "protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a"},
]
[[package]]
@ -2929,4 +3008,4 @@ propcache = ">=0.2.1"
[metadata]
lock-version = "2.1"
python-versions = "^3.11"
content-hash = "696ea33fe5b5efd49565f0b6216a2f40a85d62d34b27693ac6271b676f94897d"
content-hash = "6c2a5a5a8e6a2732bd9e94de4bac3a7c0d3e63d959d5793b23eb327c7a95f3f8"

@ -30,6 +30,7 @@ google-api-python-client = "^2.169.0"
google-auth-httplib2 = "^0.2.0"
google-auth-oauthlib = "^1.2.2"
aiohttp = "^3.12.14"
ortools = "^9.14.6206"
[build-system]
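The hunk above adds OR-Tools to pyproject.toml; none of the visible hunks call it (the suppressed diff likely does), so the following is only a hedged sketch of how a budget-constrained equipment selection, like the one handled by the hand-rolled knapsack further down, could be posed with the CP-SAT solver. The candidate names, costs, and scores are invented.

```python
from ortools.sat.python import cp_model

# Invented candidates: (name, cost, integer priority score).
candidates = [("pump A", 40_000, 60), ("valve B", 30_000, 50), ("fan C", 50_000, 40)]
budget = 70_000

model = cp_model.CpModel()
picked = [model.NewBoolVar(name) for name, _, _ in candidates]

# Stay within budget, maximise the total priority score.
model.Add(sum(x * cost for x, (_, cost, _) in zip(picked, candidates)) <= budget)
model.Maximize(sum(x * score for x, (_, _, score) in zip(picked, candidates)))

solver = cp_model.CpSolver()
if solver.Solve(model) in (cp_model.OPTIMAL, cp_model.FEASIBLE):
    print([name for x, (name, _, _) in zip(picked, candidates) if solver.Value(x)])
```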

@ -10,7 +10,7 @@ from src.calculation_budget_constrains.router import \
from src.calculation_target_reliability.router import \
router as calculation_target_reliability
from src.calculation_time_constrains.router import \
router as calculation_time_constrains_router
router as calculation_time_constrains_router, get_calculation
# from src.job.router import router as job_router
from src.overhaul.router import router as overhaul_router
@ -32,7 +32,7 @@ from src.equipment_sparepart.router import router as equipment_sparepart_router
# from src.overhaul.router import router as overhaul_router
# from src.overhaul_history.router import router as overhaul_history_router
# from src.overhaul_activity.router import router as scope_equipment_activity_router
# # from src.overhaul_schedule.router import router as ovehaul_schedule_router
from src.overhaul_scope.router import router as ovehaul_schedule_router
# from src.scope_equipment_part.router import router as scope_equipment_part_router
# from src.calculation_target_reliability.router import router as calculation_target_reliability
#
@ -142,9 +142,9 @@ authenticated_api_router.include_router(
# scope_equipment_part_router, prefix="/equipment-parts", tags=["scope_equipment_parts"]
# )
# authenticated_api_router.include_router(
# ovehaul_schedule_router, prefix="/overhaul-schedules", tags=["overhaul_schedules"]
# )
authenticated_api_router.include_router(
ovehaul_schedule_router, prefix="/overhaul-schedules", tags=["overhaul_schedules"]
)
# calculation
calculation_router = APIRouter(prefix="/calculation", tags=["calculations"])
@ -172,4 +172,10 @@ calculation_router.include_router(
authenticated_api_router.include_router(calculation_router)
api_router.include_router(
get_calculation,
prefix="/calculation/time-constraint",
tags=["calculation", "time_constraint"],
)
api_router.include_router(authenticated_api_router)

@ -1,5 +1,6 @@
# app/auth/auth_bearer.py
import json
from typing import Annotated, Optional
import requests
@ -70,15 +71,72 @@ class JWTBearer(HTTPBearer):
async def get_current_user(request: Request) -> UserBase:
return request.state.user
async def get_token(request: Request):
token = request.headers.get("Authorization")
if token:
return token.split(" ")[1]
return token.replace("Bearer ", "") # Strip the "Bearer " prefix
else:
return request.cookies.get("access_token") # Fall back to the cookie
return "" # Mengembalikan token atau None jika tidak ada
async def internal_key(request: Request):
token = request.headers.get("Authorization")
if not token:
api_key = request.headers.get("X-Internal-Key")
if api_key != config.API_KEY:
raise HTTPException(
status_code=403, detail="Invalid Key."
)
try:
headers = {
'Content-Type': 'application/json'
}
response = requests.post(
f"{config.AUTH_SERVICE_API}/sign-in",
headers=headers,
data=json.dumps({
"username": "ohuser",
"password": "123456789"
})
)
if not response.ok:
print(str(response.json()))
raise Exception("error auth")
user_data = response.json()
return user_data['data']['access_token']
except Exception as e:
raise Exception(str(e))
else:
try:
response = requests.get(
f"{config.AUTH_SERVICE_API}/verify-token",
headers={"Authorization": f"{token}"},
)
if not response.ok:
raise HTTPException(
status_code=403, detail="Invalid token."
)
return token.split(" ")[1]
except Exception as e:
print(f"Token verification error: {str(e)}")
return False, str(e)
return ""
CurrentUser = Annotated[UserBase, Depends(get_current_user)]
Token = Annotated[str, Depends(get_token)]
InternalKey = Annotated[str, Depends(internal_key)]
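A minimal usage sketch for the new InternalKey dependency, assuming the FastAPI wiring shown elsewhere in this diff; the route path and response body are placeholders, not part of the change.

```python
from fastapi import APIRouter

from src.auth.service import InternalKey  # the Annotated dependency defined above

# Hypothetical route, for illustration only.
internal_router = APIRouter()

@internal_router.get("/internal/health")
async def internal_health(token: InternalKey):
    # `token` is whatever internal_key() resolved: the caller's verified bearer token,
    # or a service-account token obtained through the X-Internal-Key sign-in flow.
    return {"token_present": bool(token)}
```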

@ -5,6 +5,7 @@ from fastapi.params import Query
from src.auth.service import Token
from src.calculation_target_reliability.service import get_simulation_results
from src.config import TC_RBD_ID
from src.database.core import CollectorDbSession, DbSession
from src.models import StandardResponse
@ -24,7 +25,7 @@ async def get_target_reliability(
"""Get all scope pagination."""
results = await get_simulation_results(
simulation_id = "default",
simulation_id = TC_RBD_ID,
token=token
)

@ -84,7 +84,7 @@ async def get_all_budget_constrains(
for item in result:
cost = item["total_cost"] or 1.0
efficiency = item["contribution_norm"] / cost
item["priority_score"] = 0.7 * item["contribution_norm"] + 0.3 * efficiency
item["priority_score"] = item["contribution_norm"]
# Choose method
if use_optimal:
@ -104,7 +104,7 @@ def calculate_asset_eaf_contributions(plant_result, eq_results):
for asset in eq_results:
node_name = asset.get("aeros_node", {}).get("node_name")
if node_name:
results[node_name] = asset.get("contribution", 0.0)
results[node_name] = asset.get("contribution_factor", 0.0)
return results
@ -150,7 +150,7 @@ def knapsack_selection(equipments: List[dict], budget: float, scale: int = 10_00
for i in range(n):
cost, value = costs[i], values[i]
for w in range(W, cost - 1, -1):
if dp[w - cost] + value > dp[w]:
if dp[w - cost] + value >= dp[w]: # <= FIXED HERE
dp[w] = dp[w - cost] + value
keep[i][w] = True
@ -164,5 +164,15 @@ def knapsack_selection(equipments: List[dict], budget: float, scale: int = 10_00
else:
excluded.append(equipments[i])
# Optional: fill leftover budget with zero-priority items
remaining_budget = budget - sum(eq["total_cost"] for eq in selected)
if remaining_budget > 0:
for eq in excluded[:]:
if eq["total_cost"] <= remaining_budget:
selected.append(eq)
excluded.remove(eq)
remaining_budget -= eq["total_cost"]
return selected, excluded
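A self-contained toy run of the same 0/1 knapsack routine, using the tie-breaking `>=` fixed above; the equipment dicts, costs, and scores are made up, and the leftover-budget fill step is omitted for brevity.

```python
from typing import List, Tuple

def knapsack_demo(equipments: List[dict], budget: float, scale: int = 10_000) -> Tuple[list, list]:
    # Scale float costs down to small integers so they can index the DP table.
    costs = [int(eq["total_cost"] / scale) for eq in equipments]
    values = [eq["priority_score"] for eq in equipments]
    W = int(budget / scale)
    n = len(equipments)

    dp = [0.0] * (W + 1)
    keep = [[False] * (W + 1) for _ in range(n)]

    for i in range(n):
        cost, value = costs[i], values[i]
        for w in range(W, cost - 1, -1):
            if dp[w - cost] + value >= dp[w]:  # ties favour taking the item, as above
                dp[w] = dp[w - cost] + value
                keep[i][w] = True

    # Walk the keep table backwards to recover which items were selected.
    selected, w = [], W
    for i in range(n - 1, -1, -1):
        if keep[i][w]:
            selected.append(equipments[i])
            w -= costs[i]
    excluded = [eq for eq in equipments if eq not in selected]
    return selected, excluded

# Toy data (hypothetical costs in whatever currency the service uses).
items = [
    {"name": "pump A", "total_cost": 40_000.0, "priority_score": 0.6},
    {"name": "valve B", "total_cost": 30_000.0, "priority_score": 0.5},
    {"name": "fan C", "total_cost": 50_000.0, "priority_score": 0.4},
]
chosen, left_out = knapsack_demo(items, budget=70_000.0)
print([eq["name"] for eq in chosen])  # pump A and valve B fill the 70k budget
```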

@ -1,8 +1,11 @@
import asyncio
from typing import Dict, List, Optional
from temporalio.client import Client
from fastapi import APIRouter, HTTPException, status
from fastapi.params import Query
from src.calculation_target_reliability.utils import wait_for_workflow
from src.config import TEMPORAL_URL, TR_RBD_ID
from src.database.core import DbSession, CollectorDbSession
from src.auth.service import Token
from src.models import StandardResponse
@ -36,8 +39,9 @@ async def get_target_reliability(
collector_db: CollectorDbSession,
oh_session_id: Optional[str] = Query(None),
eaf_input: float = Query(99.8),
duration: int = Query(8760),
simulation_id: Optional[str] = Query(None)
duration: int = Query(17520),
simulation_id: Optional[str] = Query(None),
cut_hours = Query(0)
):
"""Get all scope pagination."""
if not oh_session_id:
@ -52,27 +56,43 @@ async def get_target_reliability(
# eaf_input=eaf_input,
# oh_duration=duration
# )
# simulation_id = await run_rbd_simulation(
# sim_hours=duration,
# token=token
# )
if not simulation_id:
simulation_id = "default"
if duration != 17520:
if not simulation_id:
raise HTTPException(
status_code=status.HTTP_425_TOO_EARLY, # or 409 Conflict
detail="Simulation still running. Please wait.",
)
else:
temporal_client = await Client.connect(TEMPORAL_URL)
handle = temporal_client.get_workflow_handle(f"simulation-{simulation_id}")
desc = await handle.describe()
status_name = desc.status.name
if status_name in ["RUNNING", "CONTINUED_AS_NEW"]:
raise HTTPException(
status_code=status.HTTP_425_TOO_EARLY, # or 409 Conflict
detail="Simulation still running. Please wait.",
)
else:
simulation_id = TR_RBD_ID
results = await get_simulation_results(
simulation_id=simulation_id,
token=token
)
optimize_result = await identify_worst_eaf_contributors(
simulation_result=results,
target_eaf=eaf_input,
db_session=db_session,
oh_session_id=oh_session_id,
collector_db=collector_db,
simulation_id=simulation_id
simulation_id=simulation_id,
duration=duration,
po_duration=1200,
cut_hours=float(cut_hours)
)

@ -39,6 +39,8 @@ class AssetWeight(OverhaulBase):
num_of_failures: int
down_time: float
efficiency: float
improvement_impact:float
birbaum: float
class MaintenanceScenario(OverhaulBase):
location_tag: str
@ -52,10 +54,15 @@ class MaintenanceScenario(OverhaulBase):
class OptimizationResult(OverhaulBase):
current_plant_eaf: float
target_plant_eaf: float
possible_plant_eaf:float
eaf_gap: float
eaf_improvement_text:str
recommended_reduced_outage:Optional[float] = 0
warning_message:Optional[str]
asset_contributions: List[dict]
optimization_success: bool = False
simulation_id: Optional[str] = None
# {

@ -1,8 +1,10 @@
import math
from typing import Optional, List
from dataclasses import dataclass
from sqlalchemy import Delete, Select
import httpx
from src.auth.service import CurrentUser
from src.config import RBD_SERVICE_API
from src.contribution_util import calculate_contribution, calculate_contribution_accurate
from src.database.core import DbSession, CollectorDbSession
from datetime import datetime, timedelta
@ -16,18 +18,20 @@ from .schema import AssetWeight,MaintenanceScenario,OptimizationResult
from src.overhaul_activity.service import get_standard_scope_by_session_id
RBD_SERVICE_API = "http://192.168.1.82:8000/rbd"
client = httpx.AsyncClient(timeout=300.0)
async def run_rbd_simulation(*, sim_hours: int, token):
sim_data = {
"SimulationName": "Simulation OH Reliability Target",
"SchematicName": "- TJB - Unit 3 -",
"SimSeed": 1,
"SimDuration": sim_hours,
"DurationUnit": "UHour",
"SimulationName": f"Simulasi TR OH {sim_hours}",
"SchematicName": "- TJB - Unit 3 -",
"SimSeed": 1,
"SimDuration": sim_hours,
"OverhaulInterval": sim_hours - 1201,
"DurationUnit": "UHour",
"SimNumRun": 1,
"IsDefault": False,
"OverhaulDuration": 1200
}
headers = {
@ -74,77 +78,111 @@ async def get_simulation_results(*, simulation_id: str, token: str):
"plant_result": plant_data
}
def calculate_asset_eaf_contributions(plant_result, eq_results, standard_scope, eaf_gap):
def calculate_asset_eaf_contributions(plant_result, eq_results, standard_scope, eaf_gap, scheduled_outage):
"""
Calculate each asset's contribution to plant EAF with realistic improvement potential.
Ranking:
1. Highest contribution (Birnbaum Importance)
2. Tie-breaker: Contribution per downtime (efficiency)
Calculate each asset's contribution to plant EAF with realistic, fair improvement allocation.
The total EAF gap is distributed among assets proportionally to their contribution potential.
Automatically skips equipment with no unplanned downtime (only scheduled outages).
"""
eaf_gap_fraction = eaf_gap / 100.0 if eaf_gap > 1.0 else eaf_gap
MIN_BIRNBAUM_IMPORTANCE = 0.0005
REALISTIC_MAX_AVAILABILITY = 0.995 # 99.5%
MIN_IMPROVEMENT_PERCENT = 0.005 # 0.5%
total_hours = plant_result.get("total_uptime") + plant_result.get("total_downtime")
plant_operating_fraction = (total_hours - scheduled_outage) / total_hours
REALISTIC_MAX_TECHNICAL = 0.995
REALISTIC_MAX_AVAILABILITY = REALISTIC_MAX_TECHNICAL * plant_operating_fraction
MIN_IMPROVEMENT_PERCENT = 0.0001
min_improvement_fraction = MIN_IMPROVEMENT_PERCENT / 100.0
EPSILON = 0.001 # 1 ms or a fraction of an hour for comparison tolerance
results = []
weighted_assets = []
# Step 1: Collect eligible assets and their weights
for asset in eq_results:
asset_name = asset.get("aeros_node").get("node_name")
node = asset.get("aeros_node")
if not node:
continue
asset_name = node.get("node_name")
num_of_events = asset.get("num_events", 0)
if asset_name not in standard_scope:
continue
birnbaum = asset.get("contribution", 0.0)
contribution_factor = asset.get("contribution_factor", 0.0)
birbaum = asset.get("contribution", 0.0)
current_availability = asset.get("availability", 0.0)
downtime = asset.get("total_downtime", 0.0)
# Filter 1: Importance too low
if birnbaum < MIN_BIRNBAUM_IMPORTANCE:
# --- NEW: Skip equipment with no failures and near-maximum availability ---
if (
num_of_events < 2 # no unplanned events
or contribution_factor <= 0
):
# This equipment has nothing to improve realistically
continue
# Max possible availability improvement
max_possible_improvement = REALISTIC_MAX_AVAILABILITY - current_availability
if max_possible_improvement <= 0:
continue
# --- Compute realistic possible improvement ---
if REALISTIC_MAX_AVAILABILITY > current_availability:
max_possible_improvement = REALISTIC_MAX_AVAILABILITY - current_availability
else:
max_possible_improvement = 0.0 # No improvement possible
# Required improvement (limited by plant gap and availability ceiling)
required_impr = min(eaf_gap_fraction, max_possible_improvement) * birnbaum
# Filter 2: Improvement too small
if required_impr < min_improvement_fraction:
continue
# Contribution efficiency (secondary metric)
efficiency = birnbaum / downtime if downtime > 0 else birnbaum
# Compute weighted importance (Birnbaum × FV)
raw_weight = birbaum
weight = math.sqrt(max(raw_weight, 0.0))
weighted_assets.append((asset, weight, max_possible_improvement))
# Step 2: Compute total weight
total_weight = sum(w for _, w, _ in weighted_assets) or 1.0
# Step 3: Distribute improvement proportionally to weight
for asset, weight, max_possible_improvement in weighted_assets:
node = asset.get("aeros_node")
contribution_factor = asset.get("contribution_factor", 0.0)
birbaum = asset.get("contribution", 0.0)
current_availability = asset.get("availability", 0.0)
downtime = asset.get("total_downtime", 0.0)
required_improvement = eaf_gap_fraction * (weight/total_weight)
required_improvement = min(required_improvement, max_possible_improvement)
required_improvement = max(required_improvement, min_improvement_fraction)
improvement_impact = required_improvement * contribution_factor
efficiency = birbaum / downtime if downtime > 0 else birbaum
contribution = AssetWeight(
node=asset.get("aeros_node"),
node=node,
availability=current_availability,
contribution=birnbaum,
required_improvement=required_impr,
contribution=contribution_factor,
required_improvement=required_improvement,
improvement_impact=improvement_impact,
num_of_failures=asset.get("num_events", 0),
down_time=downtime,
efficiency= efficiency
efficiency=efficiency,
birbaum=birbaum,
)
results.append(contribution)
# Sort: 1) contribution (desc), 2) efficiency (desc)
results.sort(key=lambda x: (x.contribution, x.efficiency), reverse=True)
# Step 4: Sort by Birnbaum importance
results.sort(key=lambda x: x.birbaum, reverse=True)
return results
def project_eaf_improvement(asset: AssetWeight, improvement_factor: float = 0.3) -> float:
"""
Project EAF improvement after maintenance
This is a simplified model - you should replace with your actual prediction logic
"""
current_downtime_pct = 100 - asset.eaf
# Assume maintenance reduces downtime by improvement_factor
improved_downtime_pct = current_downtime_pct * (1 - improvement_factor)
projected_eaf = 100 - improved_downtime_pct
return min(projected_eaf, 99.9) # Cap at 99.9%
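A small numeric sketch of the proportional allocation performed in calculate_asset_eaf_contributions above, reduced to the weight normalisation and clamping steps; the Birnbaum values and availabilities are invented, and the outage-scaled availability ceiling is simplified to the plain 0.995 constant.

```python
import math

# Hypothetical Birnbaum contributions and availabilities for three assets.
assets = [
    {"name": "boiler feed pump", "birnbaum": 0.04, "availability": 0.97},
    {"name": "condenser", "birnbaum": 0.01, "availability": 0.99},
    {"name": "coal mill", "birnbaum": 0.0025, "availability": 0.95},
]

eaf_gap_fraction = 0.02             # plant needs +2% EAF
REALISTIC_MAX_AVAILABILITY = 0.995  # simplified ceiling (no outage scaling here)
MIN_IMPROVEMENT_FRACTION = 0.0001 / 100

# Step 1-2: sqrt weights and their total, as in the function above.
weights = {a["name"]: math.sqrt(a["birnbaum"]) for a in assets}
total_weight = sum(weights.values())

# Step 3: each asset gets a share of the gap, clamped to what it can physically gain.
for a in assets:
    share = eaf_gap_fraction * weights[a["name"]] / total_weight
    ceiling = max(REALISTIC_MAX_AVAILABILITY - a["availability"], 0.0)
    required = max(min(share, ceiling), MIN_IMPROVEMENT_FRACTION)
    print(f'{a["name"]}: share={share:.4f}, ceiling={ceiling:.4f}, required={required:.4f}')
```

With these numbers the condenser is capped by its 0.005 ceiling, while the other two keep their proportional share.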
@ -157,24 +195,69 @@ async def identify_worst_eaf_contributors(
oh_session_id: str,
collector_db: CollectorDbSession,
simulation_id: str,
duration: int,
po_duration: int,
cut_hours: float = 0, # new optional parameter: how many hours of planned outage user wants to cut
):
"""
Identify equipment that contributes most to plant EAF reduction
in order to reach a target EAF.
Identify equipment that contributes most to plant EAF reduction,
evaluate if target EAF is physically achievable, and optionally
calculate the additional improvement if user cuts scheduled outage.
"""
# Extract results
calc_result = simulation_result["calc_result"]
plant_result = simulation_result["plant_result"]
# Ensure list of equipment
eq_results = calc_result if isinstance(calc_result, list) else [calc_result]
# Current plant EAF and gap
# Base parameters
current_plant_eaf = plant_result.get("eaf", 0)
total_hours = duration
scheduled_outage = int(po_duration)
reduced_outage = max(scheduled_outage - cut_hours, 0)
max_eaf_possible = (total_hours - reduced_outage) / total_hours * 100
# Improvement purely from outage reduction (global)
scheduled_eaf_gain = (cut_hours / total_hours) * 100 if cut_hours > 0 else 0.0
# Target feasibility check
warning_message = None
if target_eaf > max_eaf_possible:
impossible_gap = target_eaf - max_eaf_possible
required_scheduled_hours = total_hours * (1 - target_eaf / 100)
required_reduction = reduced_outage - required_scheduled_hours
# Build dynamic phrase for clarity
if cut_hours > 0:
reduction_phrase = f" even after reducing planned outage by {cut_hours}h"
else:
reduction_phrase = ""
warning_message = (
f"⚠️ Target EAF {target_eaf:.2f}% exceeds theoretical maximum {max_eaf_possible:.2f}%"
f"{reduction_phrase}.\n"
f"To achieve it, planned outage must be further reduced by approximately "
f"{required_reduction:.1f} hours (from {reduced_outage:.0f}h → {required_scheduled_hours:.0f}h)."
)
# Cap target EAF to max achievable for calculation
target_eaf = max_eaf_possible
eaf_gap = (target_eaf - current_plant_eaf) / 100.0
if eaf_gap <= 0:
return OptimizationResult(
current_plant_eaf=current_plant_eaf,
target_plant_eaf=target_eaf,
possible_plant_eaf=current_plant_eaf,
eaf_gap=0,
warning_message=warning_message or "Target already achieved or exceeded.",
asset_contributions=[],
optimization_success=True,
simulation_id=simulation_id,
eaf_improvement_text=""
)
# Get standard scope (equipment allowed for overhaul/optimization)
# Get standard scope (equipment in OH)
standard_scope = await get_standard_scope_by_session_id(
db_session=db_session,
overhaul_session_id=oh_session_id,
@ -182,43 +265,92 @@ async def identify_worst_eaf_contributors(
)
standard_scope_location_tags = [tag.location_tag for tag in standard_scope]
# Compute contributions
# Compute contributions for reliability improvements
asset_contributions = calculate_asset_eaf_contributions(
plant_result, eq_results, standard_scope_location_tags, eaf_gap=eaf_gap
plant_result, eq_results, standard_scope_location_tags, eaf_gap, reduced_outage
)
project_eaf_improvement = 0.0
# Greedy improvement allocation
project_eaf_improvement_total = 0.0
selected_eq = []
# Greedy select until gap is closed
for asset in asset_contributions:
if project_eaf_improvement >= eaf_gap:
if project_eaf_improvement_total >= eaf_gap:
break
if (project_eaf_improvement + asset.required_improvement) <= eaf_gap:
if (project_eaf_improvement_total + asset.improvement_impact) <= eaf_gap:
selected_eq.append(asset)
project_eaf_improvement += asset.required_improvement
project_eaf_improvement_total += asset.improvement_impact
else:
# allow overshoot tolerance by skipping large ones, continue with smaller ones
continue
# Build output with efficiency included
# Total EAF after improvements + optional outage cut
possible_eaf_plant = current_plant_eaf + project_eaf_improvement_total * 100 + scheduled_eaf_gain
possible_eaf_plant = min(possible_eaf_plant, max_eaf_possible)
selected_eq.sort(key=lambda x: x.birbaum, reverse=True)
required_cut_hours = 0
# --- 2. Optimization feasible but cannot reach target (underperformance case) ---
if possible_eaf_plant < target_eaf:
# Calculate shortfall
performance_gap = target_eaf - possible_eaf_plant
# Estimate how many scheduled outage hours must be reduced to close the remaining gap
# Each hour reduced adds (1 / total_hours) * 100 % to plant EAF
required_cut_hours = (performance_gap / 100) * total_hours
reliability_limit_msg = (
f"⚠️ Optimization was unable to reach target EAF {target_eaf:.2f}%.\n"
f"The best achievable EAF based on current reliability is "
f"{possible_eaf_plant:.2f}% (short by {performance_gap:.2f}%)."
)
# Add actionable recommendation
recommendation_msg = (
f"To achieve the target EAF, consider reducing planned outage by approximately "
f"{required_cut_hours:.1f} hours or {int(required_cut_hours/24)} days (from {reduced_outage:.0f}h → {reduced_outage - required_cut_hours:.0f}h)."
)
if warning_message:
warning_message = warning_message + "\n\n" + reliability_limit_msg + "\n" + recommendation_msg
else:
warning_message = reliability_limit_msg + "\n" + recommendation_msg
# --- EAF improvement reporting ---
eaf_improvement_points = (possible_eaf_plant - current_plant_eaf)
# Express as text for user readability
if eaf_improvement_points > 0:
improvement_text = f"{eaf_improvement_points:.6f} percentage points increase"
else:
improvement_text = "No measurable improvement achieved"
# Build result
return OptimizationResult(
current_plant_eaf=current_plant_eaf,
target_plant_eaf=target_eaf,
possible_plant_eaf=possible_eaf_plant,
eaf_gap=eaf_gap,
warning_message=warning_message, # numeric
eaf_improvement_text=improvement_text,
recommended_reduced_outage=required_cut_hours,
asset_contributions=[
{
"node": asset.node,
"availability": asset.availability,
"contribution": asset.contribution,
"sensitivy": asset.birbaum,
"required_improvement": asset.required_improvement,
"system_impact": asset.improvement_impact,
"num_of_failures": asset.num_of_failures,
"down_time": asset.down_time,
"efficiency": asset.efficiency,
"efficiency": asset.efficiency,
}
for asset in selected_eq
],
optimization_success=(current_plant_eaf + project_eaf_improvement) >= target_eaf,
outage_reduction_hours=cut_hours,
optimization_success=(current_plant_eaf + project_eaf_improvement_total * 100 + scheduled_eaf_gain)
>= target_eaf,
simulation_id=simulation_id,
)
)
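A worked example of the outage arithmetic used by identify_worst_eaf_contributors, with made-up inputs matching the router defaults (duration 17520 h, po_duration 1200 h):

```python
# Hypothetical inputs mirroring the parameters above.
duration = 17520      # two years of hours, the router default
po_duration = 1200    # planned overhaul hours
cut_hours = 200.0     # planned outage hours the user wants to cut

total_hours = duration
reduced_outage = max(po_duration - cut_hours, 0)  # 1000 h
max_eaf_possible = (total_hours - reduced_outage) / total_hours * 100
scheduled_eaf_gain = (cut_hours / total_hours) * 100 if cut_hours > 0 else 0.0

print(round(max_eaf_possible, 3))    # 94.292: ceiling imposed by the remaining planned outage
print(round(scheduled_eaf_gain, 3))  # 1.142: EAF points recovered purely by cutting 200 h
```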

@ -1,6 +1,10 @@
import asyncio
from datetime import datetime, timedelta
import random
from typing import List, Optional
from temporalio.client import Client
from src.config import TEMPORAL_URL, TR_RBD_ID
def generate_down_periods(start_date: datetime, end_date: datetime,
num_periods: Optional[int] = None, min_duration: int = 3,
@ -52,3 +56,36 @@ def generate_down_periods(start_date: datetime, end_date: datetime,
down_periods.append((period_start, period_end))
return sorted(down_periods)
async def wait_for_workflow(simulation_id, max_retries=3):
workflow_id = f"simulation-{simulation_id}" # use returned ID
retries = 0
temporal_client = await Client.connect(TEMPORAL_URL)
while True:
try:
handle = temporal_client.get_workflow_handle(workflow_id=workflow_id)
desc = await handle.describe()
status = desc.status.name
if status not in ["RUNNING", "CONTINUED_AS_NEW"]:
print(f"✅ Workflow {workflow_id} finished with status: {status}")
break
print(f"⏳ Workflow {workflow_id} still {status}, checking again in 10s...")
except Exception as e:
retries += 1
if retries > max_retries:
print(f"⚠️ Workflow {workflow_id} not found after {max_retries} retries, treating as done. Error: {e}")
break
else:
print(f"⚠️ Workflow {workflow_id} not found (retry {retries}/{max_retries}), waiting 10s before retry...")
await asyncio.sleep(10)
continue
retries = 0 # reset retries if describe() worked
await asyncio.sleep(30)
return simulation_id
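A minimal sketch of driving wait_for_workflow from a script, assuming a Temporal server reachable at TEMPORAL_URL; the simulation ID is a placeholder.

```python
import asyncio

from src.calculation_target_reliability.utils import wait_for_workflow

async def main() -> None:
    # Placeholder ID; in the service this comes from run_rbd_simulation().
    simulation_id = "00000000-0000-0000-0000-000000000000"
    # Polls Temporal every 30 s until the workflow leaves RUNNING/CONTINUED_AS_NEW,
    # giving up gracefully after three consecutive "not found" errors.
    finished_id = await wait_for_workflow(simulation_id, max_retries=3)
    print(f"simulation {finished_id} is no longer running")

if __name__ == "__main__":
    asyncio.run(main())
```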

@ -7,6 +7,7 @@ from sqlalchemy import Select, func, select
from sqlalchemy.orm import joinedload
from src.auth.service import Token
from src.config import TC_RBD_ID
from src.database.core import DbSession
from src.overhaul_scope.service import get_all
from src.standard_scope.model import StandardScope
@ -20,8 +21,7 @@ from .service import (create_calculation_result_service, create_param_and_data,
get_avg_cost_by_asset,
get_calculation_by_reference_and_parameter,
get_calculation_data_by_id, get_calculation_result,
get_corrective_cost_time_chart,
get_overhaul_cost_by_time_chart, run_simulation, run_simulation_with_spareparts)
run_simulation_with_spareparts)
from src.database.core import CollectorDbSession
@ -86,22 +86,25 @@ async def create_calculation(
db_session: DbSession,
collector_db_session: CollectorDbSession,
calculation_time_constrains_in: CalculationTimeConstrainsParametersCreate,
created_by: str
created_by: str,
simulation_id
):
calculation_data = await create_param_and_data(
db_session=db_session,
calculation_param_in=calculation_time_constrains_in,
created_by=created_by,
)
rbd_simulation_id = simulation_id or TC_RBD_ID
# results = await create_calculation_result_service(
# db_session=db_session, calculation=calculation_data, token=token
# )
results = await run_simulation_with_spareparts(
db_session=db_session, calculation=calculation_data, token=token, collector_db_session=collector_db_session
db_session=db_session, calculation=calculation_data, token=token, collector_db_session=collector_db_session, simulation_id=rbd_simulation_id
)
return results["id"]
return results
async def get_or_create_scope_equipment_calculation(

@ -69,6 +69,10 @@ class CalculationData(Base, DefaultMixin, IdentityMixin):
optimum_oh_day = Column(Integer, nullable=True)
max_interval = Column(Integer, nullable=True)
rbd_simulation_id = Column(UUID(as_uuid=True), nullable=True)
optimum_analysis = Column(JSON, nullable=True)
session = relationship("OverhaulScope", lazy="raise")
@ -78,7 +82,9 @@ class CalculationData(Base, DefaultMixin, IdentityMixin):
"CalculationEquipmentResult", lazy="raise", viewonly=True
)
results = relationship("CalculationResult", lazy="raise", viewonly=True)
@classmethod
async def create_with_param(
@ -152,6 +158,7 @@ class CalculationEquipmentResult(Base, DefaultMixin):
optimum_day = Column(Integer, default=1)
is_included = Column(Boolean, default=True)
procurement_details = Column(JSON, nullable=True)
is_initial = Column(Boolean, default=True)
master_equipment = relationship(
"MasterEquipment",

@ -2,8 +2,11 @@ from typing import List, Optional, Union
from fastapi import APIRouter
from fastapi.params import Query
import requests
from src.auth.service import CurrentUser, Token
from src import config
from src.auth.service import CurrentUser, InternalKey, Token
from src.config import DEFAULT_TC_ID
from src.database.core import DbSession
from src.models import StandardResponse
@ -21,10 +24,11 @@ from .service import (bulk_update_equipment, get_calculation_result,
from src.database.core import CollectorDbSession
router = APIRouter()
get_calculation = APIRouter()
@router.post(
"", response_model=StandardResponse[Union[str, CalculationTimeConstrainsRead]]
"", response_model=StandardResponse[Union[dict, CalculationTimeConstrainsRead]]
)
async def create_calculation_time_constrains(
token: Token,
@ -34,6 +38,7 @@ async def create_calculation_time_constrains(
calculation_time_constrains_in: CalculationTimeConstrainsParametersCreate,
scope_calculation_id: Optional[str] = Query(None),
with_results: Optional[int] = Query(0),
simulation_id = Query(None)
):
"""Save calculation time constrains Here"""
@ -50,9 +55,10 @@ async def create_calculation_time_constrains(
collector_db_session=collector_db_session,
calculation_time_constrains_in=calculation_time_constrains_in,
created_by=current_user.name,
simulation_id=simulation_id
)
return StandardResponse(data=str(results), message="Data created successfully")
return StandardResponse(data=results, message="Data created successfully")
@router.get(
@ -79,13 +85,20 @@ async def get_calculation_parameters(
)
@router.get(
@get_calculation.get(
"/{calculation_id}", response_model=StandardResponse[CalculationTimeConstrainsRead]
)
async def get_calculation_results(db_session: DbSession, calculation_id):
async def get_calculation_results(db_session: DbSession, calculation_id, token:InternalKey, include_risk_cost:int = Query(1, alias="risk_cost")):
if calculation_id == 'default':
calculation_id = DEFAULT_TC_ID
results = await get_calculation_result(
db_session=db_session, calculation_id=calculation_id
db_session=db_session, calculation_id=calculation_id, token=token, include_risk_cost=include_risk_cost
)
requests.post(f"{config.AUTH_SERVICE_API}/sign-out", headers={
"Authorization": f"Bearer {token}"
})
return StandardResponse(
data=results,
@ -134,6 +147,9 @@ async def update_selected_equipment(
calculation_id,
calculation_time_constrains_in: List[CalculationSelectedEquipmentUpdate],
):
if calculation_id == 'default':
calculation_id = "3b9a73a2-bde6-418c-9e2f-19046f501a05"
results = await bulk_update_equipment(
db=db_session,
selected_equipments=calculation_time_constrains_in,

File diff suppressed because it is too large

@ -2,6 +2,9 @@ import datetime
import json
import pandas as pd
import requests
from src.config import RBD_SERVICE_API
def get_months_between(start_date: datetime.datetime, end_date: datetime.datetime) -> int:
"""
@ -12,39 +15,42 @@ def get_months_between(start_date: datetime.datetime, end_date: datetime.datetim
return months
def create_time_series_data(chart_data, max_hours=24096):
# Filter out data points with currentEvent = "ON_OH"
filtered_data = [data for data in chart_data if data['currentEvent'] != 'ON_OH']
# Sort filtered data by cumulative time
sorted_data = sorted(filtered_data, key=lambda x: x['cumulativeTime'])
def create_time_series_data(chart_data, max_hours=None):
# Filter out ON_OH
filtered_data = [d for d in chart_data if d["currentEvent"] != "ON_OH"]
sorted_data = sorted(filtered_data, key=lambda x: x["cumulativeTime"])
if not sorted_data:
return []
hourly_data = []
current_state_index = 0
current_flow_rate = sorted_data[0]['flowRate']
current_eq_status = sorted_data[0]['currentEQStatus']
for hour in range(1, max_hours + 1):
# Check if we need to advance to the next state
while (current_state_index < len(sorted_data) - 1 and
hour >= int(sorted_data[current_state_index + 1]['cumulativeTime'])):
current_flow_rate = sorted_data[0]["flowRate"]
current_eq_status = sorted_data[0]["currentEQStatus"]
# Determine maximum bound (either given or from data)
last_time = int(sorted_data[-1]["cumulativeTime"])
if max_hours is None:
max_hours = last_time
for hour in range(0, max_hours + 1): # start from 0
# Advance state if needed
while (current_state_index < len(sorted_data) - 1 and
hour >= sorted_data[current_state_index + 1]["cumulativeTime"]):
current_state_index += 1
current_flow_rate = sorted_data[current_state_index]['flowRate']
current_eq_status = sorted_data[current_state_index]['currentEQStatus']
# Add hourly data point
current_flow_rate = sorted_data[current_state_index]["flowRate"]
current_eq_status = sorted_data[current_state_index]["currentEQStatus"]
hourly_data.append({
'cumulativeTime': hour,
'flowRate': current_flow_rate,
'currentEQStatus': current_eq_status
"cumulativeTime": hour,
"flowRate": current_flow_rate,
"currentEQStatus": current_eq_status
})
return hourly_data
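A tiny usage sketch for the reworked create_time_series_data, with a hypothetical four-event chart_data list; it shows the series now starting at hour 0 and max_hours being inferred from the data when not given.

```python
# Hypothetical event records in the shape the function expects.
chart_data = [
    {"cumulativeTime": 0, "flowRate": 550.0, "currentEQStatus": "Svc", "currentEvent": "RUN"},
    {"cumulativeTime": 5, "flowRate": 0.0, "currentEQStatus": "OoS", "currentEvent": "FAIL"},
    {"cumulativeTime": 8, "flowRate": 550.0, "currentEQStatus": "Svc", "currentEvent": "RUN"},
    {"cumulativeTime": 10, "flowRate": 550.0, "currentEQStatus": "Svc", "currentEvent": "ON_OH"},
]

hourly = create_time_series_data(chart_data)  # ON_OH filtered out, max_hours inferred as 8
print(len(hourly))  # 9 hourly points, hours 0 through 8
print(hourly[6])    # {'cumulativeTime': 6, 'flowRate': 0.0, 'currentEQStatus': 'OoS'}
```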
def calculate_failures_per_month(hourly_data):
"""
Calculate the cumulative number of failures up to each month from hourly data.
@ -93,96 +99,110 @@ def calculate_failures_per_month(hourly_data):
return result
def analyze_monthly_metrics(timestamp_outs):
"""
Analyze time series data to calculate monthly metrics:
1. Failure count per month
2. Cumulative failure count each month
3. Total out-of-service time per month
4. Average flow rate per month
"""
import pandas as pd
import datetime
import datetime
import pandas as pd
async def plant_simulation_metrics(simulation_id: str, location_tag: str, max_interval, token, last_oh_date, use_location_tag: int = 1):
"""Get failure predictions for equipment from simulation service"""
calc_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/{simulation_id}/{location_tag}"
try:
response = requests.get(
calc_result_url,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {token}",
},
timeout=30
)
response.raise_for_status()
prediction_data = response.json()['data']
except (requests.RequestException, ValueError) as e:
raise Exception(str(e))
return prediction_data
# Check if timestamp_outs is None or empty
if timestamp_outs is None or not timestamp_outs:
# Return empty results with zero values
def analyze_monthly_metrics(timestamp_outs, start_date, max_flow_rate: float = 550):
if not timestamp_outs:
return {}
# Convert to DataFrame for easier manipulation
df = pd.DataFrame(timestamp_outs)
# Check if DataFrame is empty after creation
if df.empty:
return {}
# Check if required columns exist
required_columns = ['cumulativeTime', 'currentEQStatus', 'flowRate']
missing_columns = [col for col in required_columns if col not in df.columns]
if missing_columns:
if not all(col in df.columns for col in required_columns):
return {}
start_oh = datetime.datetime(start_date.year, start_date.month, start_date.day)
# Assuming the simulation starts from a reference date (you can modify this)
# For this example, I'll use January 1, 2024 as the start date
start_date = datetime.datetime(2025, 10, 22)
# Convert cumulative hours to actual datetime
df['datetime'] = df['cumulativeTime'].apply(
lambda x: start_date + datetime.timedelta(hours=x)
)
# Extract month-year for grouping
# Actual datetime from cumulative hours
df['datetime'] = df['cumulativeTime'].apply(lambda x: start_oh + datetime.timedelta(hours=x))
df['month_year'] = df['datetime'].dt.to_period('M')
# Calculate time duration for each record (difference between consecutive cumulative times)
df['duration_hours'] = df['cumulativeTime'].diff().fillna(df['cumulativeTime'].iloc[0])
# Initialize results dictionary
# Duration until next timestamp
df['duration_hours'] = df['cumulativeTime'].shift(-1) - df['cumulativeTime']
df['duration_hours'] = df['duration_hours'].fillna(0)
# Failure detection
df['status_change'] = df['currentEQStatus'].shift() != df['currentEQStatus']
df['failure'] = (df['currentEQStatus'] == 'OoS') & df['status_change']
# Cumulative tracking
df['cumulative_failures'] = df['failure'].cumsum()
df['cumulative_oos'] = (df['duration_hours'] * (df['currentEQStatus'] == 'OoS')).cumsum()
# Derating calculation
# Derating = capacity reduction below max but not outage
df['derating'] = (max_flow_rate - df['flowRate']).clip(lower=0)
df['is_derated'] = (df['currentEQStatus'] == 'Svc') & (df['derating'] > 0)
# Equivalent Derated Hours (EFDH) → sum of derating * hours, then normalized by max capacity
df['derated_mwh'] = df['derating'] * df['duration_hours']
df['derated_hours_equivalent'] = df['derated_mwh'] / max_flow_rate
monthly_results = {}
# Track cumulative failures across all months
cumulative_failures = 0
cummulative_oos = 0
# Group by month-year and ensure chronological order
for month_period, group in df.groupby('month_year'):
for month_period, group in df.groupby('month_year', sort=True):
month_str = str(month_period)
monthly_results[month_str] = {}
# 1. Count failures per month
# A failure is when currentEQStatus changes from "Svc" to "OoS"
status_changes = group['currentEQStatus'].shift() != group['currentEQStatus']
failures = ((group['currentEQStatus'] == 'OoS') & status_changes).sum()
monthly_results[month_str]['failures_count'] = int(failures)
# 2. Add failures to cumulative count
cumulative_failures += failures
monthly_results[month_str]['cumulative_failures'] = int(cumulative_failures)
# 3. Total out-of-service time per month (in hours)
oos_time = group[group['currentEQStatus'] == 'OoS']['duration_hours'].sum()
# Failures
monthly_results[month_str]['failures_count'] = int(group['failure'].sum())
monthly_results[month_str]['cumulative_failures'] = int(group['cumulative_failures'].max())
# OOS hours
oos_time = group.loc[group['currentEQStatus'] == 'OoS', 'duration_hours'].sum()
monthly_results[month_str]['total_oos_hours'] = float(oos_time)
cummulative_oos += oos_time
monthly_results[month_str]['cummulative_oos'] = float(cummulative_oos)
# 4. Average flow rate per month (weighted by duration)
# Calculate weighted average flow rate
monthly_results[month_str]['cummulative_oos'] = float(group['cumulative_oos'].max())
# Flow rate (weighted average)
total_flow_time = (group['flowRate'] * group['duration_hours']).sum()
total_time = group['duration_hours'].sum()
avg_flow_rate = total_flow_time / total_time if total_time > 0 else 0
monthly_results[month_str]['avg_flow_rate'] = float(avg_flow_rate)
# Additional useful metrics
# Extra metrics
monthly_results[month_str]['total_hours'] = float(total_time)
monthly_results[month_str]['service_hours'] = float(
group[group['currentEQStatus'] == 'Svc']['duration_hours'].sum()
)
service_hours = group.loc[group['currentEQStatus'] == 'Svc', 'duration_hours'].sum()
monthly_results[month_str]['service_hours'] = float(service_hours)
monthly_results[month_str]['availability_percentage'] = float(
(monthly_results[month_str]['service_hours'] / total_time * 100) if total_time > 0 else 0
(service_hours / total_time * 100) if total_time > 0 else 0
)
# Derating metrics
derating_hours = group.loc[group['is_derated'], 'duration_hours'].sum()
derated_mwh = group['derated_mwh'].sum()
equivalent_derated_hours = group['derated_hours_equivalent'].sum()
monthly_results[month_str]['derating_hours'] = float(derating_hours)
monthly_results[month_str]['derated_mwh'] = float(derated_mwh)
monthly_results[month_str]['equivalent_derated_hours'] = float(equivalent_derated_hours)
return monthly_results
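A worked example of the derating metrics introduced above, reduced to a single hypothetical in-service record; "equivalent derated hours" re-expresses lost energy as full-outage hours at max_flow_rate.

```python
# One hypothetical in-service record: the unit ran at 450 MW for 10 hours
# against a 550 MW maximum.
max_flow_rate = 550.0
flow_rate = 450.0
duration_hours = 10.0

derating = max(max_flow_rate - flow_rate, 0.0)          # 100 MW below capacity
derated_mwh = derating * duration_hours                 # 1000 MWh of lost output
equivalent_derated_hours = derated_mwh / max_flow_rate  # ~1.82 h of full-outage equivalent

print(derating, derated_mwh, round(equivalent_derated_hours, 2))  # 100.0 1000.0 1.82
```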
def calculate_risk_cost_per_failure(monthly_results, birnbaum_importance, energy_price):
"""
Calculate risk cost per failure for each month based on:

@ -83,3 +83,12 @@ MAXIMO_API_KEY = config("MAXIMO_API_KEY", default="keys")
AUTH_SERVICE_API = config("AUTH_SERVICE_API", default="http://192.168.1.82:8000/auth")
REALIBILITY_SERVICE_API = config("REALIBILITY_SERVICE_API", default="http://192.168.1.82:8000/reliability")
RBD_SERVICE_API = config("RBD_SERVICE_API", default="http://192.168.1.82:8000/rbd")
TEMPORAL_URL = config("TEMPORAL_URL", default="http://192.168.1.86:7233")
API_KEY = config("API_KEY", default="0KFvcB7zWENyKVjoma9FKZNofVSViEshYr59zEQNGaYjyUP34gCJKDuqHuk9VfvE")
TR_RBD_ID = config("TR_RBD_ID", default="f04f365e-25d8-4036-87c2-ba1bfe1f9229")
TC_RBD_ID = config("TC_RBD_ID", default="f8523cb0-dc3c-4edb-bcf1-eea7b62582f1")
DEFAULT_TC_ID = config("DEFAULT_TC_ID", default="44f483f3-bfe4-4094-a59f-b97a10f2fea6")

@ -5,7 +5,7 @@ from sqlalchemy import Delete, Select, and_, text
from sqlalchemy.orm import selectinload
from src.auth.service import CurrentUser
from src.database.core import DbSession
from src.database.core import CollectorDbSession, DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate
from .model import ScopeEquipmentPart
@ -16,139 +16,203 @@ from .schema import ScopeEquipmentActivityCreate, ScopeEquipmentActivityUpdate
# result = await db_session.get(ScopeEquipmentActivity, scope_equipment_activity_id)
# return result
def create_dummy_parts(assetnum: str, count: int = 5):
"""
Create a list of dummy ScopeEquipmentPart objects with random stock values.
Args:
assetnum (str): The base asset number to generate dummy parts for.
count (int): The number of parts to create. Default is 5.
Returns:
List[ScopeEquipmentPart]: A list of dummy ScopeEquipmentPart objects.
"""
parts = []
for i in range(1, count + 1):
# Generate a unique part asset number
part_assetnum = f"{assetnum}_PART_{i}"
stock = random.randint(1, 100) # Random stock value between 1 and 100
parts.append({"assetnum": part_assetnum, "stock": stock})
return parts
from sqlalchemy import text
from typing import Optional, List, Dict, Any
from datetime import datetime
from sqlalchemy.ext.asyncio import AsyncSession as DbSession
from sqlalchemy.sql import text
import logging
logger = logging.getLogger(__name__)
# async def get_all(
# db_session: CollectorDbSession,
# location_tag: Optional[str] = None,
# start_year: int = 2023,
# end_year: Optional[int] = None,
# parent_wonum: Optional[str] = None
# ) -> List[Dict[str, Any]]:
# """
# Retrieve overhaul spare parts consumption data.
# Handles missing data, null parent WO, and query safety.
# Args:
# db_session: Async SQLAlchemy session
# location_tag: Optional location filter
# start_year: Year to start analysis (default 2023)
# end_year: Optional year to end analysis (default start_year + 1)
# parent_wonum: Parent work order number (required for context)
# Returns:
# List of dictionaries with spare part usage per overhaul WO.
# """
# # --- 1. Basic validation ---
# if not parent_wonum:
# logger.warning("Parent WO number not provided. Returning empty result.")
# return []
# if start_year < 1900 or (end_year and end_year < start_year):
# raise ValueError("Invalid year range provided.")
# if end_year is None:
# end_year = start_year + 1
# # --- 2. Build SQL safely ---
# base_query = """
# WITH filtered_wo AS (
# SELECT wonum, location_tag
# FROM public.wo_max
# WHERE worktype = 'OH'
# AND xx_parent = :parent_wonum
# """
# params = {
# "parent_wonum": parent_wonum,
# }
# if location_tag:
# base_query += " AND location_tag = :location_tag"
# params["location_tag"] = location_tag
# base_query += """
# ),
# filtered_materials AS (
# SELECT wonum, itemnum, itemqty, inv_curbaltotal, inv_avgcost
# FROM public.wo_max_material
# WHERE wonum IN (SELECT wonum FROM filtered_wo)
# )
# SELECT
# fwo.location_tag AS location_tag,
# fm.itemnum,
# spl.description AS sparepart_name,
# COALESCE(SUM(fm.itemqty), 0) AS parts_consumed_in_oh,
# COALESCE(AVG(fm.inv_avgcost), 0) AS avgcost,
# COALESCE(AVG(fm.inv_curbaltotal), 0) AS inv_curbaltotal
# FROM filtered_wo fwo
# INNER JOIN filtered_materials fm ON fwo.wonum = fm.wonum
# LEFT JOIN public.maximo_sparepart_pr_po_line spl ON fm.itemnum = spl.item_num
# GROUP BY fwo.location_tag, fm.itemnum, spl.description
# ORDER BY fwo.location_tag, fm.itemnum;
# """
# # --- 3. Execute query ---
# try:
# result = await db_session.execute(text(base_query), params)
# rows = result.fetchall()
# # Handle "no data found"
# if not rows:
# logger.info(f"No spare part data found for parent WO {parent_wonum}.")
# return []
# # --- 4. Map results cleanly ---
# equipment_parts = []
# for row in rows:
# try:
# equipment_parts.append({
# "location_tag": row.location_tag,
# "itemnum": row.itemnum,
# "sparepart_name": row.sparepart_name or "-",
# "parts_consumed_in_oh": float(row.parts_consumed_in_oh or 0),
# "avgcost": float(row.avgcost or 0),
# "inv_curbaltotal": float(row.inv_curbaltotal or 0)
# })
# except Exception as parse_err:
# logger.error(f"Failed to parse row {row}: {parse_err}")
# continue # Skip malformed rows
# return equipment_parts
# except Exception as e:
# logger.exception(f"Database query failed: {e}")
# raise RuntimeError("Failed to fetch overhaul spare parts data.") from e
from typing import List, Dict, Any, Optional
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.sql import text
async def get_all(
db_session: DbSession,
db_session: AsyncSession,
location_tag: Optional[str] = None,
start_year: int = 2023,
end_year: Optional[int] = None
) -> List[Dict[str, Any]]:
"""
Get overhaul spare parts consumption data with optimized query
Get overhaul spare parts consumption data with optimized query.
Args:
db_session: Database session
location_tag: Optional location filter
start_year: Year to start analysis (default: 2023)
end_year: Year to end analysis (default: start_year + 1)
db_session: SQLAlchemy async database session
location_tag: Optional filter for location (asset_location)
start_year: Starting year (default: 2023)
end_year: Ending year (default: start_year + 1)
Returns:
List of dictionaries containing spare parts consumption data
List of dictionaries with spare parts consumption data
"""
# Set end year if not provided
# Set default end year
if end_year is None:
end_year = start_year + 1
# Build dynamic query
base_query = """
# Build query dynamically
query_str = """
WITH filtered_wo AS (
SELECT wonum, asset_location
FROM public.wo_staging_maximo_2
WHERE worktype = 'OH'
SELECT DISTINCT wonum, asset_location, asset_unit
FROM public.wo_maximo ma
WHERE ma.xx_parent IN ('155026', '155027', '155029', '155030')
"""
params = {}
# Add location filter to CTE if provided
# Optional filter for location
if location_tag:
base_query += " AND asset_location = :location_tag"
query_str += " AND asset_location = :location_tag"
params["location_tag"] = location_tag
base_query += """
query_str += """
),
filtered_transactions AS (
SELECT wonum, itemnum, curbal
FROM public.maximo_material_use_transactions
WHERE issuetype = 'ISSUE'
AND wonum IN (SELECT wonum FROM filtered_wo)
filtered_materials AS (
SELECT
mat.wonum,
mat.itemnum,
mat.itemqty,
mat.inv_curbaltotal AS inv_curbaltotal,
mat.inv_avgcost AS inv_avgcost
FROM public.wo_maximo_material AS mat
WHERE mat.wonum IN (SELECT wonum FROM filtered_wo)
)
SELECT
fwo.asset_location AS location_tag,
ft.itemnum,
spl.description AS sparepart_name,
COUNT(*) AS parts_consumed_in_oh,
MIN(ft.curbal) AS min_remaining_balance,
MAX(mi.curbaltotal) AS inv_curbaltotal
FROM filtered_wo fwo
INNER JOIN filtered_transactions ft ON fwo.wonum = ft.wonum
INNER JOIN public.maximo_inventory mi ON ft.itemnum = mi.itemnum
LEFT JOIN public.maximo_sparepart_pr_po_line spl ON ft.itemnum = spl.item_num
COALESCE(spl.description, 'Unknown') AS sparepart_name,
AVG(ft.itemqty) AS total_parts_used,
COALESCE(AVG(ft.inv_avgcost), 0) AS avg_cost,
COALESCE(AVG(ft.inv_curbaltotal), 0) AS avg_inventory_balance
FROM filtered_wo AS fwo
INNER JOIN filtered_materials AS ft
ON fwo.wonum = ft.wonum
LEFT JOIN public.maximo_sparepart_pr_po_line AS spl
ON ft.itemnum = spl.item_num
GROUP BY fwo.asset_location, ft.itemnum, spl.description
ORDER BY fwo.asset_location, ft.itemnum
ORDER BY fwo.asset_location, ft.itemnum;
"""
query = text(base_query)
try:
results = await db_session.execute(query, params)
result = await db_session.execute(text(query_str), params)
rows = result.fetchall()
equipment_parts = []
for row in results:
for row in rows:
equipment_parts.append({
"location_tag": row.location_tag,
"itemnum": row.itemnum,
"sparepart_name": row.sparepart_name,
"parts_consumed_in_oh": row.parts_consumed_in_oh,
"min_remaining_balance": float(row.min_remaining_balance),
"inv_curbaltotal": float(row.inv_curbaltotal)
"parts_consumed_in_oh": float(row.total_parts_used or 0),
"avg_cost": float(row.avg_cost or 0),
"inv_curbaltotal": float(row.avg_inventory_balance or 0),
})
return equipment_parts
except Exception as e:
# Log the error appropriately in your application
print(f"Database query error: {e}")
print(f"[get_all] Database query error: {e}")
raise
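# Illustrative sketch of calling get_all() directly; the connection URL and the
# location tag are placeholders, and the application normally injects the session
# through its DbSession dependency instead of building an engine here:
async def _example_get_all():
    from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine

    engine = create_async_engine("postgresql+asyncpg://user:pass@localhost/collector")
    async with async_sessionmaker(engine)() as session:
        rows = await get_all(db_session=session, location_tag="BO-101", start_year=2023)
        # Each row: {"location_tag", "itemnum", "sparepart_name",
        #            "parts_consumed_in_oh", "avg_cost", "inv_curbaltotal"}
        return rows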
# async def create(*, db_session: DbSession, scope_equipment_activty_in: ScopeEquipmentActivityCreate):
# activity = ScopeEquipmentActivity(
# **scope_equipment_activty_in.model_dump())
# db_session.add(activity)
# await db_session.commit()
# return activity
# async def update(*, db_session: DbSession, activity: ScopeEquipmentActivity, scope_equipment_activty_in: ScopeEquipmentActivityUpdate):
# """Updates a document."""
# data = scope_equipment_activty_in.model_dump()
# update_data = scope_equipment_activty_in.model_dump(exclude_defaults=True)
# for field in data:
# if field in update_data:
# setattr(activity, field, update_data[field])
# await db_session.commit()
# return activity
# async def delete(*, db_session: DbSession, scope_equipment_activity_id: str):
# """Deletes a document."""
# activity = await db_session.get(ScopeEquipmentActivity, scope_equipment_activity_id)
# await db_session.delete(activity)
# await db_session.commit()

@ -46,7 +46,7 @@ app = FastAPI(
)
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
app.add_middleware(GZipMiddleware, minimum_size=2000)
app.add_middleware(GZipMiddleware, minimum_size=1000)
# credentials: "include",
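# Minimal sketch of what minimum_size=1000 means: responses smaller than roughly
# 1000 bytes are left uncompressed, larger ones are sent with Content-Encoding: gzip.
# (Self-contained demo app, separate from the application configured above.)
def _example_gzip_threshold():
    from fastapi import FastAPI
    from fastapi.middleware.gzip import GZipMiddleware
    from fastapi.testclient import TestClient

    demo = FastAPI()
    demo.add_middleware(GZipMiddleware, minimum_size=1000)

    @demo.get("/small")
    def small():
        return {"data": "x" * 10}

    @demo.get("/large")
    def large():
        return {"data": "x" * 5000}

    client = TestClient(demo)
    assert "gzip" not in client.get("/small").headers.get("content-encoding", "")
    assert client.get("/large").headers.get("content-encoding") == "gzip"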

@ -1,5 +1,5 @@
from datetime import datetime
from sqlalchemy import select, func, cast, Numeric
from sqlalchemy import select, func, cast, Numeric, text
from sqlalchemy.orm import Session
from sqlalchemy import and_
from sqlalchemy.sql import not_
@ -8,72 +8,246 @@ from src.database.core import CollectorDbSession
async def get_cm_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime):
query = select(
WorkOrderData.location,
(func.sum(WorkOrderData.total_cost_max).cast(Numeric) / func.count(WorkOrderData.wonum)).label('avg_cost')
).where(
and_(
# WorkOrderData.wo_start >= last_oh_date,
# WorkOrderData.wo_start <= upcoming_oh_date,
WorkOrderData.worktype.in_(['CM', 'EM', 'PROACTIVE']),
WorkOrderData.system_tag.in_(['HPB', 'AH', 'APC', 'SCR', 'CL', 'DM', 'CRH', 'ASH', 'BAD', 'DS', 'WTP',
'MT', 'SUP', 'DCS', 'FF', 'EG', 'AI', 'SPS', 'EVM', 'SCW', 'KLH', 'CH',
'TUR', 'LOT', 'HRH', 'ESP', 'CAE', 'GMC', 'BFT', 'LSH', 'CHB', 'BSS',
'LOS', 'LPB', 'SAC', 'CP', 'EHS', 'RO', 'GG', 'MS', 'CW', 'SO', 'ATT',
'AFG', 'EHB', 'RP', 'FO', 'PC', 'APE', 'AF', 'DMW', 'BRS', 'GEN', 'ABS',
'CHA', 'TR', 'H2', 'BDW', 'LOM', 'ACR', 'AL', 'FW', 'COND', 'CCCW', 'IA',
'GSS', 'BOL', 'SSB', 'CO', 'OA', 'CTH-UPD', 'AS', 'DP']),
WorkOrderData.reportdate.is_not(None),
WorkOrderData.actstart.is_not(None),
WorkOrderData.actfinish.is_not(None),
WorkOrderData.unit.in_([3, 0]),
WorkOrderData.reportdate >= datetime.strptime('2015-01-01', '%Y-%m-%d'),
not_(WorkOrderData.wonum.like('T%'))
query = text("""WITH part_costs AS (
SELECT
mu.wonum,
SUM(mu.itemqty * COALESCE(inv.avgcost, po.unit_cost, 0)) AS parts_total_cost
FROM maximo_workorder_materials mu
LEFT JOIN maximo_inventory inv
ON mu.itemnum = inv.itemnum
LEFT JOIN (
SELECT item_num, AVG(unit_cost) AS unit_cost
FROM maximo_sparepart_pr_po_line
GROUP BY item_num
) po
ON mu.itemnum = po.item_num
GROUP BY mu.wonum
),
wo_costs AS (
SELECT
w.wonum,
w.asset_location,
(COALESCE(w.mat_cost_max, 0) + COALESCE(pc.parts_total_cost, 0)) AS total_wo_cost
FROM wo_staging_maximo_2 w
LEFT JOIN part_costs pc
ON w.wonum = pc.wonum
WHERE
w.worktype IN ('CM', 'EM', 'PROACTIVE')
AND w.asset_system IN (
'HPB','AH','APC','SCR','CL','DM','CRH','ASH','BAD','DS','WTP',
'MT','SUP','DCS','FF','EG','AI','SPS','EVM','SCW','KLH','CH',
'TUR','LOT','HRH','ESP','CAE','GMC','BFT','LSH','CHB','BSS',
'LOS','LPB','SAC','CP','EHS','RO','GG','MS','CW','SO','ATT',
'AFG','EHB','RP','FO','PC','APE','AF','DMW','BRS','GEN','ABS',
'CHA','TR','H2','BDW','LOM','ACR','AL','FW','COND','CCCW','IA',
'GSS','BOL','SSB','CO','OA','CTH-UPD','AS','DP'
)
).group_by(
WorkOrderData.location
).order_by(
func.count(WorkOrderData.wonum).desc()
)
result = await collector_db.execute(query)
data = result.all()
AND w.reportdate IS NOT NULL
AND w.actstart IS NOT NULL
AND w.actfinish IS NOT NULL
AND w.asset_unit IN ('3','00')
AND w.reportdate >= '2015-01-01'
AND w.wonum NOT LIKE 'T%'
),
-- find max cost per location
location_max AS (
SELECT asset_location, MAX(total_wo_cost) AS max_cost
FROM wo_costs
WHERE total_wo_cost > 0
GROUP BY asset_location
),
-- keep only work orders with a positive cost (location_max is joined for a possible threshold filter)
filtered_wo AS (
SELECT w.*
FROM wo_costs w
JOIN location_max lm ON w.asset_location = lm.asset_location
WHERE w.total_wo_cost > 0
)
SELECT
asset_location,
SUM(total_wo_cost)::numeric / COUNT(wonum) AS avg_cost
FROM filtered_wo
GROUP BY asset_location
ORDER BY avg_cost DESC;
""")
results = await collector_db.execute(query)
data = []
for row in results:
data.append({
"location_tag": row.asset_location,
"avg_cost": row.avg_cost
})
return {
data.location: data.avg_cost for data in data
item["location_tag"]: item["avg_cost"] for item in data
}
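# Illustrative sketch of consuming the summary above; "BO-101" is a placeholder
# location tag, and the dates are whatever the caller already has in scope:
async def _example_cm_cost_lookup(collector_db, last_oh_date, upcoming_oh_date):
    cm_costs = await get_cm_cost_summary(collector_db, last_oh_date, upcoming_oh_date)
    # cm_costs maps location_tag -> average corrective/emergency WO cost
    return cm_costs.get("BO-101", 0.0)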
# async def get_oh_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime):
# query = text("""
# WITH target_wo AS (
# -- Get work orders under a specific parent(s)
# SELECT
# wonum,
# xx_parent,
# assetnum,
# location_tag AS asset_location,
# actmatcost,
# actservcost,
# reportdate
# FROM public.wo_maxim
# WHERE xx_parent = ANY(:parent_nums)
# ),
# part_costs AS (
# -- Calculate parts cost per WO if actmatcost = 0
# SELECT
# wm.wonum,
# SUM(
# wm.itemqty *
# COALESCE(wm.inv_avgcost, po.unit_cost, 0)
# ) AS parts_total_cost
# FROM public.wo_maxim_material wm
# LEFT JOIN (
# SELECT item_num, AVG(unit_cost) AS unit_cost
# FROM public.maximo_sparepart_pr_po_line
# GROUP BY item_num
# ) po ON wm.itemnum = po.item_num
# WHERE wm.itemnum IS NOT NULL
# GROUP BY wm.wonum
# ),
# wo_costs AS (
# SELECT
# w.wonum,
# w.asset_location,
# CASE
# WHEN COALESCE(w.actmatcost, 0) > 0 THEN COALESCE(w.actmatcost, 0)
# ELSE COALESCE(pc.parts_total_cost, 0)
# END AS material_cost,
# COALESCE(w.actservcost, 0) AS service_cost
# FROM target_wo w
# LEFT JOIN part_costs pc ON w.wonum = pc.wonum
# )
# SELECT
# asset_location,
# ROUND(SUM(material_cost + service_cost)::numeric / COUNT(wonum), 2) AS avg_cost,
# COUNT(wonum) AS total_wo_count
# FROM wo_costs
# GROUP BY asset_location
# ORDER BY total_wo_count DESC;
# """)
# parent_nums = []
# result = await collector_db.execute(query, {"parent_nums": parent_nums})
# data = []
# for row in result:
# data.append({
# "location_tag": row.asset_location,
# "avg_cost": float(row.avg_cost or 0.0),
# "total_wo_count": row.total_wo_count,
# })
# return {item["location_tag"]: item["avg_cost"] for item in data}
async def get_oh_cost_summary(collector_db: CollectorDbSession, last_oh_date:datetime, upcoming_oh_date:datetime):
query = select(
WorkOrderData.location,
(func.sum(WorkOrderData.total_cost_max).cast(Numeric) / func.count(WorkOrderData.wonum)).label('avg_cost')
).where(
and_(
# WorkOrderData.wo_start >= last_oh_date,
# WorkOrderData.wo_start <= upcoming_oh_date,
WorkOrderData.worktype.in_(['OH']),
WorkOrderData.system_tag.in_(['HPB', 'AH', 'APC', 'SCR', 'CL', 'DM', 'CRH', 'ASH', 'BAD', 'DS', 'WTP',
'MT', 'SUP', 'DCS', 'FF', 'EG', 'AI', 'SPS', 'EVM', 'SCW', 'KLH', 'CH',
'TUR', 'LOT', 'HRH', 'ESP', 'CAE', 'GMC', 'BFT', 'LSH', 'CHB', 'BSS',
'LOS', 'LPB', 'SAC', 'CP', 'EHS', 'RO', 'GG', 'MS', 'CW', 'SO', 'ATT',
'AFG', 'EHB', 'RP', 'FO', 'PC', 'APE', 'AF', 'DMW', 'BRS', 'GEN', 'ABS',
'CHA', 'TR', 'H2', 'BDW', 'LOM', 'ACR', 'AL', 'FW', 'COND', 'CCCW', 'IA',
'GSS', 'BOL', 'SSB', 'CO', 'OA', 'CTH-UPD', 'AS', 'DP']),
WorkOrderData.reportdate.is_not(None),
WorkOrderData.actstart.is_not(None),
WorkOrderData.actfinish.is_not(None),
WorkOrderData.unit.in_([3, 0]),
WorkOrderData.reportdate >= datetime.strptime('2015-01-01', '%Y-%m-%d'),
not_(WorkOrderData.wonum.like('T%'))
)
).group_by(
WorkOrderData.location
).order_by(
func.count(WorkOrderData.wonum).desc()
# query = text("""
# WITH part_costs AS (
# SELECT
# wm.wonum,
# SUM(wm.itemqty * COALESCE(wm.inv_avgcost, po.unit_cost, 0)) AS parts_total_cost
# FROM public.wo_maxim_material wm
# LEFT JOIN (
# SELECT item_num, AVG(unit_cost) AS unit_cost
# FROM public.maximo_sparepart_pr_po_line
# GROUP BY item_num
# ) po ON wm.itemnum = po.item_num
# WHERE wm.itemnum IS NOT NULL
# GROUP BY wm.wonum
# ),
# wo_costs AS (
# SELECT
# w.wonum,
# w.asset_location,
# -- Use mat_cost_max if parts_total_cost = 0
# CASE
# WHEN COALESCE(pc.parts_total_cost, 0) = 0 THEN COALESCE(w.mat_cost_max , 0)
# ELSE COALESCE(pc.parts_total_cost, 0)
# END AS total_wo_cost
# FROM wo_staging_maximo_2 w
# LEFT JOIN part_costs pc
# ON w.wonum = pc.wonum
# WHERE
# w.worktype = 'OH'
# AND w.reportdate IS NOT NULL
# AND w.actstart IS NOT NULL
# AND w.actfinish IS NOT NULL
# AND w.asset_unit IN ('3', '00')
# AND w.wonum NOT LIKE 'T%'
# )
# SELECT
# asset_location,
# AVG(total_wo_cost) AS avg_cost
# FROM wo_costs
# GROUP BY asset_location
# ORDER BY COUNT(wonum) DESC;
# """)
query = text("""
WITH part_costs AS (
SELECT
wm.wonum,
SUM(wm.itemqty * COALESCE(inv.avgcost, po.unit_cost, 0)) AS parts_total_cost
FROM public.maximo_workorder_materials wm
JOIN public.maximo_inventory AS inv on inv.itemnum = wm.itemnum
LEFT JOIN (
SELECT item_num, AVG(unit_cost) AS unit_cost
FROM public.maximo_sparepart_pr_po_line
GROUP BY item_num
) po ON wm.itemnum = po.item_num
WHERE wm.itemnum IS NOT NULL
GROUP BY wm.wonum
),
wo_costs AS (
SELECT
w.wonum,
w.asset_location,
-- Use mat_cost_max if parts_total_cost = 0
CASE
WHEN COALESCE(pc.parts_total_cost, 0) = 0 THEN COALESCE(w.mat_cost_max , 0)
ELSE COALESCE(pc.parts_total_cost, 0)
END AS total_wo_cost
FROM wo_staging_maximo_2 w
LEFT JOIN part_costs pc
ON w.wonum = pc.wonum
WHERE
w.worktype = 'OH'
AND w.reportdate IS NOT NULL
AND w.actstart IS NOT NULL
AND w.actfinish IS NOT NULL
AND w.asset_unit IN ('3', '00')
AND w.wonum NOT LIKE 'T%'
)
SELECT
asset_location,
AVG(total_wo_cost) AS avg_cost
FROM wo_costs
GROUP BY asset_location
ORDER BY COUNT(wonum) DESC;
""")
result = await collector_db.execute(query)
data = result.all()
data = []
for row in result:
data.append({
"location_tag": row.asset_location,
"avg_cost": row.avg_cost
})
return {
data.location: data.avg_cost for data in data
item["location_tag"]: item["avg_cost"] for item in data
}
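# The query above prefers the summed material costs and falls back to mat_cost_max
# when no material lines were costed. A minimal Python sketch of that same rule,
# with invented numbers:
def _example_total_wo_cost(parts_total_cost, mat_cost_max):
    parts_total_cost = parts_total_cost or 0
    mat_cost_max = mat_cost_max or 0
    return mat_cost_max if parts_total_cost == 0 else parts_total_cost

# _example_total_wo_cost(0, 1_250_000)       -> 1250000 (fallback to mat_cost_max)
# _example_total_wo_cost(980_000, 1_250_000) -> 980000  (summed part costs win)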

@ -3,7 +3,7 @@ from typing import List
from fastapi import APIRouter, HTTPException, status
from src.auth.service import Token
from src.database.core import DbSession
from src.database.core import CollectorDbSession, DbSession
from src.models import StandardResponse
from src.overhaul.service import (get_overhaul_critical_parts,
get_overhaul_overview,
@ -18,11 +18,11 @@ router = APIRouter()
@router.get("", response_model=StandardResponse[OverhaulRead])
async def get_overhaul(db_session: DbSession, token:Token):
async def get_overhaul(db_session: DbSession, token:Token, collector_db_session:CollectorDbSession):
"""Get all scope pagination."""
overview = await get_overhaul_overview(db_session=db_session)
schedules = await get_overhaul_schedules(db_session=db_session)
criticalParts = await get_overhaul_critical_parts(db_session=db_session, session_id=overview["overhaul"]["id"], token=token)
criticalParts = await get_overhaul_critical_parts(db_session=db_session, session_id=overview["overhaul"]["id"], token=token, collector_db_session=collector_db_session)
systemComponents = get_overhaul_system_components()
return StandardResponse(

@ -6,6 +6,7 @@ from sqlalchemy import Delete, Select
from src.auth.service import CurrentUser
from src.calculation_target_reliability.service import RBD_SERVICE_API
from src.config import TC_RBD_ID
from src.database.core import DbSession
from src.contribution_util import calculate_contribution
from src.overhaul_activity.service import get_standard_scope_by_session_id
@ -28,9 +29,9 @@ async def get_simulation_results(*, simulation_id: str, token: str):
"Content-Type": "application/json"
}
calc_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/default?nodetype=RegularNode"
calc_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/{simulation_id}?nodetype=RegularNode"
# plot_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/plot/{simulation_id}?nodetype=RegularNode"
calc_plant_result = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/default/plant"
calc_plant_result = f"{RBD_SERVICE_API}/aeros/simulation/result/calc/{simulation_id}/plant"
async with httpx.AsyncClient(timeout=300.0) as client:
calc_task = client.get(calc_result_url, headers=headers)
@ -54,16 +55,17 @@ async def get_simulation_results(*, simulation_id: str, token: str):
"plant_result": plant_data
}
async def get_overhaul_critical_parts(db_session, session_id, token):
async def get_overhaul_critical_parts(db_session, session_id, token, collector_db_session):
"""Get all overhaul critical parts."""
equipments, _ = await get_by_oh_session_id(
equipments = await get_standard_scope_by_session_id(
db_session=db_session,
oh_session_id=session_id,
overhaul_session_id=session_id,
collector_db=collector_db_session
)
criticality_simulation = await get_simulation_results(
simulation_id="default",
simulation_id = TC_RBD_ID,
token=token
)
@ -79,7 +81,7 @@ async def get_overhaul_critical_parts(db_session, session_id, token):
{
"id": equipment.id,
"location_tag": equipment.location_tag,
"name": equipment.master_equipment.name,
"name": equipment.equipment_name,
"matrix": rbd_simulation.get(equipment.location_tag)
} for equipment in equipments
@ -103,7 +105,7 @@ async def get_overhaul_critical_parts(db_session, session_id, token):
)[:10]
return {
"availability" : availability_result,
"availability" :availability_result,
"criticality": criticality_result
}

@ -136,9 +136,6 @@ async def get_all(
# page = common.get("page", 1)
# items_per_page = common.get("items_per_page", 10)
# Sort by overhaul_cost descending
results.sort(key=lambda x: x.overhaul_cost, reverse=True)
# Build response data
data = {
"items": results,

@ -0,0 +1,17 @@
from sqlalchemy import Column, String
from src.database.core import Base
from src.models import DefaultMixin
class OverhaulGantt(Base, DefaultMixin):
__tablename__ = "oh_ms_monitoring_spreadsheet"
spreadsheet_id = Column(String, nullable=True)
spreadsheet_link = Column(String, nullable=True)

@ -1,11 +1,15 @@
import re
from typing import List, Optional
from fastapi import APIRouter, HTTPException, status
from sqlalchemy import select
from src.auth.service import CurrentUser
from src.database.core import DbSession
from src.database.service import CommonParameters
from src.models import StandardResponse
from src.overhaul_gantt.model import OverhaulGantt
from src.overhaul_gantt.schema import OverhaulGanttIn
# from .schema import (OverhaulScheduleCreate, OverhaulSchedulePagination, OverhaulScheduleUpdate)
from .service import get_gantt_performance_chart
@ -14,18 +18,93 @@ router = APIRouter()
@router.get(
"", response_model=StandardResponse[list]
"", response_model=StandardResponse[dict]
)
async def get_gantt_performance():
async def get_gantt_performance(db_session: DbSession):
"""Get all scope pagination."""
# return
results = await get_gantt_performance_chart()
query = select(OverhaulGantt).limit(1)
data = (await db_session.execute(query)).scalar_one_or_none()
results, gantt_data = await get_gantt_performance_chart(spreadsheet_id=data.spreadsheet_id)
return StandardResponse(
data={
"chart_data": results,
"gantt_data": gantt_data
},
message="Data retrieved successfully",
)
@router.get(
"/spreadsheet", response_model=StandardResponse[dict]
)
async def get_gantt_spreadsheet(db_session: DbSession):
"""Get all scope pagination."""
# return
query = select(OverhaulGantt).limit(1)
data = (await db_session.execute(query)).scalar_one_or_none()
result = {
"spreadsheet_id": None,
"spreadsheet_link": None
}
if data:
result = {
"spreadsheet_id": data.spreadsheet_id,
"spreadsheet_link": data.spreadsheet_link
}
return StandardResponse(
data=results,
data=result,
message="Data retrieved successfully",
)
@router.post(
"/spreadsheet", response_model=StandardResponse[dict]
)
async def update_gantt_spreadsheet(db_session: DbSession, spreadsheet_in: OverhaulGanttIn):
"""Get all scope pagination."""
# return
match = re.search(r"/d/([a-zA-Z0-9-_]+)", spreadsheet_in.spreadsheet_link)
if not match:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid Google Sheets URL")
spreadsheet_id = match.group(1)
query = select(OverhaulGantt).limit(1)
data = (await db_session.execute(query)).scalar_one_or_none()
if data:
data.spreadsheet_link = spreadsheet_in.spreadsheet_link
data.spreadsheet_id = spreadsheet_id
else:
spreadsheet = OverhaulGantt(
spreadsheet_id=spreadsheet_id,
spreadsheet_link=spreadsheet_in.spreadsheet_link
)
db_session.add(spreadsheet)
await db_session.commit()
result = {
"spreadsheet_id": spreadsheet_id
}
return StandardResponse(
data=result,
message="Data retrieved successfully",
)
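# Illustrative sketch of the ID extraction performed above; the sheet URL is made up:
def _example_extract_spreadsheet_id():
    link = "https://docs.google.com/spreadsheets/d/1AbC_dEf-123/edit#gid=0"
    match = re.search(r"/d/([a-zA-Z0-9-_]+)", link)
    return match.group(1) if match else None  # -> "1AbC_dEf-123"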
# @router.post("", response_model=StandardResponse[None])

@ -9,8 +9,12 @@
# from src.scope_equipment_job.schema import ScopeEquipmentJobRead
# from src.job.schema import ActivityMasterRead
# class OverhaulScheduleBase(DefultBase):
# pass
from pydantic import Field
from src.models import DefultBase
class OverhaulGanttIn(DefultBase):
spreadsheet_link: str = Field(...)
# class OverhaulScheduleCreate(OverhaulScheduleBase):

@ -6,7 +6,7 @@ from sqlalchemy.orm import selectinload
# from .model import OverhaulSchedule
# from .schema import OverhaulScheduleCreate, OverhaulScheduleUpdate
from .utils import get_google_creds, get_spreatsheed_service, process_spreadsheet_data
from .utils import fetch_all_sections, get_google_creds, get_spreatsheed_service, process_spreadsheet_data
# async def get_all(*, common):
# """Returns all documents."""
@ -53,20 +53,60 @@ from .utils import get_google_creds, get_spreatsheed_service, process_spreadshee
async def get_gantt_performance_chart(*, spreadsheet_id = "1gZXuwA97zU1v4QBv56wKeiqadc6skHUucGKYG8qVFRk"):
creds = get_google_creds()
RANGE_NAME = "'2024 kurva s'!N79:BJ83" # Or just "2024 schedule"
RANGE_NAME = "'SUMMARY'!K34:AZ38" # Or just "2024 schedule"
GANTT_DATA_NAME = "ACTUAL PROGRESS"
try:
service = get_spreatsheed_service(creds)
sheet = service.spreadsheets()
response = sheet.values().get(spreadsheetId=spreadsheet_id, range=RANGE_NAME).execute()
response = sheet.values().get(
spreadsheetId=spreadsheet_id,
range=RANGE_NAME
).execute()
values = response.get("values", [])
keys = ['day', 'time', 'plan', 'actual', 'gap']
transposed = list(zip(*values))
results = [dict(zip(keys, result)) for result in transposed]
if len(values) < 5:
raise Exception("Spreadsheet format invalid: expected at least 5 rows (DAY, DATE, PLAN, ACTUAL).")
# Extract rows
day_row = values[0][1:]
date_row = values[1][1:]
plan_row = values[3][1:]
actual_row = values[4][1:]
total_days = len(day_row)
# PAD rows so lengths match day count
date_row += [""] * (total_days - len(date_row))
plan_row += [""] * (total_days - len(plan_row))
actual_row += [""] * (total_days - len(actual_row))
results = []
for i in range(total_days):
day = day_row[i]
date = date_row[i]
plan = plan_row[i]
actual = actual_row[i] if actual_row[i] else "0%"  # default empty actual cells to "0%"
results.append({
"day": day,
"date": date,
"plan": plan,
"actual": actual
})
except Exception as e:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=e)
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e))
processed_data = process_spreadsheet_data(results)
gantt_data = fetch_all_sections(service=service, spreadsheet_id=spreadsheet_id, sheet_name=GANTT_DATA_NAME)
return processed_data
return processed_data, gantt_data

@ -22,44 +22,176 @@ def process_spreadsheet_data(rows):
processed_data = []
for row in rows:
processed_row = convert_spreadsheet_data(row)
processed_data.append(processed_row)
if processed_row: processed_data.append(processed_row)
return processed_data
def convert_spreadsheet_data(data):
from datetime import datetime
def convert_spreadsheet_data(data, default_year=None):
"""
Convert spreadsheet row into structured data.
Expected keys: day, date, plan, actual
"""
# Skip header or invalid rows
if not data.get("day") or not data["day"].isdigit():
return None
result = {}
# Convert day to integer
result['day'] = int(data['day'])
# Convert time to a datetime object
from datetime import datetime
# Assuming Indonesian format with month names
# Replace Indonesian month names with English if needed
month_mapping = {
'Januari': 'January', 'Februari': 'February', 'Maret': 'March',
'April': 'April', 'Mei': 'May', 'Juni': 'June',
'Juli': 'July', 'Agustus': 'August', 'September': 'September',
'Oktober': 'October', 'November': 'November', 'Desember': 'December'
}
# Convert day
result["day"] = int(data["day"])
# Determine default year
if default_year is None:
default_year = datetime.now().year
date_str = data.get("date", "").strip()
# ---------- DATE HANDLING ----------
# Accept formats like: "Nov 20", "Dec 3", "Jan 1"
parsed_date = None
if date_str:
try:
parsed_date = datetime.strptime(f"{date_str} {default_year}", "%b %d %Y")
except ValueError:
try:
parsed_date = datetime.strptime(f"{date_str} {default_year}", "%B %d %Y")
except ValueError:
parsed_date = None
# YEAR ROLLOVER (Dec → Jan next year)
if parsed_date and parsed_date.month == 1:
parsed_date = parsed_date.replace(year=default_year + 1)
result["date"] = parsed_date
# ---------- PERCENT HANDLING ----------
def parse_percent(value):
if not value:
return 0.0
v = value.strip().replace(",", ".").replace("%", "")
try:
return float(v) / 100.0
except ValueError:
return 0.0
time_str = data['time']
for indo, eng in month_mapping.items():
time_str = time_str.replace(indo, eng)
# Format: "Sabtu, Juli 13, 2024" -> "Saturday, July 13, 2024"
# Removing the day of week to simplify parsing
time_str = time_str.split(', ', 1)[1] # Remove "Sabtu, "
result['time'] = datetime.strptime(time_str, '%B %d, %Y')
# Convert percentage strings to floats
# Handling format like "0,12%" -> 0.12
for key in ['plan', 'actual', 'gap']:
# Replace comma with dot (European to US decimal notation)
value = data[key].replace(',', '.')
# Remove percentage sign
value = value.rstrip('%')
# Convert to float
result[key] = float(value) / 100 # Divide by 100 to get the actual decimal value
result["plan"] = parse_percent(data.get("plan", "0"))
result["actual"] = parse_percent(data.get("actual", "0"))
# Gap calculation
result["gap"] = result["actual"] - result["plan"]
return result
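# Illustrative sketch of the row shape this converter expects; the cell values
# below are invented:
def _example_convert_row():
    row = {"day": "12", "date": "Nov 20", "plan": "45,5%", "actual": "40%"}
    converted = convert_spreadsheet_data(row, default_year=2025)
    # -> {"day": 12, "date": datetime(2025, 11, 20), "plan": 0.455,
    #     "actual": 0.4, "gap": 0.4 - 0.455 (about -0.055)}
    return converted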
def fetch_all_sections(service, spreadsheet_id, sheet_name):
# Fetch a wide range including columns AL
result = service.spreadsheets().values().get(
spreadsheetId=spreadsheet_id,
range=f"{sheet_name}!A5:M5000"
).execute()
values = result.get("values", [])
if not values:
raise ValueError("No data found in sheet")
data = []
current_section = None
current_subsystem = None
for row in values:
# Pad missing columns to avoid index errors
row += [""] * (13 - len(row))
colA, colB, colC, colD, colE, colF, colG, colH, colI, colJ, colK, colL, colM = row
# Detect a SECTION — bold blue rows in Column C
if colC and not colD and not colE:
current_section = colC.strip()
current_subsystem = None
continue
# Detect a SUBSYSTEM — indented header in Column D
if colD and not colE:
current_subsystem = colD.strip()
continue
# Detect a TASK — Column E populated
if colE:
task = colE.strip()
pic = colF.strip()
start_date = indo_formatted_date(colG.strip())
finish_date = indo_formatted_date(colH.strip())
duration = colI.strip()
plan = colK.strip()
actual = colL.strip()
gap = colM.strip()
data.append({
"system": current_section,
"subsystem": current_subsystem,
"task": task,
"PIC": pic,
"start_date": start_date,
"end_date": finish_date,
"duration": int(duration),
"plan": plan,
"actual": actual,
"gap": gap
})
return data
def indo_formatted_date(date_str, base_year=2025):
"""
Convert short date like 'Nov 20', '30-Dec', 'Jan 1'
into: 'Rabu, November 20, 2025'
If month is January, year becomes 2026.
"""
# Month mappings
eng_to_indo_month = {
"Jan": "Januari", "Feb": "Februari", "Mar": "Maret", "Apr": "April",
"May": "Mei", "Jun": "Juni", "Jul": "Juli", "Aug": "Agustus",
"Sep": "September", "Oct": "Oktober", "Nov": "November", "Dec": "Desember"
}
indo_days = {
0: "Senin",
1: "Selasa",
2: "Rabu",
3: "Kamis",
4: "Jumat",
5: "Sabtu",
6: "Minggu"
}
# Normalize formats ("30-Dec" → "Dec 30")
if "-" in date_str:
d, m = date_str.split("-")
date_str = f"{m} {d}"
# Parse using English abbreviation
try:
dt = datetime.strptime(f"{date_str} {base_year}", "%b %d %Y")
except ValueError:
return None
# Handle year rollover (Jan -> next year)
if dt.month == 1:
dt = dt.replace(year=base_year + 1)
# Convert to Indonesian components
day_name = indo_days[dt.weekday()]
month_name = eng_to_indo_month[dt.strftime("%b")]
return f"{day_name}, {month_name} {dt.day}, {dt.year}"

@ -161,7 +161,7 @@ async def get_overview_overhaul(*, db_session: DbSession):
)
)
ongoing_result = await db_session.execute(ongoing_query.options(selectinload(OverhaulScope.maintenance_type)))
ongoing_overhaul = ongoing_result.first()
ongoing_overhaul = ongoing_result.scalar_one_or_none()
# 2. If no ongoing overhaul, get the closest scheduled overhaul
if ongoing_overhaul is None:
@ -202,6 +202,9 @@ async def get_overview_overhaul(*, db_session: DbSession):
)
results = await db_session.execute(equipments)
#Remaining days based on status
remaining_days = (selected_overhaul.start_date - current_date).days if selected_overhaul.status == "Upcoming" else (selected_overhaul.end_date - current_date).days
return {
"status": selected_overhaul.status,
@ -212,7 +215,7 @@ async def get_overview_overhaul(*, db_session: DbSession):
"end_date": selected_overhaul.end_date,
"duration_oh": selected_overhaul.duration_oh,
"crew_number": selected_overhaul.crew_number,
"remaining_days": (selected_overhaul.start_date - current_date).days,
"remaining_days": remaining_days,
"equipment_count": len(results.scalars().all()),
},
}
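# Worked sketch of the remaining-days rule above (dates are invented): an upcoming
# overhaul counts down to its start date, an ongoing one to its end date.
def _example_remaining_days():
    from datetime import date
    current_date = date(2025, 1, 1)
    start_date, end_date = date(2025, 1, 12), date(2025, 2, 20)
    upcoming = (start_date - current_date).days  # -> 11
    ongoing = (end_date - current_date).days     # -> 50
    return upcoming, ongoing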

@ -33,3 +33,11 @@ class MasterSparepartProcurement(Base, DefaultMixin):
eta_requisition = Column(Date, nullable=False)
eta_ordered = Column(Date, nullable=True)
eta_received = Column(Date, nullable=True)
class SparepartRemark(Base, DefaultMixin):
__tablename__ = "oh_ms_sparepart_remark"
itemnum = Column(String, nullable=False)
remark = Column(String, nullable=False)

@ -4,17 +4,18 @@ from src.database.core import CollectorDbSession
from src.database.service import (CommonParameters, DbSession,
search_filter_sort_paginate)
from src.models import StandardResponse
from src.sparepart.schema import SparepartRemark
from .service import get_all
from .service import create_remark, get_spareparts_paginated
router = APIRouter()
@router.get("", response_model=StandardResponse[list])
async def get_sparepart(collector_db_session:CollectorDbSession):
async def get_sparepart(collector_db_session:CollectorDbSession, db_session: DbSession):
"""Get all scope activity pagination."""
# return
data = await get_all(collector_db_session)
data = await get_spareparts_paginated(db_session=db_session, collector_db_session=collector_db_session)
@ -22,6 +23,17 @@ async def get_sparepart(collector_db_session:CollectorDbSession):
data=data,
message="Data retrieved successfully",
)
@router.post("", response_model=StandardResponse[SparepartRemark])
async def create_remark_route(collector_db_session:CollectorDbSession, db_session: DbSession, remark_in:SparepartRemark):
"""Create a remark for a spare part item."""
sparepart_remark = await create_remark(db_session=db_session, collector_db_session=collector_db_session, remark_in=remark_in)
return StandardResponse(
data=sparepart_remark,
message="Remark Created successfully"
)
# @router.post("", response_model=StandardResponse[ActivityMasterCreate])

@ -1,4 +1,6 @@
from datetime import datetime
from dataclasses import dataclass
from datetime import date, datetime
from enum import Enum
from typing import Any, Dict, List, Optional
from uuid import UUID
@ -35,41 +37,48 @@ class ActivityMasterRead(ActivityMaster):
class ActivityMasterPagination(Pagination):
items: List[ActivityMasterRead] = []
# {
# "overview": {
# "totalEquipment": 30,
# "nextSchedule": {
# "date": "2025-01-12",
# "Overhaul": "B",
# "equipmentCount": 30
# }
# },
# "criticalParts": [
# "Boiler feed pump",
# "Boiler reheater system",
# "Drum Level (Right) Root Valve A",
# "BCP A Discharge Valve",
# "BFPT A EXH Press HI Root VLV"
# ],
# "schedules": [
# {
# "date": "2025-01-12",
# "Overhaul": "B",
# "status": "upcoming"
# }
# // ... other scheduled overhauls
# ],
# "systemComponents": {
# "boiler": {
# "status": "operational",
# "lastOverhaul": "2024-06-15"
# },
# "turbine": {
# "hpt": { "status": "operational" },
# "ipt": { "status": "operational" },
# "lpt": { "status": "operational" }
# }
# // ... other major components
# }
# }
class ProcurementStatus(Enum):
PLANNED = "planned"
ORDERED = "ordered"
RECEIVED = "received"
CANCELLED = "cancelled"
@dataclass
class SparepartRequirement:
"""Sparepart requirement for equipment overhaul"""
sparepart_id: str
quantity_required: int
lead_time: int
sparepart_name: str
unit_cost: float
avg_cost: float
remark:str
@dataclass
class SparepartStock:
"""Current sparepart stock information"""
sparepart_id: str
sparepart_name: str
current_stock: int
unit_cost: float
location: str
remark:str
@dataclass
class ProcurementRecord:
"""Purchase Order/Purchase Request record"""
po_pr_id: str
sparepart_id: str
sparepart_name: str
quantity: int
unit_cost: float
total_cost: float
order_date: date
expected_delivery_date: date
status: ProcurementStatus
po_vendor_delivery_date: date
class SparepartRemark(DefultBase):
itemnum: str
remark:str
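# Illustrative sketch of building the records above; every value is a placeholder:
def _example_requirement() -> SparepartRequirement:
    return SparepartRequirement(
        sparepart_id="ITM-0001",
        quantity_required=4,
        lead_time=3,                  # months
        sparepart_name="Gasket set",
        unit_cost=150.0,
        avg_cost=142.5,
        remark="long-lead item",
    )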

@ -9,147 +9,402 @@ import numpy as np
from dataclasses import dataclass
from enum import Enum
from sqlalchemy import Delete, Select, text
from sqlalchemy import Delete, Select, select, text
from sqlalchemy.orm import joinedload, selectinload
from src.auth.service import CurrentUser
from src.database.core import DbSession
from src.database.service import CommonParameters, search_filter_sort_paginate
from src.logging import setup_logging
from src.overhaul_scope.service import get as get_scope
from src.overhaul_activity.service import get_standard_scope_by_session_id
from src.overhaul_scope.service import get as get_scope, get_overview_overhaul
from src.overhaul_scope.service import get_prev_oh
from src.sparepart.model import SparepartRemark
from src.sparepart.schema import ProcurementRecord, ProcurementStatus, SparepartRequirement, SparepartStock
log = logging.getLogger(__name__)
setup_logging(logger=log)
async def get_all(db_session: DbSession):
from sqlalchemy import text
import math
from sqlalchemy import text
# async def get_spareparts_paginated(
# *,
# db_session,
# collector_db_session,
# ):
# """
# Get spare parts for work orders under specific parent WO(s),
# including inventory and PR/PO data.
# """
# # Normalize parent_num to array for SQL ANY()
# # parent_nums = parent_num if isinstance(parent_num, (list, tuple)) else [parent_num]
# parent_nums = []
# data_query = text("""
# WITH selected_wo AS (
# SELECT
# wonum,
# xx_parent,
# location_tag,
# assetnum,
# siteid,
# reportdate
# FROM public.wo_maxim
# WHERE xx_parent = ANY(:parent_nums)
# ),
# wo_materials AS (
# SELECT
# wm.wonum,
# wm.itemnum,
# wm.itemqty,
# wm.inv_itemnum,
# wm.inv_location,
# wm.inv_curbaltotal,
# wm.inv_avgcost,
# sw.location_tag
# FROM public.wo_maxim_material wm
# JOIN selected_wo sw ON wm.wonum = sw.wonum
# ),
# -- PR Lines
# pr_lines AS (
# SELECT
# pl.item_num,
# h.num AS pr_number,
# h.issue_date AS pr_issue_date,
# h.status AS pr_status,
# pl.qty_ordered AS pr_qty_ordered,
# pl.qty_requested AS pr_qty_requested
# FROM public.maximo_sparepart_pr_po h
# JOIN public.maximo_sparepart_pr_po_line pl
# ON h.num = pl.num
# WHERE h.type = 'PR'
# AND EXTRACT(YEAR FROM h.issue_date) >= 2019
# ),
# -- PO Lines
# po_lines AS (
# SELECT
# pl.item_num,
# h.num AS po_number,
# h.estimated_arrival_date AS po_estimated_arrival_date,
# h.vendeliverydate AS po_vendeliverydate,
# h.receipts AS po_receipt,
# h.status AS po_status,
# pl.qty_ordered AS po_qty_ordered,
# pl.qty_received AS po_qty_received
# FROM public.maximo_sparepart_pr_po h
# JOIN public.maximo_sparepart_pr_po_line pl
# ON h.num = pl.num
# WHERE h.type = 'PO'
# AND (h.receipts = 'NONE')
# AND (h.status IS NOT NULL)
# ),
# -- Item Descriptions
# item_descriptions AS (
# SELECT DISTINCT
# item_num,
# FIRST_VALUE(description) OVER (
# PARTITION BY item_num
# ORDER BY created_at DESC NULLS LAST
# ) AS description
# FROM public.maximo_sparepart_pr_po_line
# WHERE description IS NOT NULL
# ),
# -- Unified PR/PO data
# pr_po_unified AS (
# SELECT
# pr.item_num,
# pr.pr_number,
# pr.pr_issue_date,
# pr.pr_qty_ordered,
# pr.pr_status,
# po.po_number,
# COALESCE(po.po_qty_ordered, 0) AS po_qty_ordered,
# COALESCE(po.po_qty_received, 0) AS po_qty_received,
# po.po_estimated_arrival_date,
# po.po_vendeliverydate,
# po.po_receipt,
# po.po_status,
# CASE WHEN po.po_number IS NOT NULL THEN 'YES' ELSE 'NO' END AS po_exists
# FROM pr_lines pr
# LEFT JOIN po_lines po
# ON pr.item_num = po.item_num
# AND pr.pr_number = po.po_number
# ),
# -- Aggregate PR/PO info
# pr_po_agg AS (
# SELECT
# item_num,
# SUM(COALESCE(pr_qty_ordered, 0)) AS total_pr_qty,
# SUM(COALESCE(po_qty_ordered, 0)) AS total_po_qty,
# SUM(COALESCE(po_qty_received, 0)) AS total_po_received,
# JSON_AGG(
# JSON_BUILD_OBJECT(
# 'pr_number', pr_number,
# 'pr_issue_date', pr_issue_date,
# 'pr_qty_requested', pr_qty_ordered,
# 'pr_status', pr_status,
# 'po_exists', po_exists,
# 'po_qty_ordered', po_qty_ordered,
# 'po_qty_received', po_qty_received,
# 'po_estimated_arrival_date', po_estimated_arrival_date,
# 'po_vendeliverydate', po_vendeliverydate,
# 'po_receipt', po_receipt,
# 'po_status', po_status
# )
# ORDER BY pr_issue_date DESC
# ) AS pr_po_details
# FROM pr_po_unified
# GROUP BY item_num
# )
# SELECT
# wm.itemnum,
# COALESCE(id.description, 'No description available') AS item_description,
# SUM(wm.itemqty) AS total_required_for_oh,
# COALESCE(MAX(wm.inv_curbaltotal), 0) AS current_balance_total,
# COALESCE(ap.total_pr_qty, 0) AS total_pr_qty,
# COALESCE(ap.total_po_qty, 0) AS total_po_qty,
# COALESCE(ap.total_po_received, 0) AS total_po_received,
# ap.pr_po_details
# FROM wo_materials wm
# LEFT JOIN item_descriptions id
# ON wm.itemnum = id.item_num
# LEFT JOIN pr_po_agg ap
# ON wm.itemnum = ap.item_num
# GROUP BY
# wm.itemnum, id.description,
# ap.total_pr_qty, ap.total_po_qty, ap.total_po_received, ap.pr_po_details
# ORDER BY wm.itemnum;
# """)
# rows = await collector_db_session.execute(data_query, {"parent_nums": parent_nums})
# spare_parts = []
# for row in rows:
# spare_parts.append({
# "item_num": row.itemnum,
# "description": row.item_description,
# "current_balance_total": float(row.current_balance_total or 0.0),
# "total_required_for_oh": float(row.total_required_for_oh or 0.0),
# "total_pr_qty": row.total_pr_qty,
# "total_po_qty": row.total_po_qty,
# "total_po_received": row.total_po_received,
# "pr_po_details": row.pr_po_details,
# })
# return spare_parts
async def get_spareparts_paginated(*, db_session, collector_db_session):
"""
Get all spare parts with their latest PR and PO information.
Get spare parts with usage, inventory, and PR/PO information for the current overhaul scope.
User remarks from the application database are merged into the result.
Args:
db_session: SQLAlchemy database session
assetnum: Optional asset number filter (not used in this query but kept for compatibility)
Returns:
List of dictionaries containing spare part information
collector_db_session: Collector (Maximo) database session
"""
# Define the SQL query
query = text("""
WITH latest_prs AS (
SELECT DISTINCT ON (pl.item_num)
pl.item_num,
h.num as pr_number,
h.issue_date as pr_issue_date,
h.status as pr_status,
pl.qty_ordered as pr_qty_ordered,
pl.description,
pl.unit_cost,
pl.line_cost
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
WHERE h.type = 'PR'
AND h.issue_date IS NOT NULL
AND h.num LIKE 'K%'
ORDER BY pl.item_num, h.issue_date DESC
)
SELECT DISTINCT ON (pr.item_num)
pr.item_num,
pr.line_cost,
pr.unit_cost,
pr.description,
COALESCE(i.curbaltotal, 0) as current_balance_total,
pr.pr_number,
pr.pr_issue_date,
pr.pr_qty_ordered,
CASE
WHEN po.po_number IS NOT NULL THEN 'YES'
ELSE 'NO'
END as po_exists,
COALESCE(po.qty_received, 0) as po_qty_received,
COALESCE(po.qty_ordered, 0) as po_qty_ordered,
po.estimated_arrival_date as po_estimated_arrival_date
FROM latest_prs pr
LEFT JOIN public.maximo_inventory i ON pr.item_num = i.itemnum
LEFT JOIN LATERAL (
# calculate limit/offset
# limit = items_per_page
# offset = (page - 1) * items_per_page
# wo_materials AS (
# SELECT
# wm.wonum,
# wm.itemnum,
# wm.itemqty,
# wm.inv_itemnum,
# wm.inv_location,
# wm.inv_curbaltotal,
# wm.inv_avgcost,
# sw.asset_location as location_tag
# FROM public.wo_maxim_material wm
# JOIN oh_workorders sw ON wm.wonum = sw.wonum
# ),
# -----------------------------
# Query #1: Fetch paginated rows
# -----------------------------
data_query = text("""
WITH oh_workorders AS (
SELECT DISTINCT wonum, asset_location, asset_unit
FROM public.wo_maximo ma
WHERE ma.xx_parent IN ('155026', '155027', '155029', '155030')
),
wo_materials AS (
SELECT
wm.wonum,
wm.itemnum,
wm.itemqty,
wm.inv_location AS inv_location,
wm.inv_curbaltotal AS inv_curbaltotal,
wm.inv_avgcost AS inv_avgcost,
sw.asset_location as location_tag
FROM public.wo_maximo_material wm
JOIN oh_workorders sw ON wm.wonum = sw.wonum
),
location_sparepart_stats AS (
SELECT location_tag, itemnum,
COUNT(DISTINCT wonum) as total_wo_count,
SUM(itemqty) as total_qty_used,
AVG(itemqty) as avg_qty_per_wo,
MIN(itemqty) as min_qty_used,
MAX(itemqty) as max_qty_used
FROM wo_materials
GROUP BY location_tag, itemnum
HAVING SUM(itemqty) > 0
),
pr_lines AS (
SELECT
pl.item_num,
h.num as pr_number,
h.issue_date as pr_issue_date,
h.status as pr_status,
pl.qty_ordered as pr_qty_ordered,
pl.qty_requested as pr_qty_requested
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
WHERE h.type = 'PR' AND EXTRACT(YEAR FROM h.issue_date) >= 2023
),
item_descriptions AS (
SELECT DISTINCT
item_num,
FIRST_VALUE(description) OVER (
PARTITION BY item_num
ORDER BY created_at DESC NULLS LAST
) as description
FROM public.maximo_sparepart_pr_po_line
WHERE description IS NOT NULL
),
po_lines AS (
SELECT
pl.item_num,
h.num as po_number,
pl.qty_received,
pl.qty_ordered,
h.estimated_arrival_date
h.estimated_arrival_date as po_estimated_arrival_date,
h.vendeliverydate as po_vendeliverydate,
h.receipts as po_receipt,
h.status as po_status,
pl.qty_ordered as po_qty_ordered,
pl.qty_received as po_qty_received
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
WHERE h.type = 'PO'
AND h.num = pr.pr_number
AND pl.item_num = pr.item_num
LIMIT 1
) po ON true
ORDER BY pr.item_num;
AND (h.receipts = 'NONE')
AND (h.status IS NOT NULL)
),
pr_po_unified AS (
SELECT
pr.item_num,
pr.pr_number,
pr.pr_issue_date,
pr.pr_qty_ordered,
pr.pr_status,
po.po_number,
COALESCE(po.po_qty_ordered,0) as po_qty_ordered,
COALESCE(po.po_qty_received,0) as po_qty_received,
po.po_estimated_arrival_date,
po.po_vendeliverydate,
po.po_receipt,
po.po_status,
CASE WHEN po.po_number IS NOT NULL THEN 'YES' ELSE 'NO' END as po_exists
FROM pr_lines pr
LEFT JOIN po_lines po
ON pr.item_num = po.item_num
AND pr.pr_number = po.po_number
),
pr_po_agg AS (
SELECT
item_num,
SUM(COALESCE(pr_qty_ordered,0)) as total_pr_qty,
SUM(COALESCE(po_qty_ordered,0)) as total_po_qty,
SUM(COALESCE(po_qty_received,0)) as total_po_received,
JSON_AGG(
JSON_BUILD_OBJECT(
'pr_number', pr_number,
'pr_issue_date', pr_issue_date,
'pr_qty_requested', pr_qty_ordered,
'pr_status', pr_status,
'po_exists', po_exists,
'po_qty_ordered', po_qty_ordered,
'po_qty_received', po_qty_received,
'po_estimated_arrival_date', po_estimated_arrival_date,
'po_vendeliverydate', po_vendeliverydate,
'po_receipt', po_receipt,
'po_status', po_status
) ORDER BY pr_issue_date DESC
) as pr_po_details
FROM pr_po_unified
GROUP BY item_num
),
inv_summary AS (
SELECT
itemnum,
MAX(inv_curbaltotal) AS total_curbaltotal,
AVG(inv_avgcost) AS avg_cost
FROM wo_materials
GROUP BY itemnum
)
SELECT
lss.itemnum,
COALESCE(id.description, 'No description available') as item_description,
lss.total_wo_count,
lss.total_qty_used,
ROUND(CAST(lss.avg_qty_per_wo AS NUMERIC), 2) as avg_qty_per_wo,
lss.min_qty_used,
lss.max_qty_used,
COALESCE(i.total_curbaltotal,0) as current_balance_total,
COALESCE(ap.total_pr_qty,0) as total_pr_qty,
COALESCE(ap.total_po_qty,0) as total_po_qty,
COALESCE(ap.total_po_received,0) as total_po_received,
ap.pr_po_details
FROM location_sparepart_stats lss
LEFT JOIN item_descriptions id ON lss.itemnum = id.item_num
LEFT JOIN inv_summary i ON lss.itemnum = i.itemnum
LEFT JOIN pr_po_agg ap ON lss.itemnum = ap.item_num
ORDER BY lss.location_tag, lss.itemnum;
""")
# Execute the query
result = await db_session.execute(query)
overhaul = await get_overview_overhaul(db_session=db_session)
# Fetch all results and convert to list of dictionaries
standard_overhaul = await get_standard_scope_by_session_id(db_session=db_session, collector_db=collector_db_session, overhaul_session_id=overhaul['overhaul']['id'])
asset_locations = [eq.location_tag for eq in standard_overhaul]
rows = await collector_db_session.execute(
data_query,
{"asset_locations": asset_locations}
)
sparepart_remark = (await db_session.execute(
select(SparepartRemark)
)).scalars().all()
sparepart_remark_dict = {item.itemnum: item.remark for item in sparepart_remark}
spare_parts = []
for row in result:
for row in rows:
spare_parts.append({
"item_num": row.item_num,
"description": row.description,
"line_cost": row.line_cost,
"unit_cost": row.unit_cost,
"current_balance_total": float(row.current_balance_total) if row.current_balance_total is not None else 0.0,
"pr_number": row.pr_number,
"pr_issue_date": row.pr_issue_date,
"pr_qty_ordered": float(row.pr_qty_ordered) if row.pr_qty_ordered is not None else 0.0,
"po_exists": row.po_exists,
"po_qty_received": float(row.po_qty_received) if row.po_qty_received is not None else 0.0,
"po_qty_ordered": float(row.po_qty_ordered) if row.po_qty_ordered is not None else 0.0,
"po_estimated_arrival_date": row.po_estimated_arrival_date
"item_num": row.itemnum,
"description": row.item_description,
"remark": sparepart_remark_dict.get(row.itemnum, ""),
"current_balance_total": float(row.current_balance_total) if row.current_balance_total else 0.0,
"total_required_for_oh": float(row.avg_qty_per_wo),
"total_pr_qty": row.total_pr_qty,
"total_po_qty": row.total_po_qty,
"total_po_received": row.total_po_received,
"pr_po_details": row.pr_po_details
})
return spare_parts
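# Illustrative sketch of one item in the list returned above (all values invented);
# the router's GET endpoint wraps this list in a StandardResponse:
def _example_sparepart_row():
    return {
        "item_num": "ITM-0001",
        "description": "Gasket set",
        "remark": "long-lead item",
        "current_balance_total": 2.0,
        "total_required_for_oh": 4.0,
        "total_pr_qty": 4,
        "total_po_qty": 4,
        "total_po_received": 0,
        "pr_po_details": [{"pr_number": "K12345", "po_exists": "YES"}],
    }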
class ProcurementStatus(Enum):
PLANNED = "planned"
ORDERED = "ordered"
RECEIVED = "received"
CANCELLED = "cancelled"
@dataclass
class SparepartRequirement:
"""Sparepart requirement for equipment overhaul"""
sparepart_id: str
quantity_required: int
lead_time: int
sparepart_name: str
unit_cost: float
@dataclass
class SparepartStock:
"""Current sparepart stock information"""
sparepart_id: str
sparepart_name: str
current_stock: int
unit_cost: float
location: str
@dataclass
class ProcurementRecord:
"""Purchase Order/Purchase Request record"""
po_pr_id: str
sparepart_id: str
sparepart_name: str
quantity: int
unit_cost: float
total_cost: float
order_date: date
expected_delivery_date: date
status: ProcurementStatus
po_vendor_delivery_date: date
class SparepartManager:
"""Manages sparepart availability and procurement for overhaul optimization"""
@ -279,7 +534,8 @@ class SparepartManager:
sparepart_id = requirement.sparepart_id
needed_quantity = requirement.quantity_required
sparepart_name = requirement.sparepart_name
unit_cost = requirement.unit_cost
sparepart_remark= requirement.remark
unit_cost = requirement.avg_cost if requirement.avg_cost > 0 else requirement.unit_cost
current_stock = adjusted_stocks.get(sparepart_id, 0)
@ -307,7 +563,8 @@ class SparepartManager:
'status': order.status.value,
'months_until_delivery': self._calculate_months_until_delivery(order.expected_delivery_date, target_month),
'is_on_time': self._is_delivery_on_time(order.expected_delivery_date, target_month),
'usage': 'covers_requirement'
'usage': 'covers_requirement',
'remark': sparepart_remark
}
pr_po_summary['existing_orders'].append(order_info)
pr_po_summary['total_existing_value'] += order.total_cost
@ -319,6 +576,7 @@ class SparepartManager:
missing_parts.append({
'sparepart_id': sparepart_id,
'sparepart_name': sparepart_name,
'remark': sparepart_remark,
'required': needed_quantity,
'current_stock': current_stock,
'ordered_quantity': total_ordered_quantity,
@ -350,6 +608,7 @@ class SparepartManager:
new_order = {
'sparepart_id': sparepart_id,
'sparepart_name': sparepart_name,
'remark': sparepart_remark,
'quantity_needed': shortage,
'unit_cost': unit_cost,
'total_cost': procurement_cost,
@ -656,7 +915,7 @@ class SparepartManager:
# Integration functions for database operations
async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session) -> SparepartManager:
async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session, app_db_session, analysis_window_months = None) -> SparepartManager:
"""Load sparepart data from database"""
# You'll need to implement these queries based on your database schema
# Get scope dates for analysis window
@ -664,31 +923,40 @@ async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session) -> Spare
# prev_oh_scope = await get_prev_oh(db_session=db_session, overhaul_session=scope)
analysis_start_date = prev_oh_scope.end_date
analysis_window_months = int(((scope.start_date - prev_oh_scope.end_date).days / 30) * 1.5)
analysis_window_months = int(((scope.start_date - prev_oh_scope.end_date).days / 30) * 1.2) if not analysis_window_months else analysis_window_months
sparepart_manager = SparepartManager(analysis_start_date, analysis_window_months)
start_date = prev_oh_scope.end_date
end_date = scope.start_date
# Load sparepart stocks
# Example query - adjust based on your schema
query = text("""
SELECT
mi.id,
mi.itemnum,
mi.itemsetid,
mi."location",
mi.curbaltotal,
mi.avgcost,
mspl.description
FROM public.maximo_inventory mi
LEFT JOIN public.maximo_sparepart_pr_po_line mspl
ON mi.itemnum = mspl.item_num
""")
query = text("""SELECT
wm.inv_itemnum AS itemnum,
wm.inv_itemsetid AS itemsetid,
wm.inv_location AS location,
MAX(wm.inv_curbaltotal) AS curbaltotal,
AVG(wm.inv_avgcost) AS avgcost,
COALESCE(mspl.description, 'No description available') AS description
FROM public.wo_maximo_material wm
LEFT JOIN public.maximo_sparepart_pr_po_line mspl
ON wm.inv_itemnum = mspl.item_num
WHERE wm.inv_itemnum IS NOT NULL
GROUP BY wm.inv_itemnum, wm.inv_itemsetid, wm.inv_location, mspl.description
""")
log.info("Fetch sparepart")
sparepart_stocks_query = await db_session.execute(query)
sparepart_remark = (await app_db_session.execute(
select(SparepartRemark)
)).scalars().all()
sparepart_remark_dict = {item.itemnum: item.remark for item in sparepart_remark}
for stock_record in sparepart_stocks_query:
stock = SparepartStock(
sparepart_id=stock_record.itemnum,
remark=sparepart_remark_dict.get(stock_record.itemnum),
sparepart_name=stock_record.description,
current_stock=stock_record.curbaltotal,
unit_cost=stock_record.avgcost,
@ -696,27 +964,186 @@ async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session) -> Spare
)
sparepart_manager.add_sparepart_stock(stock)
# Load equipment sparepart requirements
# parent_nums = []
# query = text("""
# WITH target_wo AS (
# -- Work orders from the given parent(s)
# SELECT
# wonum,
# xx_parent,
# location_tag AS asset_location
# FROM public.wo_maxim
# WHERE xx_parent = ANY(:parent_nums)
# ),
# target_materials AS (
# -- Materials directly linked to target WOs (new requirement data)
# SELECT
# tw.asset_location,
# wm.itemnum,
# wm.inv_avgcost
# SUM(wm.itemqty) AS total_qty_required
# FROM public.wo_maxim_material wm
# JOIN target_wo tw ON wm.wonum = tw.wonum
# WHERE wm.itemnum IS NOT NULL
# GROUP BY tw.asset_location, wm.itemnum
# ),
# -- Historical OH work orders (for lead time reference)
# oh_workorders AS (
# SELECT DISTINCT
# wonum,
# asset_location
# FROM public.wo_staging_maximo_2
# WHERE worktype = 'OH'
# AND asset_location IS NOT NULL
# AND asset_unit IN ('3', '00')
# ),
# sparepart_usage AS (
# SELECT
# oh.asset_location,
# mwm.itemnum,
# mwm.itemqty,
# mwm.wonum
# FROM oh_workorders oh
# INNER JOIN public.wo_maxim_material mwm
# ON oh.wonum = mwm.wonum
# ),
# location_sparepart_stats AS (
# SELECT
# asset_location,
# itemnum,
# COUNT(DISTINCT wonum) as total_wo_count,
# SUM(itemqty) as total_qty_used,
# AVG(itemqty) as avg_qty_per_wo
# FROM sparepart_usage
# GROUP BY asset_location, itemnum
# ),
# pr_po_combined AS (
# SELECT
# mspl.item_num,
# mspl.num,
# mspl.unit_cost,
# mspl.qty_ordered,
# MAX(CASE WHEN mspo.type = 'PR' THEN mspo.issue_date END) as issue_date,
# MAX(CASE WHEN mspo.type = 'PO' THEN mspo.vendeliverydate END) as vendeliverydate,
# MAX(CASE WHEN mspo.type = 'PO' THEN mspo.estimated_arrival_date END) as estimated_arrival_date
# FROM public.maximo_sparepart_pr_po_line mspl
# INNER JOIN public.maximo_sparepart_pr_po mspo
# ON mspl.num = mspo.num
# WHERE mspo.type IN ('PR', 'PO')
# GROUP BY mspl.item_num, mspl.num, mspl.unit_cost, mspl.qty_ordered
# ),
# leadtime_stats AS (
# SELECT
# item_num,
# ROUND(CAST(AVG(
# EXTRACT(EPOCH FROM (
# COALESCE(vendeliverydate, estimated_arrival_date) - issue_date
# )) / 86400 / 30.44
# ) AS NUMERIC), 1) as avg_leadtime_months,
# ROUND(CAST(MIN(
# EXTRACT(EPOCH FROM (
# COALESCE(vendeliverydate, estimated_arrival_date) - issue_date
# )) / 86400 / 30.44
# ) AS NUMERIC), 1) as min_leadtime_months,
# ROUND(CAST(MAX(
# EXTRACT(EPOCH FROM (
# COALESCE(vendeliverydate, estimated_arrival_date) - issue_date
# )) / 86400 / 30.44
# ) AS NUMERIC), 1) as max_leadtime_months,
# COUNT(*) as leadtime_sample_size,
# COUNT(CASE WHEN vendeliverydate IS NOT NULL THEN 1 END) as vendelivery_count,
# COUNT(CASE WHEN vendeliverydate IS NULL AND estimated_arrival_date IS NOT NULL THEN 1 END) as estimated_only_count
# FROM pr_po_combined
# WHERE issue_date IS NOT NULL
# AND COALESCE(vendeliverydate, estimated_arrival_date) IS NOT NULL
# AND COALESCE(vendeliverydate, estimated_arrival_date) > issue_date
# GROUP BY item_num
# ),
# cost_stats AS (
# SELECT
# item_num,
# ROUND(CAST(AVG(unit_cost) AS NUMERIC), 2) as avg_unit_cost,
# ROUND(CAST(MIN(unit_cost) AS NUMERIC), 2) as min_unit_cost,
# ROUND(CAST(MAX(unit_cost) AS NUMERIC), 2) as max_unit_cost,
# COUNT(*) as cost_sample_size,
# ROUND(CAST(AVG(unit_cost * qty_ordered) AS NUMERIC), 2) as avg_order_value,
# ROUND(CAST(SUM(unit_cost * qty_ordered) AS NUMERIC), 2) as total_value_ordered
# FROM pr_po_combined
# WHERE unit_cost IS NOT NULL AND unit_cost > 0
# GROUP BY item_num
# ),
# item_descriptions AS (
# SELECT DISTINCT
# item_num,
# FIRST_VALUE(description) OVER (
# PARTITION BY item_num
# ORDER BY created_at DESC NULLS LAST
# ) as description
# FROM public.maximo_sparepart_pr_po_line
# WHERE description IS NOT NULL
# )
# SELECT
# tr.asset_location,
# tr.itemnum,
# COALESCE(id.description, 'No description available') as item_description,
# tr.total_qty_required AS total_required_for_oh,
# tr.inv_avgcost,
# COALESCE(lt.avg_leadtime_months, 0) as avg_leadtime_months,
# COALESCE(cs.avg_unit_cost, 0) as avg_unit_cost,
# ROUND(CAST(COALESCE(tr.total_qty_required * cs.avg_unit_cost, 0) AS NUMERIC), 2) as estimated_cost_for_oh
# FROM target_materials tr
# LEFT JOIN item_descriptions id ON tr.itemnum = id.item_num
# LEFT JOIN leadtime_stats lt ON tr.itemnum = lt.item_num
# LEFT JOIN cost_stats cs ON tr.itemnum = cs.item_num
# ORDER BY tr.asset_location, tr.itemnum;
# """)
# equipment_requirements_query = await db_session.execute(query, {"parent_nums": parent_nums})
# equipment_requirements = defaultdict(list)
# for req_record in equipment_requirements_query:
# requirement = SparepartRequirement(
# sparepart_id=req_record.itemnum,
# quantity_required=float(req_record.total_required_for_oh or 0.0),
# lead_time=float(req_record.avg_leadtime_months or 0.0),
# sparepart_name=req_record.item_description,
# unit_cost=float(req_record.avg_unit_cost or 0.0),
# avg_cost=float(req_record.inv_avgcost or 0.0),
# )
# equipment_requirements[req_record.asset_location].append(requirement)
# for equipment_tag, requirements in equipment_requirements.items():
# sparepart_manager.add_equipment_requirements(equipment_tag, requirements)
# Load equipment sparepart requirements
# You'll need to create this table/relationship
query = text("""WITH oh_workorders AS (
-- First, get all historical OH work orders (units 3 and 00, reported 2019 onwards)
SELECT DISTINCT
wonum,
asset_location
FROM public.wo_maximo ma
WHERE worktype = 'OH' AND asset_location IS NOT NULL AND asset_unit IN ('3', '00') AND EXTRACT(YEAR FROM reportdate) >= 2019
),
current_oh AS (
-- Work orders belonging to the current OH parent work orders
SELECT DISTINCT wonum, asset_location, asset_unit
FROM public.wo_maximo ma
WHERE ma.xx_parent IN ('155026', '155027', '155029', '155030')
),
sparepart_usage AS (
-- Get sparepart usage (and inventory average cost) for the current OH work orders
SELECT
oh.asset_location,
mwm.itemnum,
mwm.itemqty,
mwm.wonum,
mwm.inv_avgcost
FROM current_oh oh
INNER JOIN public.wo_maximo_material mwm
ON oh.wonum = mwm.wonum
),
location_sparepart_stats AS (
-- Calculate average usage per sparepart per location
SELECT
@@ -801,6 +1228,12 @@ item_descriptions AS (
) as description
FROM public.maximo_sparepart_pr_po_line
WHERE description IS NOT NULL
),
item_inventory AS (
SELECT
itemnum,
avgcost
FROM public.maximo_inventory
)
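-- item_inventory (above): inventory average cost per item, sourced from maximo_inventory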
SELECT
lss.asset_location,
@@ -811,6 +1244,7 @@ SELECT
ROUND(CAST(lss.avg_qty_per_wo AS NUMERIC), 2) as avg_qty_per_wo,
lss.min_qty_used,
lss.max_qty_used,
iin.avgcost as inv_avgcost,
-- Lead time metrics
COALESCE(lt.avg_leadtime_months, 0) as avg_leadtime_months,
COALESCE(lt.min_leadtime_months, 0) as min_leadtime_months,
@@ -831,6 +1265,7 @@ FROM location_sparepart_stats lss
LEFT JOIN item_descriptions id ON lss.itemnum = id.item_num
LEFT JOIN leadtime_stats lt ON lss.itemnum = lt.item_num
LEFT JOIN cost_stats cs ON lss.itemnum = cs.item_num
LEFT JOIN item_inventory iin ON lss.itemnum = iin.itemnum
ORDER BY lss.asset_location, lss.itemnum;""")
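# Each result row is one (asset_location, itemnum) pair with usage, lead-time and cost
# statistics; the rows are folded below into SparepartRequirement objects per location.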
equipment_requirements_query = await db_session.execute(query)
@@ -842,7 +1277,9 @@ ORDER BY lss.asset_location, lss.itemnum;""")
quantity_required=float(req_record.avg_qty_per_wo),
lead_time=float(req_record.avg_leadtime_months),
sparepart_name=req_record.item_description,
unit_cost=float(req_record.avg_unit_cost),
avg_cost=float(req_record.inv_avgcost or 0),
remark=sparepart_remark_dict.get(req_record.itemnum, "")
)
equipment_requirements[req_record.asset_location].append(requirement)
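# Illustrative mapping only (values are made up, not taken from the database):
#   row(asset_location='LOC-01', itemnum='ITEM-001', avg_qty_per_wo=2.0,
#       avg_leadtime_months=3.5, avg_unit_cost=120.0, inv_avgcost=110.0)
#   -> SparepartRequirement(sparepart_id='ITEM-001', quantity_required=2.0, lead_time=3.5,
#                           unit_cost=120.0, avg_cost=110.0, remark='')
#   appended to equipment_requirements['LOC-01'].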
@@ -854,54 +1291,65 @@ ORDER BY lss.asset_location, lss.itemnum;""")
# Load procurement records (PO/PR)
query = text("""
WITH active_pos AS (
-- Get all POs that are NOT complete (not yet received into inventory) and still approved
SELECT
pl.item_num,
h.num as po_number,
pl.qty_received,
pl.qty_ordered,
h.estimated_arrival_date,
h.vendeliverydate,
h.receipts as po_receipts,
h.status as po_status,
pl.description,
pl.unit_cost,
pl.line_cost
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl
ON h.num = pl.num
WHERE h.type = 'PO'
-- Exclude POs where receipts = 'COMPLETE'
AND (h.receipts IS NULL OR h.receipts != 'COMPLETE')
-- Keep only POs that are approved (or have no status yet)
AND (h.status IS NULL OR h.status = 'APPR')
),
po_with_pr_date AS (
-- Force join with PR to ensure every PO has a PR and an issue_date
SELECT
po.*,
pr.issue_date as pr_issue_date
FROM active_pos po
INNER JOIN public.maximo_sparepart_pr_po pr
ON pr.num = po.po_number
AND pr.type = 'PR'
),
item_inventory AS (
-- Current balance and average cost per item, taken from work order materials
SELECT
itemnum,
MAX(inv_curbaltotal) AS current_balance_total,
AVG(inv_avgcost) AS avg_cost
FROM public.wo_maximo_material
WHERE inv_itemnum IS NOT NULL
GROUP BY itemnum
)
SELECT
po.item_num,
po.description,
po.line_cost,
po.unit_cost,
COALESCE(i.current_balance_total, 0) as current_balance_total,
po.po_number,
po.pr_issue_date,
po.po_status,
po.po_receipts,
COALESCE(po.qty_received, 0) as po_qty_received,
COALESCE(po.qty_ordered, 0) as po_qty_ordered,
po.estimated_arrival_date as po_estimated_arrival_date,
po.vendeliverydate as po_vendor_delivery_date
FROM po_with_pr_date po
LEFT JOIN item_inventory i
ON po.item_num = i.itemnum
ORDER BY po.item_num, po.pr_issue_date DESC;
""")
# Execute the query
@@ -949,4 +1397,29 @@ ORDER BY lss.asset_location, lss.itemnum;""")
async def create_remark(*, db_session, collector_db_session, remark_in):
# Step 1: Check if remark already exists for this itemnum
result = await db_session.execute(
select(SparepartRemark).where(SparepartRemark.itemnum == remark_in.itemnum)
)
existing_remark = result.scalar_one_or_none()
# Step 2: If a remark already exists for this itemnum, update it in place
if existing_remark:
# Update the existing remark with the new text
existing_remark.remark = remark_in.remark
await db_session.commit()
await db_session.refresh(existing_remark)
return existing_remark
# Step 3: If it doesn't exist, create a new one
new_remark = SparepartRemark(
itemnum=remark_in.itemnum,
remark=remark_in.remark,
)
db_session.add(new_remark)
await db_session.commit()
await db_session.refresh(new_remark)
return new_remark
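# Minimal usage sketch (illustrative only; the payload type and field values are assumptions):
#
#   remark_in = SparepartRemarkCreate(itemnum="ITEM-001", remark="Long lead time item")
#   remark = await create_remark(
#       db_session=db_session,
#       collector_db_session=collector_db_session,
#       remark_in=remark_in,
#   )
#   # Returns the existing row with an updated remark, or a newly created SparepartRemark.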

@@ -37,8 +37,9 @@ class ScopeEquipmentRead(ScopeEquipmentBase):
master_equipment: Optional[MasterEquipmentBase] = Field(None)
class ScopeEquipmentPagination(DefultBase):
items: List[ScopeEquipmentRead] = []
total: int
class MasterEquipmentRead(DefultBase):
assetnum: Optional[str] = Field(None, title="Asset Number")

@@ -60,8 +60,15 @@ async def get_all(*, common, oh_scope: Optional[str] = None):
# ).distinct()
)
results = await common['db_session'].execute(query)
items = results.scalars().all()
return {
"items": items,
"total": len(items)
}
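# Response payload of get_all: {"items": [ScopeEquipmentRead, ...], "total": <number of rows returned>}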
async def get_by_oh_session_id(*, db_session: DbSession, oh_session_id: UUID):
