Cizz22 3 months ago
parent 1aa9e1e9aa
commit 05992bd4e2

poetry.lock generated

@ -1,4 +1,16 @@
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.
[[package]]
name = "absl-py"
version = "2.3.1"
description = "Abseil Python Common Libraries, see https://github.com/abseil/abseil-py."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "absl_py-2.3.1-py3-none-any.whl", hash = "sha256:eeecf07f0c2a93ace0772c92e596ace6d3d3996c042b2128459aaae2a76de11d"},
{file = "absl_py-2.3.1.tar.gz", hash = "sha256:a97820526f7fbfd2ec1bce83f3f25e3a14840dac0d8e02a0b71cd75db3f77fc9"},
]
[[package]]
name = "aiohappyeyeballs"
@ -686,8 +698,8 @@ files = [
google-auth = ">=2.14.1,<3.0.0"
googleapis-common-protos = ">=1.56.2,<2.0.0"
proto-plus = [
{version = ">=1.22.3,<2.0.0"},
{version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
{version = ">=1.22.3,<2.0.0", markers = "python_version < \"3.13\""},
]
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
requests = ">=2.18.0,<3.0.0"
@ -1030,6 +1042,18 @@ files = [
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "immutabledict"
version = "4.2.1"
description = "Immutable wrapper around dictionaries (a fork of frozendict)"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373"},
{file = "immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc"},
]
[[package]]
name = "importlib-resources"
version = "6.4.5"
@ -1420,6 +1444,63 @@ rsa = ["cryptography (>=3.0.0)"]
signals = ["blinker (>=1.4.0)"]
signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]
[[package]]
name = "ortools"
version = "9.14.6206"
description = "Google OR-Tools python libraries and modules"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "ortools-9.14.6206-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:6e2364edd1577cd094e7c7121ec5fb0aa462a69a78ce29cdc40fa45943ff0091"},
{file = "ortools-9.14.6206-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:164b726b4d358ae68a018a52ff1999c0646d6f861b33676c2c83e2ddb60cfa13"},
{file = "ortools-9.14.6206-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ebb0e210969cc3246fe78dadf9038936a3a18edc8156e23a394e2bbcec962431"},
{file = "ortools-9.14.6206-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:174de2f04c106c7dcc5989560f2c0e065e78fba0ad0d1fd029897582f4823c3a"},
{file = "ortools-9.14.6206-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e6d994ebcf9cbdda1e20a75662967124e7e6ffd707c7f60b2db1a11f2104d384"},
{file = "ortools-9.14.6206-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5763472f8b05072c96c36c4eafadd9f6ffcdab38a81d8f0142fc408ad52a4342"},
{file = "ortools-9.14.6206-cp310-cp310-win_amd64.whl", hash = "sha256:6711516f837f06836ff9fda66fe4337b88c214f2ba6a921b84d3b05876f1fa8c"},
{file = "ortools-9.14.6206-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:8bcd8481846090585a4fac82800683555841685c49fa24578ad1e48a37918568"},
{file = "ortools-9.14.6206-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5af2bbf2fff7d922ba036e27d7ff378abecb24749380c86a77fa6208d5ba35cd"},
{file = "ortools-9.14.6206-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a6ab43490583c4bbf0fff4e51bb1c15675d5651c2e8e12ba974fd08e8c05a48f"},
{file = "ortools-9.14.6206-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9aa2c0c50a765c6a060960dcb0207bd6aeb6341f5adacb3d33e613b7e7409428"},
{file = "ortools-9.14.6206-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:64ec63fd92125499e9ca6b72700406dda161eefdfef92f04c35c5150391f89a4"},
{file = "ortools-9.14.6206-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:8651008f05257471f45a919ade5027afa12ab6f7a4fdf0a8bcc18c92032f8571"},
{file = "ortools-9.14.6206-cp311-cp311-win_amd64.whl", hash = "sha256:ca60877830a631545234e83e7f6bd55830334a4d0c2b51f1669b1f2698d58b84"},
{file = "ortools-9.14.6206-cp312-cp312-macosx_10_15_x86_64.whl", hash = "sha256:e38c8c4a184820cbfdb812a8d484f6506cf16993ce2a95c88bc1c9d23b17c63e"},
{file = "ortools-9.14.6206-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:db685073cbed9f8bfaa744f5e883f3dea57c93179b0abe1788276fd3b074fa61"},
{file = "ortools-9.14.6206-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d4bfb8bffb29991834cf4bde7048ca8ee8caed73e8dd21e5ec7de99a33bbfea0"},
{file = "ortools-9.14.6206-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eb464a698837e7f90ca5f9b3d748b6ddf553198a70032bc77824d1cd88695d2b"},
{file = "ortools-9.14.6206-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8f33deaeb7c3dda8ca1d29c5b9aa9c3a4f2ca9ecf34f12a1f809bb2995f41274"},
{file = "ortools-9.14.6206-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:086e7c2dc4f23efffb20a5e20f618c7d6adb99b2d94f684cab482387da3bc434"},
{file = "ortools-9.14.6206-cp312-cp312-win_amd64.whl", hash = "sha256:17c13b0bfde17ac57789ad35243edf1318ecd5db23cf949b75ab62480599f188"},
{file = "ortools-9.14.6206-cp313-cp313-macosx_10_15_x86_64.whl", hash = "sha256:8d0df7eef8ba53ad235e29018389259bad2e667d9594b9c2a412ed6a5756bd4e"},
{file = "ortools-9.14.6206-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:57dfe10844ce8331634d4723040fe249263fd490407346efc314c0bc656849b5"},
{file = "ortools-9.14.6206-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5c0c2c00a6e5d5c462e76fdda7dbd40d0f9139f1df4211d34b36906696248020"},
{file = "ortools-9.14.6206-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:38044cf39952d93cbcc02f6acdbe0a9bd3628fbf17f0d7eb0374060fa028c22e"},
{file = "ortools-9.14.6206-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:98564de773d709e1e49cb3c32f6917589c314f047786d88bd5f324c0eb7be96e"},
{file = "ortools-9.14.6206-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:80528b0ac72dc3de00cbeef2ce028517a476450b5877b1cda1b8ecb9fa98505e"},
{file = "ortools-9.14.6206-cp313-cp313-win_amd64.whl", hash = "sha256:47b1b15dcb085d32c61621b790259193aefa9e4577abadf233d47fbe7d0b81ef"},
{file = "ortools-9.14.6206-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d26a0f9ed97ef9d3384a9069923585f5f974c3fde555a41f4d6381fbe7840bc4"},
{file = "ortools-9.14.6206-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d40d8141667d47405f296a9f687058c566d7816586e9a672b59e9fcec8493133"},
{file = "ortools-9.14.6206-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:aefea81ed81aa937873efc520381785ed65380e52917f492ab566f46bbb5660d"},
{file = "ortools-9.14.6206-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f044bb277db3ab6a1b958728fe1cf14ca87c3800d67d7b321d876b48269340f6"},
{file = "ortools-9.14.6206-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:070dc7cebfa0df066acb6b9a6d02339351be8f91b2352b782ee7f40412207e20"},
{file = "ortools-9.14.6206-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5eb558a03b4ada501ecdea7b89f0d3bdf2cc6752e1728759ccf27923f592a8c2"},
{file = "ortools-9.14.6206-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:646329fa74a5c48c591b7fabfd26743f6d2de4e632b3b96ec596c47bfe19177a"},
{file = "ortools-9.14.6206-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aa5161924f35b8244295acd0fab2a8171bb08ef8d5cfaf1913a21274475704cc"},
{file = "ortools-9.14.6206-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e253526a026ae194aed544a0d065163f52a0c9cb606a1061c62df546877d5452"},
{file = "ortools-9.14.6206-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:dcb496ef633d884036770783f43bf8a47ff253ecdd8a8f5b95f00276ec241bfd"},
{file = "ortools-9.14.6206-cp39-cp39-win_amd64.whl", hash = "sha256:2733f635675de631fdc7b1611878ec9ee2f48a26434b7b3c07d0a0f535b92e03"},
]
[package.dependencies]
absl-py = ">=2.0.0"
immutabledict = ">=3.0.0"
numpy = ">=1.13.3"
pandas = ">=2.0.0"
protobuf = ">=6.31.1,<6.32"
typing-extensions = ">=4.12"
[[package]]
name = "packaging"
version = "24.2"
@ -1486,8 +1567,8 @@ files = [
[package.dependencies]
numpy = [
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
{version = ">=1.26.0", markers = "python_version >= \"3.12\""},
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
]
python-dateutil = ">=2.8.2"
pytz = ">=2020.1"
@ -1662,23 +1743,21 @@ testing = ["google-api-core (>=1.31.5)"]
[[package]]
name = "protobuf"
version = "5.29.0"
version = "6.31.1"
description = ""
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "protobuf-5.29.0-cp310-abi3-win32.whl", hash = "sha256:ea7fb379b257911c8c020688d455e8f74efd2f734b72dc1ea4b4d7e9fd1326f2"},
{file = "protobuf-5.29.0-cp310-abi3-win_amd64.whl", hash = "sha256:34a90cf30c908f47f40ebea7811f743d360e202b6f10d40c02529ebd84afc069"},
{file = "protobuf-5.29.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:c931c61d0cc143a2e756b1e7f8197a508de5365efd40f83c907a9febf36e6b43"},
{file = "protobuf-5.29.0-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:85286a47caf63b34fa92fdc1fd98b649a8895db595cfa746c5286eeae890a0b1"},
{file = "protobuf-5.29.0-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:0d10091d6d03537c3f902279fcf11e95372bdd36a79556311da0487455791b20"},
{file = "protobuf-5.29.0-cp38-cp38-win32.whl", hash = "sha256:0cd67a1e5c2d88930aa767f702773b2d054e29957432d7c6a18f8be02a07719a"},
{file = "protobuf-5.29.0-cp38-cp38-win_amd64.whl", hash = "sha256:e467f81fdd12ded9655cea3e9b83dc319d93b394ce810b556fb0f421d8613e86"},
{file = "protobuf-5.29.0-cp39-cp39-win32.whl", hash = "sha256:17d128eebbd5d8aee80300aed7a43a48a25170af3337f6f1333d1fac2c6839ac"},
{file = "protobuf-5.29.0-cp39-cp39-win_amd64.whl", hash = "sha256:6c3009e22717c6cc9e6594bb11ef9f15f669b19957ad4087214d69e08a213368"},
{file = "protobuf-5.29.0-py3-none-any.whl", hash = "sha256:88c4af76a73183e21061881360240c0cdd3c39d263b4e8fb570aaf83348d608f"},
{file = "protobuf-5.29.0.tar.gz", hash = "sha256:445a0c02483869ed8513a585d80020d012c6dc60075f96fa0563a724987b1001"},
{file = "protobuf-6.31.1-cp310-abi3-win32.whl", hash = "sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9"},
{file = "protobuf-6.31.1-cp310-abi3-win_amd64.whl", hash = "sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447"},
{file = "protobuf-6.31.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402"},
{file = "protobuf-6.31.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39"},
{file = "protobuf-6.31.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6"},
{file = "protobuf-6.31.1-cp39-cp39-win32.whl", hash = "sha256:0414e3aa5a5f3ff423828e1e6a6e907d6c65c1d5b7e6e975793d5590bdeecc16"},
{file = "protobuf-6.31.1-cp39-cp39-win_amd64.whl", hash = "sha256:8764cf4587791e7564051b35524b72844f845ad0bb011704c3736cce762d8fe9"},
{file = "protobuf-6.31.1-py3-none-any.whl", hash = "sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e"},
{file = "protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a"},
]
[[package]]
@ -2929,4 +3008,4 @@ propcache = ">=0.2.1"
[metadata]
lock-version = "2.1"
python-versions = "^3.11"
content-hash = "696ea33fe5b5efd49565f0b6216a2f40a85d62d34b27693ac6271b676f94897d"
content-hash = "6c2a5a5a8e6a2732bd9e94de4bac3a7c0d3e63d959d5793b23eb327c7a95f3f8"

@ -30,6 +30,7 @@ google-api-python-client = "^2.169.0"
google-auth-httplib2 = "^0.2.0"
google-auth-oauthlib = "^1.2.2"
aiohttp = "^3.12.14"
ortools = "^9.14.6206"
[build-system]
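
Reviewer note: the new ortools entry is what drives the lock regeneration above; ortools 9.14 pulls in absl-py and immutabledict and pins protobuf to >=6.31.1,<6.32, which forces the protobuf bump from 5.29.0 to 6.31.1. A minimal smoke test for the freshly locked solver, as a sketch only (this commit does not yet show any ortools usage of its own):

# Hedged sketch: verify the new ortools dependency imports and solves a trivial LP.
from ortools.linear_solver import pywraplp

solver = pywraplp.Solver.CreateSolver("GLOP")   # LP backend bundled with ortools
x = solver.NumVar(0.0, 10.0, "x")               # 0 <= x <= 10
solver.Maximize(x)

status = solver.Solve()
assert status == pywraplp.Solver.OPTIMAL
print("ortools OK, x =", x.solution_value())    # expected: 10.0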

@ -36,7 +36,7 @@ async def get_target_reliability(
collector_db: CollectorDbSession,
oh_session_id: Optional[str] = Query(None),
eaf_input: float = Query(99.8),
duration: int = Query(8760),
duration: int = Query(17520),
simulation_id: Optional[str] = Query(None)
):
"""Get all scope pagination."""
@ -52,14 +52,14 @@ async def get_target_reliability(
# eaf_input=eaf_input,
# oh_duration=duration
# )
# simulation_id = await run_rbd_simulation(
# sim_hours=duration,
# token=token
# )
if not simulation_id:
simulation_id = "default"
if not simulation_id or duration == 17520:
simulation_id = "1b4e967c-a914-4906-93ed-ad7dd9ff217b"
else:
simulation_id = await run_rbd_simulation(
sim_hours=duration,
token=token
)
results = await get_simulation_results(
simulation_id=simulation_id,
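
Reviewer note: the default duration moves from 8760 to 17520 hours, i.e. from one to two simulated years, and the two-year default maps to a fixed simulation id instead of triggering a new run. A small sketch of the new dispatch; the "cached pre-computed run" reading of the hard-coded id is an assumption, only the constants and the id itself come from the diff:

# Sketch, assuming the hard-coded id refers to a pre-computed two-year RBD run.
HOURS_PER_YEAR = 365 * 24              # 8760, the old default
DEFAULT_DURATION = 2 * HOURS_PER_YEAR  # 17520, the new default

async def resolve_simulation_id(simulation_id, duration, token):
    if not simulation_id or duration == DEFAULT_DURATION:
        return "1b4e967c-a914-4906-93ed-ad7dd9ff217b"  # cached run (assumed)
    return await run_rbd_simulation(sim_hours=duration, token=token)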

@ -39,6 +39,8 @@ class AssetWeight(OverhaulBase):
num_of_failures: int
down_time: float
efficiency: float
improvement_impact: float
birbaum: float
class MaintenanceScenario(OverhaulBase):
location_tag: str

@ -96,44 +96,51 @@ def calculate_asset_eaf_contributions(plant_result, eq_results, standard_scope,
if asset_name not in standard_scope:
continue
birnbaum = asset.get("contribution", 0.0)
contribution_factor = asset.get("contribution_factor", 0.0)
birbaum = asset.get("contribution", 0.0)
current_availability = asset.get("availability", 0.0)
downtime = asset.get("total_downtime", 0.0)
# Filter 1: Importance too low
if birnbaum < MIN_BIRNBAUM_IMPORTANCE:
if contribution_factor < MIN_BIRNBAUM_IMPORTANCE:
continue
# Max possible availability improvement
max_possible_improvement = REALISTIC_MAX_AVAILABILITY - current_availability
if max_possible_improvement <= 0:
continue
# Required improvement (limited by plant gap and availability ceiling)
required_impr = min(eaf_gap_fraction, max_possible_improvement) * birnbaum
# Inject a standard improvement for each equipment item
required_improvement = 0.01
improvement_impact = required_improvement * contribution_factor
# Filter 2: Improvement too small
if required_impr < min_improvement_fraction:
continue
# if required_impr < min_improvement_fraction:
# continue
# Contribution efficiency (secondary metric)
efficiency = birnbaum / downtime if downtime > 0 else birnbaum
efficiency = birbaum / downtime if downtime > 0 else birbaum
contribution = AssetWeight(
node=asset.get("aeros_node"),
availability=current_availability,
contribution=birnbaum,
required_improvement=required_impr,
contribution=contribution_factor,
required_improvement=required_improvement,
improvement_impact=improvement_impact,
num_of_failures=asset.get("num_events", 0),
down_time=downtime,
efficiency= efficiency
efficiency= efficiency,
birbaum=birbaum
)
results.append(contribution)
# Sort by Birnbaum importance (descending)
results.sort(key=lambda x: (x.contribution, x.efficiency), reverse=True)
results.sort(key=lambda x: (x.birbaum), reverse=True)
return results
@ -195,14 +202,17 @@ async def identify_worst_eaf_contributors(
if project_eaf_improvement >= eaf_gap:
break
if (project_eaf_improvement + asset.required_improvement) <= eaf_gap:
if (project_eaf_improvement + asset.improvement_impact) <= eaf_gap:
selected_eq.append(asset)
project_eaf_improvement += asset.required_improvement
project_eaf_improvement += asset.improvement_impact
else:
# allow overshoot tolerance by skipping large ones, continue with smaller ones
continue
possible_eaf_plant = current_plant_eaf + project_eaf_improvement
possible_eaf_plant = current_plant_eaf + project_eaf_improvement*100
selected_eq.sort(key=lambda x: (x.birbaum), reverse=True)
# Build output with efficiency included
return OptimizationResult(
@ -215,7 +225,9 @@ async def identify_worst_eaf_contributors(
"node": asset.node,
"availability": asset.availability,
"contribution": asset.contribution,
"sensitivy": asset.birbaum,
"required_improvement": asset.required_improvement,
"system_impact": asset.improvement_impact,
"num_of_failures": asset.num_of_failures,
"down_time": asset.down_time,
"efficiency": asset.efficiency,

@ -21,7 +21,7 @@ from .service import (create_calculation_result_service, create_param_and_data,
get_calculation_by_reference_and_parameter,
get_calculation_data_by_id, get_calculation_result,
get_corrective_cost_time_chart,
get_overhaul_cost_by_time_chart, run_simulation, run_simulation_with_spareparts)
get_overhaul_cost_by_time_chart, run_simulation_with_spareparts)
from src.database.core import CollectorDbSession
@ -103,7 +103,7 @@ async def create_calculation(
db_session=db_session, calculation=calculation_data, token=token, collector_db_session=collector_db_session, simulation_id=rbd_simulation_id
)
return results["id"]
return results
async def get_or_create_scope_equipment_calculation(
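
Reviewer note: create_calculation now returns the whole results payload rather than just results["id"], so any client that previously treated the response body as a bare identifier needs to read the id out of the returned object (assuming the payload still carries one, as the old run_simulation result dict did).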

@ -50,435 +50,6 @@ client = httpx.AsyncClient(timeout=300.0)
log = logging.getLogger(__name__)
setup_logging(logger=log)
class OptimumCostModel:
def __init__(self, token: str, last_oh_date: date, next_oh_date: date,
time_window_months: Optional[int] = None,
base_url: str = "http://192.168.1.82:8000"):
"""
Initialize the Optimum Cost Model for overhaul timing optimization.
Args:
token: API authentication token
last_oh_date: Date of last overhaul
next_oh_date: Planned date of next overhaul
time_window_months: Analysis window in months (default: 1.5x planned interval)
base_url: API base URL
"""
self.api_base_url = base_url
self.token = token
self.last_oh_date = last_oh_date
self.next_oh_date = next_oh_date
self.session = None
# Calculate planned overhaul interval in months
self.planned_oh_months = self._get_months_between(last_oh_date, next_oh_date)
# Set analysis time window (default: 1.5x planned interval)
self.time_window_months = time_window_months or int(self.planned_oh_months * 1.5)
# Pre-calculate date range for API calls
self.date_range = self._generate_date_range()
# Setup logging
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__name__)
self.logger.info(f"OptimumCostModel initialized:")
self.logger.info(f" - Planned OH interval: {self.planned_oh_months} months")
self.logger.info(f" - Analysis window: {self.time_window_months} months")
def _get_months_between(self, start_date: date, end_date: date) -> int:
"""Calculate number of months between two dates"""
return (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)
def _generate_date_range(self) -> List[datetime]:
"""Generate date range for analysis based on time window"""
dates = []
current_date = datetime.combine(self.last_oh_date, datetime.min.time())
end_date = current_date + timedelta(days=self.time_window_months * 30)
while current_date <= end_date:
dates.append(current_date)
current_date += timedelta(days=31)
return dates
async def _create_session(self):
"""Create aiohttp session with connection pooling"""
if self.session is None:
timeout = aiohttp.ClientTimeout(total=300)
connector = aiohttp.TCPConnector(
limit=500,
limit_per_host=200,
ttl_dns_cache=300,
use_dns_cache=True,
force_close=False,
enable_cleanup_closed=True
)
self.session = aiohttp.ClientSession(
timeout=timeout,
connector=connector,
headers={'Authorization': f'Bearer {self.token}'}
)
async def _close_session(self):
"""Close aiohttp session"""
if self.session:
await self.session.close()
self.session = None
async def get_failures_prediction(self, simulation_id: str, location_tag: str, birnbaum_importance: float):
"""Get failure predictions for equipment from simulation service"""
plot_result_url = f"{self.api_base_url}/aeros/simulation/result/plot/{simulation_id}/{location_tag}?use_location_tag=1"
try:
response = requests.get(
plot_result_url,
headers={
"Content-Type": "application/json",
"Authorization": f"Bearer {self.token}",
},
timeout=30
)
response.raise_for_status()
prediction_data = response.json()
except (requests.RequestException, ValueError) as e:
self.logger.error(f"Failed to fetch prediction data for {location_tag}: {e}")
return None
plot_data = prediction_data.get('data', {}).get('timestamp_outs') if prediction_data.get("data") else None
if not plot_data:
self.logger.warning(f"No plot data available for {location_tag}")
return None
time_series = create_time_series_data(plot_data, 43830)
monthly_data = analyze_monthly_metrics(time_series)
return monthly_data
async def get_simulation_results(self, simulation_id: str = "default"):
"""Get simulation results for Birnbaum importance calculations"""
headers = {
"Authorization": f"Bearer {self.token}",
"Content-Type": "application/json"
}
calc_result_url = f"{self.api_base_url}/aeros/simulation/result/calc/{simulation_id}?nodetype=RegularNode"
plant_result_url = f"{self.api_base_url}/aeros/simulation/result/calc/{simulation_id}/plant"
async with httpx.AsyncClient(timeout=300.0) as client:
calc_task = client.get(calc_result_url, headers=headers)
plant_task = client.get(plant_result_url, headers=headers)
calc_response, plant_response = await asyncio.gather(calc_task, plant_task)
calc_response.raise_for_status()
plant_response.raise_for_status()
calc_data = calc_response.json()["data"]
plant_data = plant_response.json()["data"]
return {
"calc_result": calc_data,
"plant_result": plant_data
}
def _calculate_equipment_costs(self, failures_prediction: Dict, birnbaum_importance: float,
preventive_cost: float, failure_replacement_cost: float,
location_tag: str) -> List[Dict]:
"""Calculate costs for each month for a single equipment"""
if not failures_prediction:
self.logger.warning(f"No failure prediction data for {location_tag}")
return []
months = list(failures_prediction.keys())
num_months = len(months)
# Calculate risk costs and failure costs
risk_costs = []
cumulative_risk_costs = []
failure_counts = []
cumulative_risk = 0
for month_key in months:
data = failures_prediction[month_key]
# Risk cost = flow_rate × birnbaum_importance × downtime_hours × energy_price
monthly_risk = data['avg_flow_rate'] * birnbaum_importance * data['total_oos_hours'] * 1000000
risk_costs.append(monthly_risk)
cumulative_risk += monthly_risk
cumulative_risk_costs.append(cumulative_risk)
failure_counts.append(data['cumulative_failures'])
raise Exception(cumulative_risk_costs)
# Calculate costs for each month
results = []
for i in range(num_months):
month_index = i + 1
# Failure cost = cumulative failures × replacement cost + cumulative risk cost
failure_cost = (failure_counts[i] * failure_replacement_cost) + cumulative_risk_costs[i]
# Preventive cost = overhaul cost distributed over months
preventive_cost_month = preventive_cost / month_index
# Total cost = failure cost + preventive cost
total_cost = failure_cost + preventive_cost_month
results.append({
'month': month_index,
'number_of_failures': failure_counts[i],
'failure_cost': failure_cost,
'preventive_cost': preventive_cost_month,
'total_cost': total_cost,
'is_after_planned_oh': month_index > self.planned_oh_months,
'delay_months': max(0, month_index - self.planned_oh_months),
'risk_cost': cumulative_risk_costs[i],
'monthly_risk_cost': risk_costs[i],
'procurement_cost': 0, # For database compatibility
'procurement_details': [] # For database compatibility
})
return results
def _find_optimal_timing(self, cost_results: List[Dict], location_tag: str) -> Optional[Dict]:
"""Find optimal timing for equipment overhaul"""
if not cost_results:
return None
# Find month with minimum total cost
min_cost = float('inf')
optimal_result = None
optimal_index = -1
for i, result in enumerate(cost_results):
if result['total_cost'] < min_cost:
min_cost = result['total_cost']
optimal_result = result
optimal_index = i
if optimal_result is None:
return None
# Calculate cost comparison with planned timing
planned_cost = None
cost_vs_planned = None
if self.planned_oh_months <= len(cost_results):
planned_cost = cost_results[self.planned_oh_months - 1]['total_cost']
cost_vs_planned = optimal_result['total_cost'] - planned_cost
return {
'location_tag': location_tag,
'optimal_month': optimal_result['month'],
'optimal_index': optimal_index,
'optimal_cost': optimal_result['total_cost'],
'failure_cost': optimal_result['failure_cost'],
'preventive_cost': optimal_result['preventive_cost'],
'number_of_failures': optimal_result['number_of_failures'],
'is_delayed': optimal_result['is_after_planned_oh'],
'delay_months': optimal_result['delay_months'],
'planned_oh_month': self.planned_oh_months,
'planned_cost': planned_cost,
'cost_vs_planned': cost_vs_planned,
'savings_from_delay': -cost_vs_planned if cost_vs_planned and cost_vs_planned < 0 else 0,
'cost_of_delay': cost_vs_planned if cost_vs_planned and cost_vs_planned > 0 else 0,
'all_monthly_costs': cost_results
}
async def calculate_optimal_timing_single_equipment(self, equipment, birnbaum_importance: float,
simulation_id: str = "default") -> Optional[Dict]:
"""Calculate optimal overhaul timing for a single equipment"""
location_tag = equipment.location_tag
self.logger.info(f"Calculating optimal timing for {location_tag}")
# Get failure predictions
monthly_data = await self.get_failures_prediction(simulation_id, location_tag, birnbaum_importance)
if not monthly_data:
self.logger.warning(f"No monthly data available for {location_tag}")
return None
# Calculate costs
preventive_cost = equipment.overhaul_cost + equipment.service_cost
failure_replacement_cost = equipment.material_cost + (3 * 111000 * 3) # Material + Labor
cost_results = self._calculate_equipment_costs(
failures_prediction=monthly_data,
birnbaum_importance=birnbaum_importance,
preventive_cost=preventive_cost,
failure_replacement_cost=failure_replacement_cost,
location_tag=location_tag
)
# Find optimal timing
optimal_timing = self._find_optimal_timing(cost_results, location_tag)
if optimal_timing:
self.logger.info(f"Optimal timing for {location_tag}: Month {optimal_timing['optimal_month']} "
f"(Cost: ${optimal_timing['optimal_cost']:,.2f})")
if optimal_timing['is_delayed']:
self.logger.info(f" - Delay recommended: {optimal_timing['delay_months']} months")
self.logger.info(f" - Savings from delay: ${optimal_timing['savings_from_delay']:,.2f}")
return optimal_timing
async def calculate_cost_all_equipment(self, db_session, equipments: List, calculation,
preventive_cost: float, simulation_id: str = "default") -> Dict:
"""
Calculate optimal overhaul timing for entire fleet and save to database
"""
self.logger.info(f"Starting fleet optimization for {len(equipments)} equipment items")
max_interval = self.time_window_months
# Get Birnbaum importance values
try:
importance_results = await self.get_simulation_results(simulation_id)
equipment_birnbaum = {
imp['aeros_node']['node_name']: imp['contribution']
for imp in importance_results["calc_result"]
}
except Exception as e:
self.logger.error(f"Failed to get simulation results: {e}")
equipment_birnbaum = {}
# Initialize fleet aggregation arrays
fleet_results = []
total_corrective_costs = np.zeros(max_interval)
total_preventive_costs = np.zeros(max_interval)
total_procurement_costs = np.zeros(max_interval)
total_costs = np.zeros(max_interval)
for equipment in equipments:
location_tag = equipment.location_tag
birnbaum = equipment_birnbaum.get(location_tag, 0.0)
if birnbaum == 0.0:
self.logger.warning(f"No Birnbaum importance found for {location_tag}, using 0.0")
try:
# Get failure predictions
monthly_data = await self.get_failures_prediction(simulation_id, location_tag, birnbaum)
if not monthly_data:
continue
# Calculate costs
equipment_preventive_cost = equipment.overhaul_cost + equipment.service_cost
failure_replacement_cost = equipment.material_cost + (3 * 111000 * 3)
cost_results = self._calculate_equipment_costs(
failures_prediction=monthly_data,
birnbaum_importance=birnbaum,
preventive_cost=equipment_preventive_cost,
failure_replacement_cost=failure_replacement_cost,
location_tag=location_tag
)
if not cost_results:
continue
# Find optimal timing
optimal_timing = self._find_optimal_timing(cost_results, location_tag)
if not optimal_timing:
continue
# Prepare arrays for database (pad to max_interval length)
corrective_costs = [r["failure_cost"] for r in cost_results]
preventive_costs = [r["preventive_cost"] for r in cost_results]
procurement_costs = [r["procurement_cost"] for r in cost_results]
failures = [r["number_of_failures"] for r in cost_results]
total_costs_equipment = [r['total_cost'] for r in cost_results]
procurement_details = [r["procurement_details"] for r in cost_results]
# Pad arrays to max_interval length
def pad_array(arr, target_length):
if len(arr) < target_length:
return arr + [arr[-1]] * (target_length - len(arr)) # Use last value for padding
return arr[:target_length]
corrective_costs = pad_array(corrective_costs, max_interval)
preventive_costs = pad_array(preventive_costs, max_interval)
procurement_costs = pad_array(procurement_costs, max_interval)
failures = pad_array(failures, max_interval)
total_costs_equipment = pad_array(total_costs_equipment, max_interval)
procurement_details = pad_array(procurement_details, max_interval)
# Create database result object
equipment_result = CalculationEquipmentResult(
corrective_costs=corrective_costs,
overhaul_costs=preventive_costs,
procurement_costs=procurement_costs,
daily_failures=failures,
location_tag=equipment.location_tag,
material_cost=equipment.material_cost,
service_cost=equipment.service_cost,
optimum_day=optimal_timing['optimal_index'],
calculation_data_id=calculation.id,
procurement_details=procurement_details
)
fleet_results.append(equipment_result)
# Aggregate costs for fleet analysis
total_corrective_costs += np.array(corrective_costs)
total_preventive_costs += np.array(preventive_costs)
total_procurement_costs += np.array(procurement_costs)
total_costs += np.array(total_costs_equipment)
self.logger.info(f"Processed {location_tag}: Optimal month {optimal_timing['optimal_month']}")
except Exception as e:
self.logger.error(f"Failed to calculate timing for {location_tag}: {e}")
continue
# Calculate fleet optimal interval
fleet_optimal_index = np.argmin(total_costs)
fleet_optimal_cost = total_costs[fleet_optimal_index]
# Update calculation with results
calculation.optimum_oh_day = fleet_optimal_index
calculation.max_interval = max_interval
# Save all results to database
db_session.add_all(fleet_results)
await db_session.commit()
self.logger.info(f"Fleet optimization completed:")
self.logger.info(f" - Fleet optimal month: {fleet_optimal_index + 1}")
self.logger.info(f" - Fleet optimal cost: ${fleet_optimal_cost:,.2f}")
self.logger.info(f" - Results saved to database for {len(fleet_results)} equipment")
return {
'id': calculation.id,
'fleet_results': fleet_results,
'fleet_optimal_interval': fleet_optimal_index + 1,
'fleet_optimal_cost': fleet_optimal_cost,
'total_corrective_costs': total_corrective_costs.tolist(),
'total_preventive_costs': total_preventive_costs.tolist(),
'total_procurement_costs': total_procurement_costs.tolist(),
'analysis_parameters': {
'planned_oh_months': self.planned_oh_months,
'analysis_window_months': self.time_window_months,
'last_oh_date': self.last_oh_date.isoformat(),
'next_oh_date': self.next_oh_date.isoformat()
}
}
class OptimumCostModelWithSpareparts:
def __init__(self, token: str, last_oh_date: date, next_oh_date: date,
sparepart_manager,
@ -603,7 +174,7 @@ class OptimumCostModelWithSpareparts:
}
def _calculate_equipment_costs_with_spareparts(self, failures_prediction: Dict, birnbaum_importance: float,
preventive_cost: float, failure_replacement_cost: float,
preventive_cost: float, failure_replacement_cost: float, ecs,
location_tag: str, planned_overhauls: List = None) -> List[Dict]:
"""Calculate costs for each month including sparepart costs and availability"""
@ -614,21 +185,20 @@ class OptimumCostModelWithSpareparts:
months = list(failures_prediction.keys())
num_months = len(months)
# Calculate basic costs (same as before)
risk_costs = []
cumulative_risk_costs = []
failure_counts = []
cumulative_risk = 0
monthly_risk_cost_per_failure = 0
if ecs:
is_trip = 1 if ecs.get("Diskripsi Operasional Akibat Equip. Failure") == "Trip" else 0
if is_trip:
downtime = ecs.get("Estimasi Waktu Maint. / Downtime / Gangguan (Jam)")
monthly_risk_cost_per_failure = 660 * 1000000 * is_trip * downtime
print("ECS Trip", location_tag, monthly_risk_cost_per_failure)
for month_key in months:
data = failures_prediction[month_key]
monthly_risk = data['avg_flow_rate'] * birnbaum_importance * data['total_oos_hours'] * 1000000
risk_costs.append(monthly_risk)
cumulative_risk += monthly_risk
cumulative_risk_costs.append(cumulative_risk)
failure_counts.append(data['cumulative_failures'])
# Calculate costs for each month including sparepart considerations
@ -638,7 +208,7 @@ class OptimumCostModelWithSpareparts:
month_index = i + 1
# Basic failure and preventive costs
failure_cost = (failure_counts[i] * failure_replacement_cost) + cumulative_risk_costs[i]
failure_cost = (failure_counts[i] * (failure_replacement_cost + monthly_risk_cost_per_failure))
preventive_cost_month = preventive_cost / month_index
# Check sparepart availability for this month
@ -667,8 +237,6 @@ class OptimumCostModelWithSpareparts:
'total_cost': total_cost,
'is_after_planned_oh': month_index > self.planned_oh_months,
'delay_months': max(0, month_index - self.planned_oh_months),
'risk_cost': cumulative_risk_costs[i],
'monthly_risk_cost': risk_costs[i],
'procurement_details': procurement_details,
'sparepart_available': sparepart_analysis['available'],
'sparepart_status': sparepart_analysis['message'],
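
Reviewer note: the cumulative flow-rate-based risk term (avg_flow_rate x Birnbaum x OoS hours x 1e6) is replaced by a per-failure penalty taken from the ECS JSON: failures that trip the unit are charged 660 x 1,000,000 per downtime hour, and that penalty is bundled into each predicted failure. A toy calculation under those constants; everything else is illustrative, and the unit behind the 660 figure is not stated in the diff:

# Illustrative numbers only; 660 and 1_000_000 are the constants from the diff.
failure_replacement_cost = 50_000_000        # e.g. material_cost + 3 * 111_000 * 3 labour
downtime_hours = 12                          # "Estimasi Waktu Maint. / Downtime / Gangguan (Jam)"
is_trip = 1                                  # failure trips the unit per the ECS record

monthly_risk_cost_per_failure = 660 * 1_000_000 * is_trip * downtime_hours  # 7,920,000,000

cumulative_failures = 2                      # predicted failures up to this month
failure_cost = cumulative_failures * (failure_replacement_cost + monthly_risk_cost_per_failure)
# = 2 * (50_000_000 + 7_920_000_000) = 15,940,000,000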
@ -788,45 +356,55 @@ class OptimumCostModelWithSpareparts:
# Phase 1: Calculate individual optimal timings without considering interactions
individual_results = {}
with open('src/calculation_time_constrains/full_equipment_with_downtime_opdesc.json', 'r') as f:
data = json.load(f)
ecs_tags = {
eq["Location"]: eq
for eq in data
}
for equipment in equipments:
location_tag = equipment.location_tag
birnbaum = equipment_birnbaum.get(location_tag, 0.0)
ecs = ecs_tags.get(location_tag, None)
try:
# Get failure predictions
monthly_data = await self.get_failures_prediction(simulation_id, location_tag, birnbaum)
if not monthly_data:
continue
# Calculate costs without considering other equipment (first pass)
equipment_preventive_cost = equipment.overhaul_cost + equipment.service_cost
failure_replacement_cost = equipment.material_cost + (3 * 111000 * 3)
cost_results = self._calculate_equipment_costs_with_spareparts(
failures_prediction=monthly_data,
birnbaum_importance=birnbaum,
preventive_cost=equipment_preventive_cost,
failure_replacement_cost=failure_replacement_cost,
location_tag=location_tag,
planned_overhauls=[] # Empty in first pass
)
if not cost_results:
continue
# Find individual optimal timing
optimal_timing = self._find_optimal_timing_with_spareparts(cost_results, location_tag)
# try:
# # Get failure predictions
monthly_data = await self.get_failures_prediction(simulation_id, location_tag, birnbaum)
if not monthly_data:
continue
# Calculate costs without considering other equipment (first pass)
equipment_preventive_cost = equipment.overhaul_cost + equipment.service_cost
failure_replacement_cost = equipment.material_cost + (3 * 111000 * 3)
cost_results = self._calculate_equipment_costs_with_spareparts(
failures_prediction=monthly_data,
birnbaum_importance=birnbaum,
preventive_cost=equipment_preventive_cost,
failure_replacement_cost=failure_replacement_cost,
location_tag=location_tag,
planned_overhauls=[],  # Empty in first pass
ecs=ecs
)
if not cost_results:
continue
# Find individual optimal timing
optimal_timing = self._find_optimal_timing_with_spareparts(cost_results, location_tag)
if optimal_timing:
optimal_timing['all_monthly_costs'] = cost_results
individual_results[location_tag] = optimal_timing
if optimal_timing:
optimal_timing['all_monthly_costs'] = cost_results
individual_results[location_tag] = optimal_timing
self.logger.info(f"Individual optimal for {location_tag}: Month {optimal_timing['optimal_month']}")
self.logger.info(f"Individual optimal for {location_tag}: Month {optimal_timing['optimal_month']}")
except Exception as e:
self.logger.error(f"Failed to calculate individual timing for {location_tag}: {e}")
continue
# except Exception as e:
# self.logger.error(f"Failed to calculate individual timing for {location_tag}: {e}")
# raise Exception(e)
# Phase 2: Optimize considering sparepart interactions
self.logger.info("Phase 2: Optimizing with sparepart interactions...")
@ -1105,7 +683,9 @@ async def run_simulation_with_spareparts(*, db_session, calculation, token: str,
db_session=db_session, calculation_id=calculation.id
)
sparepart_manager = await load_sparepart_data_from_db(scope=scope, prev_oh_scope=prev_oh_scope, db_session=collector_db_session)
time_window_months = 60
sparepart_manager = await load_sparepart_data_from_db(scope=scope, prev_oh_scope=prev_oh_scope, db_session=collector_db_session, analysis_window_months=time_window_months)
# Initialize optimization model with sparepart management
optimum_oh_model = OptimumCostModelWithSpareparts(
@ -1138,57 +718,6 @@ async def run_simulation_with_spareparts(*, db_session, calculation, token: str,
await optimum_oh_model._close_session()
async def run_simulation(*, db_session, calculation, token: str, collector_db_session,
time_window_months: Optional[int] = None,
simulation_id: str = "default") -> Dict:
"""
Run complete overhaul optimization simulation
Args:
time_window_months: Analysis window in months (default: 1.5x planned interval)
simulation_id: Simulation ID for failure predictions
"""
# Get equipment and scope data
equipments = await get_standard_scope_by_session_id(
db_session=db_session,
overhaul_session_id=calculation.overhaul_session_id,
collector_db=collector_db_session
)
scope = await get_scope(db_session=db_session, overhaul_session_id=calculation.overhaul_session_id)
prev_oh_scope = await get_prev_oh(db_session=db_session, overhaul_session=scope)
calculation_data = await get_calculation_data_by_id(
db_session=db_session, calculation_id=calculation.id
)
# Initialize optimization model
optimum_oh_model = OptimumCostModel(
token=token,
last_oh_date=prev_oh_scope.end_date,
next_oh_date=scope.start_date,
time_window_months=time_window_months,
base_url=RBD_SERVICE_API
)
try:
# Run fleet optimization and save to database
results = await optimum_oh_model.calculate_cost_all_equipment(
db_session=db_session,
equipments=equipments,
calculation=calculation_data,
preventive_cost=calculation_data.parameter.overhaul_cost,
simulation_id=simulation_id
)
return results
finally:
await optimum_oh_model._close_session()
async def get_corrective_cost_time_chart(
material_cost: float,
service_cost: float,

@ -12,39 +12,42 @@ def get_months_between(start_date: datetime.datetime, end_date: datetime.datetim
return months
def create_time_series_data(chart_data, max_hours=24096):
# Filter out data points with currentEvent = "ON_OH"
filtered_data = [data for data in chart_data if data['currentEvent'] != 'ON_OH']
# Sort filtered data by cumulative time
sorted_data = sorted(filtered_data, key=lambda x: x['cumulativeTime'])
def create_time_series_data(chart_data, max_hours=None):
# Filter out ON_OH
filtered_data = [d for d in chart_data if d["currentEvent"] != "ON_OH"]
sorted_data = sorted(filtered_data, key=lambda x: x["cumulativeTime"])
if not sorted_data:
return []
hourly_data = []
current_state_index = 0
current_flow_rate = sorted_data[0]['flowRate']
current_eq_status = sorted_data[0]['currentEQStatus']
for hour in range(1, max_hours + 1):
# Check if we need to advance to the next state
while (current_state_index < len(sorted_data) - 1 and
hour >= int(sorted_data[current_state_index + 1]['cumulativeTime'])):
current_flow_rate = sorted_data[0]["flowRate"]
current_eq_status = sorted_data[0]["currentEQStatus"]
# Determine maximum bound (either given or from data)
last_time = int(sorted_data[-1]["cumulativeTime"])
if max_hours is None:
max_hours = last_time
for hour in range(0, max_hours + 1): # start from 0
# Advance state if needed
while (current_state_index < len(sorted_data) - 1 and
hour >= sorted_data[current_state_index + 1]["cumulativeTime"]):
current_state_index += 1
current_flow_rate = sorted_data[current_state_index]['flowRate']
current_eq_status = sorted_data[current_state_index]['currentEQStatus']
# Add hourly data point
current_flow_rate = sorted_data[current_state_index]["flowRate"]
current_eq_status = sorted_data[current_state_index]["currentEQStatus"]
hourly_data.append({
'cumulativeTime': hour,
'flowRate': current_flow_rate,
'currentEQStatus': current_eq_status
"cumulativeTime": hour,
"flowRate": current_flow_rate,
"currentEQStatus": current_eq_status
})
return hourly_data
def calculate_failures_per_month(hourly_data):
"""
Calculate the cumulative number of failures up to each month from hourly data.
@ -93,96 +96,67 @@ def calculate_failures_per_month(hourly_data):
return result
import pandas as pd
import datetime
def analyze_monthly_metrics(timestamp_outs):
"""
Analyze time series data to calculate monthly metrics:
1. Failure count per month
2. Cumulative failure count each month
3. Total out-of-service time per month
4. Average flow rate per month
"""
# Check if timestamp_outs is None or empty
if timestamp_outs is None or not timestamp_outs:
# Return empty results with zero values
if not timestamp_outs:
return {}
# Convert to DataFrame for easier manipulation
df = pd.DataFrame(timestamp_outs)
# Check if DataFrame is empty after creation
if df.empty:
return {}
# Check if required columns exist
required_columns = ['cumulativeTime', 'currentEQStatus', 'flowRate']
missing_columns = [col for col in required_columns if col not in df.columns]
if missing_columns:
if not all(col in df.columns for col in required_columns):
return {}
# Assuming the simulation starts from a reference date (you can modify this)
# For this example, I'll use January 1, 2024 as the start date
# Reference start date (adjust if needed)
start_date = datetime.datetime(2025, 10, 22)
# Convert cumulative hours to actual datetime
df['datetime'] = df['cumulativeTime'].apply(
lambda x: start_date + datetime.timedelta(hours=x)
)
# Extract month-year for grouping
df['datetime'] = df['cumulativeTime'].apply(lambda x: start_date + datetime.timedelta(hours=x))
df['month_year'] = df['datetime'].dt.to_period('M')
# Calculate time duration for each record (difference between consecutive cumulative times)
df['duration_hours'] = df['cumulativeTime'].diff().fillna(df['cumulativeTime'].iloc[0])
# Initialize results dictionary
# Duration until next timestamp
df['duration_hours'] = df['cumulativeTime'].shift(-1) - df['cumulativeTime']
df['duration_hours'] = df['duration_hours'].fillna(0)
# Failure detection (global, not per group)
df['status_change'] = df['currentEQStatus'].shift() != df['currentEQStatus']
df['failure'] = (df['currentEQStatus'] == 'OoS') & df['status_change']
# Cumulative tracking
df['cumulative_failures'] = df['failure'].cumsum()
df['cumulative_oos'] = (df['duration_hours'] * (df['currentEQStatus'] == 'OoS')).cumsum()
monthly_results = {}
# Track cumulative failures across all months
cumulative_failures = 0
cummulative_oos = 0
# Group by month-year and ensure chronological order
for month_period, group in df.groupby('month_year'):
for month_period, group in df.groupby('month_year', sort=True):
month_str = str(month_period)
monthly_results[month_str] = {}
# 1. Count failures per month
# A failure is when currentEQStatus changes from "Svc" to "OoS"
status_changes = group['currentEQStatus'].shift() != group['currentEQStatus']
failures = ((group['currentEQStatus'] == 'OoS') & status_changes).sum()
monthly_results[month_str]['failures_count'] = int(failures)
# 2. Add failures to cumulative count
cumulative_failures += failures
monthly_results[month_str]['cumulative_failures'] = int(cumulative_failures)
# 3. Total out-of-service time per month (in hours)
oos_time = group[group['currentEQStatus'] == 'OoS']['duration_hours'].sum()
# Failures
monthly_results[month_str]['failures_count'] = int(group['failure'].sum())
monthly_results[month_str]['cumulative_failures'] = int(group['cumulative_failures'].max())
# OOS hours
oos_time = group.loc[group['currentEQStatus'] == 'OoS', 'duration_hours'].sum()
monthly_results[month_str]['total_oos_hours'] = float(oos_time)
cummulative_oos += oos_time
monthly_results[month_str]['cummulative_oos'] = float(cummulative_oos)
# 4. Average flow rate per month (weighted by duration)
# Calculate weighted average flow rate
monthly_results[month_str]['cummulative_oos'] = float(group['cumulative_oos'].max())
# Flow rate (weighted average)
total_flow_time = (group['flowRate'] * group['duration_hours']).sum()
total_time = group['duration_hours'].sum()
avg_flow_rate = total_flow_time / total_time if total_time > 0 else 0
monthly_results[month_str]['avg_flow_rate'] = float(avg_flow_rate)
# Additional useful metrics
# Extra metrics
monthly_results[month_str]['total_hours'] = float(total_time)
monthly_results[month_str]['service_hours'] = float(
group[group['currentEQStatus'] == 'Svc']['duration_hours'].sum()
)
service_hours = group.loc[group['currentEQStatus'] == 'Svc', 'duration_hours'].sum()
monthly_results[month_str]['service_hours'] = float(service_hours)
monthly_results[month_str]['availability_percentage'] = float(
(monthly_results[month_str]['service_hours'] / total_time * 100) if total_time > 0 else 0
(service_hours / total_time * 100) if total_time > 0 else 0
)
return monthly_results
def calculate_risk_cost_per_failure(monthly_results, birnbaum_importance, energy_price):
"""
Calculate risk cost per failure for each month based on:
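
Reviewer note: a short usage sketch for the rewritten helpers, assuming they are imported from this module; the event fields mirror the timestamp_outs payload returned by the RBD plot endpoint:

# Synthetic plot data: one failure at hour 700, repaired 24 hours later.
events = [
    {"cumulativeTime": 0,    "flowRate": 1.0, "currentEQStatus": "Svc", "currentEvent": "RUN"},
    {"cumulativeTime": 700,  "flowRate": 0.0, "currentEQStatus": "OoS", "currentEvent": "FAIL"},
    {"cumulativeTime": 724,  "flowRate": 1.0, "currentEQStatus": "Svc", "currentEvent": "RUN"},
    {"cumulativeTime": 1500, "flowRate": 1.0, "currentEQStatus": "Svc", "currentEvent": "RUN"},
]

hourly = create_time_series_data(events)   # max_hours now defaults to the last cumulativeTime (1500)
monthly = analyze_monthly_metrics(hourly)  # keyed by month, starting from 2025-10-22

for month, metrics in monthly.items():
    print(month, metrics["failures_count"], metrics["total_oos_hours"],
          round(metrics["avg_flow_rate"], 3))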

@ -772,7 +772,7 @@ class SparepartManager:
# Integration functions for database operations
async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session) -> SparepartManager:
async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session, analysis_window_months = None) -> SparepartManager:
"""Load sparepart data from database"""
# You'll need to implement these queries based on your database schema
# Get scope dates for analysis window
@ -780,7 +780,7 @@ async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session) -> Spare
# prev_oh_scope = await get_prev_oh(db_session=db_session, overhaul_session=scope)
analysis_start_date = prev_oh_scope.end_date
analysis_window_months = int(((scope.start_date - prev_oh_scope.end_date).days / 30) * 1.5)
analysis_window_months = int(((scope.start_date - prev_oh_scope.end_date).days / 30) * 1.5) if not analysis_window_months else analysis_window_months
sparepart_manager = SparepartManager(analysis_start_date, analysis_window_months)
@ -970,54 +970,56 @@ ORDER BY lss.asset_location, lss.itemnum;""")
# Load procurement records (PO/PR)
query = text("""
WITH active_pos AS (
-- Get all POs that are NOT complete (not in inventory yet) and NOT closed
SELECT
pl.item_num,
h.num as po_number,
pl.qty_received,
pl.qty_ordered,
h.estimated_arrival_date,
h.vendeliverydate,
h.receipts as po_receipts,
h.status as po_status,
pl.description,
pl.unit_cost,
pl.line_cost
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl ON h.num = pl.num
WHERE h.type = 'PO'
-- Exclude POs where receipts = 'COMPLETE'
AND (h.receipts IS NULL OR h.receipts != 'COMPLETE')
-- Exclude closed POs
AND (h.status IS NULL OR h.status != 'CLOSE')
),
po_with_pr_date AS (
-- Join with PR to get the issue_date
SELECT
po.*,
pr.issue_date as pr_issue_date
FROM active_pos po
LEFT JOIN public.maximo_sparepart_pr_po pr
ON pr.num = po.po_number
AND pr.type = 'PR'
)
-- Get all POs that are NOT complete (not in inventory yet) and NOT closed
SELECT
po.item_num,
po.description,
po.line_cost,
po.unit_cost,
COALESCE(i.curbaltotal, 0) as current_balance_total,
po.po_number,
po.pr_issue_date,
po.po_status,
po.po_receipts,
COALESCE(po.qty_received, 0) as po_qty_received,
COALESCE(po.qty_ordered, 0) as po_qty_ordered,
po.estimated_arrival_date as po_estimated_arrival_date,
po.vendeliverydate as po_vendor_delivery_date
FROM po_with_pr_date po
LEFT JOIN public.maximo_inventory i ON po.item_num = i.itemnum
ORDER BY po.item_num, po.pr_issue_date DESC;
pl.item_num,
h.num as po_number,
pl.qty_received,
pl.qty_ordered,
h.estimated_arrival_date,
h.vendeliverydate,
h.receipts as po_receipts,
h.status as po_status,
pl.description,
pl.unit_cost,
pl.line_cost
FROM public.maximo_sparepart_pr_po h
JOIN public.maximo_sparepart_pr_po_line pl
ON h.num = pl.num
WHERE h.type = 'PO'
-- Exclude POs where receipts = 'COMPLETE'
AND (h.receipts IS NULL OR h.receipts != 'COMPLETE')
-- Keep only POs that are still open (no status yet) or approved
AND (h.status IS NULL OR h.status = 'APPR')
),
po_with_pr_date AS (
-- Force join with PR to ensure every PO has a PR
SELECT
po.*,
pr.issue_date as pr_issue_date
FROM active_pos po
INNER JOIN public.maximo_sparepart_pr_po pr
ON pr.num = po.po_number
AND pr.type = 'PR'
)
SELECT
po.item_num,
po.description,
po.line_cost,
po.unit_cost,
COALESCE(i.curbaltotal, 0) as current_balance_total,
po.po_number,
po.pr_issue_date,
po.po_status,
po.po_receipts,
COALESCE(po.qty_received, 0) as po_qty_received,
COALESCE(po.qty_ordered, 0) as po_qty_ordered,
po.estimated_arrival_date as po_estimated_arrival_date,
po.vendeliverydate as po_vendor_delivery_date
FROM po_with_pr_date po
LEFT JOIN public.maximo_inventory i
ON po.item_num = i.itemnum
ORDER BY po.item_num, po.pr_issue_date DESC;
""")
# Execute the query
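
Reviewer note: two behavioural changes in this query are easy to miss: the PO status filter now keeps only rows whose status is NULL or 'APPR' (previously anything other than 'CLOSE' passed), and the PR join is now an INNER JOIN, so POs without a matching PR row are dropped entirely instead of coming back with a NULL pr_issue_date.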
