Compare commits

...

1 Commit

Author SHA1 Message Date
Cizz22 aa106a5a85 WIP: tm using relibility 3 months ago

poetry.lock (generated, 87 changed lines)

@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.
[[package]]
name = "aiohappyeyeballs"
@@ -686,8 +686,8 @@ files = [
google-auth = ">=2.14.1,<3.0.0"
googleapis-common-protos = ">=1.56.2,<2.0.0"
proto-plus = [
-    {version = ">=1.22.3,<2.0.0"},
    {version = ">=1.25.0,<2.0.0", markers = "python_version >= \"3.13\""},
+    {version = ">=1.22.3,<2.0.0", markers = "python_version < \"3.13\""},
]
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<7.0.0"
requests = ">=2.18.0,<3.0.0"
@@ -1486,8 +1486,8 @@ files = [
[package.dependencies]
numpy = [
-    {version = ">=1.23.2", markers = "python_version == \"3.11\""},
    {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
+    {version = ">=1.23.2", markers = "python_version == \"3.11\""},
]
python-dateutil = ">=2.8.2"
pytz = ">=2020.1"
@@ -2163,6 +2163,85 @@ files = [
[package.dependencies]
pyasn1 = ">=0.1.3"

[[package]]
name = "scipy"
version = "1.16.2"
description = "Fundamental algorithms for scientific computing in Python"
optional = false
python-versions = ">=3.11"
groups = ["main"]
files = [
{file = "scipy-1.16.2-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:6ab88ea43a57da1af33292ebd04b417e8e2eaf9d5aa05700be8d6e1b6501cd92"},
{file = "scipy-1.16.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c95e96c7305c96ede73a7389f46ccd6c659c4da5ef1b2789466baeaed3622b6e"},
{file = "scipy-1.16.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:87eb178db04ece7c698220d523c170125dbffebb7af0345e66c3554f6f60c173"},
{file = "scipy-1.16.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:4e409eac067dcee96a57fbcf424c13f428037827ec7ee3cb671ff525ca4fc34d"},
{file = "scipy-1.16.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e574be127bb760f0dad24ff6e217c80213d153058372362ccb9555a10fc5e8d2"},
{file = "scipy-1.16.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f5db5ba6188d698ba7abab982ad6973265b74bb40a1efe1821b58c87f73892b9"},
{file = "scipy-1.16.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec6e74c4e884104ae006d34110677bfe0098203a3fec2f3faf349f4cb05165e3"},
{file = "scipy-1.16.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:912f46667d2d3834bc3d57361f854226475f695eb08c08a904aadb1c936b6a88"},
{file = "scipy-1.16.2-cp311-cp311-win_amd64.whl", hash = "sha256:91e9e8a37befa5a69e9cacbe0bcb79ae5afb4a0b130fd6db6ee6cc0d491695fa"},
{file = "scipy-1.16.2-cp311-cp311-win_arm64.whl", hash = "sha256:f3bf75a6dcecab62afde4d1f973f1692be013110cad5338007927db8da73249c"},
{file = "scipy-1.16.2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:89d6c100fa5c48472047632e06f0876b3c4931aac1f4291afc81a3644316bb0d"},
{file = "scipy-1.16.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ca748936cd579d3f01928b30a17dc474550b01272d8046e3e1ee593f23620371"},
{file = "scipy-1.16.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:fac4f8ce2ddb40e2e3d0f7ec36d2a1e7f92559a2471e59aec37bd8d9de01fec0"},
{file = "scipy-1.16.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:033570f1dcefd79547a88e18bccacff025c8c647a330381064f561d43b821232"},
{file = "scipy-1.16.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ea3421209bf00c8a5ef2227de496601087d8f638a2363ee09af059bd70976dc1"},
{file = "scipy-1.16.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f66bd07ba6f84cd4a380b41d1bf3c59ea488b590a2ff96744845163309ee8e2f"},
{file = "scipy-1.16.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5e9feab931bd2aea4a23388c962df6468af3d808ddf2d40f94a81c5dc38f32ef"},
{file = "scipy-1.16.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03dfc75e52f72cf23ec2ced468645321407faad8f0fe7b1f5b49264adbc29cb1"},
{file = "scipy-1.16.2-cp312-cp312-win_amd64.whl", hash = "sha256:0ce54e07bbb394b417457409a64fd015be623f36e330ac49306433ffe04bc97e"},
{file = "scipy-1.16.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a8ffaa4ac0df81a0b94577b18ee079f13fecdb924df3328fc44a7dc5ac46851"},
{file = "scipy-1.16.2-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:84f7bf944b43e20b8a894f5fe593976926744f6c185bacfcbdfbb62736b5cc70"},
{file = "scipy-1.16.2-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:5c39026d12edc826a1ef2ad35ad1e6d7f087f934bb868fc43fa3049c8b8508f9"},
{file = "scipy-1.16.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e52729ffd45b68777c5319560014d6fd251294200625d9d70fd8626516fc49f5"},
{file = "scipy-1.16.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:024dd4a118cccec09ca3209b7e8e614931a6ffb804b2a601839499cb88bdf925"},
{file = "scipy-1.16.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7a5dc7ee9c33019973a470556081b0fd3c9f4c44019191039f9769183141a4d9"},
{file = "scipy-1.16.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c2275ff105e508942f99d4e3bc56b6ef5e4b3c0af970386ca56b777608ce95b7"},
{file = "scipy-1.16.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:af80196eaa84f033e48444d2e0786ec47d328ba00c71e4299b602235ffef9acb"},
{file = "scipy-1.16.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9fb1eb735fe3d6ed1f89918224e3385fbf6f9e23757cacc35f9c78d3b712dd6e"},
{file = "scipy-1.16.2-cp313-cp313-win_amd64.whl", hash = "sha256:fda714cf45ba43c9d3bae8f2585c777f64e3f89a2e073b668b32ede412d8f52c"},
{file = "scipy-1.16.2-cp313-cp313-win_arm64.whl", hash = "sha256:2f5350da923ccfd0b00e07c3e5cfb316c1c0d6c1d864c07a72d092e9f20db104"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:53d8d2ee29b925344c13bda64ab51785f016b1b9617849dac10897f0701b20c1"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:9e05e33657efb4c6a9d23bd8300101536abd99c85cca82da0bffff8d8764d08a"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:7fe65b36036357003b3ef9d37547abeefaa353b237e989c21027b8ed62b12d4f"},
{file = "scipy-1.16.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:6406d2ac6d40b861cccf57f49592f9779071655e9f75cd4f977fa0bdd09cb2e4"},
{file = "scipy-1.16.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ff4dc42bd321991fbf611c23fc35912d690f731c9914bf3af8f417e64aca0f21"},
{file = "scipy-1.16.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:654324826654d4d9133e10675325708fb954bc84dae6e9ad0a52e75c6b1a01d7"},
{file = "scipy-1.16.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:63870a84cd15c44e65220eaed2dac0e8f8b26bbb991456a033c1d9abfe8a94f8"},
{file = "scipy-1.16.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:fa01f0f6a3050fa6a9771a95d5faccc8e2f5a92b4a2e5440a0fa7264a2398472"},
{file = "scipy-1.16.2-cp313-cp313t-win_amd64.whl", hash = "sha256:116296e89fba96f76353a8579820c2512f6e55835d3fad7780fece04367de351"},
{file = "scipy-1.16.2-cp313-cp313t-win_arm64.whl", hash = "sha256:98e22834650be81d42982360382b43b17f7ba95e0e6993e2a4f5b9ad9283a94d"},
{file = "scipy-1.16.2-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:567e77755019bb7461513c87f02bb73fb65b11f049aaaa8ca17cfaa5a5c45d77"},
{file = "scipy-1.16.2-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:17d9bb346194e8967296621208fcdfd39b55498ef7d2f376884d5ac47cec1a70"},
{file = "scipy-1.16.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:0a17541827a9b78b777d33b623a6dcfe2ef4a25806204d08ead0768f4e529a88"},
{file = "scipy-1.16.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:d7d4c6ba016ffc0f9568d012f5f1eb77ddd99412aea121e6fa8b4c3b7cbad91f"},
{file = "scipy-1.16.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9702c4c023227785c779cba2e1d6f7635dbb5b2e0936cdd3a4ecb98d78fd41eb"},
{file = "scipy-1.16.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d1cdf0ac28948d225decdefcc45ad7dd91716c29ab56ef32f8e0d50657dffcc7"},
{file = "scipy-1.16.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:70327d6aa572a17c2941cdfb20673f82e536e91850a2e4cb0c5b858b690e1548"},
{file = "scipy-1.16.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5221c0b2a4b58aa7c4ed0387d360fd90ee9086d383bb34d9f2789fafddc8a936"},
{file = "scipy-1.16.2-cp314-cp314-win_amd64.whl", hash = "sha256:f5a85d7b2b708025af08f060a496dd261055b617d776fc05a1a1cc69e09fe9ff"},
{file = "scipy-1.16.2-cp314-cp314-win_arm64.whl", hash = "sha256:2cc73a33305b4b24556957d5857d6253ce1e2dcd67fa0ff46d87d1670b3e1e1d"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:9ea2a3fed83065d77367775d689401a703d0f697420719ee10c0780bcab594d8"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:7280d926f11ca945c3ef92ba960fa924e1465f8d07ce3a9923080363390624c4"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:8afae1756f6a1fe04636407ef7dbece33d826a5d462b74f3d0eb82deabefd831"},
{file = "scipy-1.16.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:5c66511f29aa8d233388e7416a3f20d5cae7a2744d5cee2ecd38c081f4e861b3"},
{file = "scipy-1.16.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efe6305aeaa0e96b0ccca5ff647a43737d9a092064a3894e46c414db84bc54ac"},
{file = "scipy-1.16.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7f3a337d9ae06a1e8d655ee9d8ecb835ea5ddcdcbd8d23012afa055ab014f374"},
{file = "scipy-1.16.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bab3605795d269067d8ce78a910220262711b753de8913d3deeaedb5dded3bb6"},
{file = "scipy-1.16.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b0348d8ddb55be2a844c518cd8cc8deeeb8aeba707cf834db5758fc89b476a2c"},
{file = "scipy-1.16.2-cp314-cp314t-win_amd64.whl", hash = "sha256:26284797e38b8a75e14ea6631d29bda11e76ceaa6ddb6fdebbfe4c4d90faf2f9"},
{file = "scipy-1.16.2-cp314-cp314t-win_arm64.whl", hash = "sha256:d2a4472c231328d4de38d5f1f68fdd6d28a615138f842580a8a321b5845cf779"},
{file = "scipy-1.16.2.tar.gz", hash = "sha256:af029b153d243a80afb6eabe40b0a07f8e35c9adc269c019f364ad747f826a6b"},
]
[package.dependencies]
numpy = ">=1.25.2,<2.6"
[package.extras]
dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"]
doc = ["intersphinx_registry", "jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.19.1)", "jupytext", "linkify-it-py", "matplotlib (>=3.5)", "myst-nb (>=1.2.0)", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<8.2.0)", "sphinx-copybutton", "sphinx-design (>=0.4.0)"]
test = ["Cython", "array-api-strict (>=2.3.1)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja ; sys_platform != \"emscripten\"", "pooch", "pytest (>=8.0.0)", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
[[package]]
name = "shellingham"
version = "1.5.4"
@@ -2929,4 +3008,4 @@ propcache = ">=0.2.1"
[metadata]
lock-version = "2.1"
python-versions = "^3.11"
-content-hash = "696ea33fe5b5efd49565f0b6216a2f40a85d62d34b27693ac6271b676f94897d"
+content-hash = "91dbae2db3aade422091b46bec01e32cabc4457814e736775fc0d020d785fae5"

pyproject.toml

@@ -30,6 +30,7 @@ google-api-python-client = "^2.169.0"
google-auth-httplib2 = "^0.2.0"
google-auth-oauthlib = "^1.2.2"
aiohttp = "^3.12.14"
+scipy = "^1.16.2"

[build-system]
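The only dependency change besides lock-file churn is scipy, which backs the lognormal and normal reliability math added in utils further down. A quick sketch of the kind of call it enables (editor's illustration, values invented):

    import math
    from scipy.stats import lognorm
    # survival probability 1 - F(t) for a lognormal with mu=7, sigma=0.5 at t=1000 h
    print(1 - lognorm.cdf(1000, s=0.5, scale=math.exp(7)))  # ~0.57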

@@ -29,7 +29,7 @@ from .schema import (CalculationResultsRead,
                     CalculationTimeConstrainsParametersCreate,
                     CalculationTimeConstrainsRead, OptimumResult)
-from .utils import analyze_monthly_metrics, calculate_failures_per_month, calculate_risk_cost_per_failure, create_time_series_data, get_monthly_risk_analysis, get_months_between
+from .utils import analyze_monthly_metrics, calculate_failures_per_month, calculate_risk_cost_per_failure, create_time_series_data, failures_per_month, fetch_reliability, get_monthly_risk_analysis, get_months_between, simulate_failures
from src.equipment_sparepart.model import ScopeEquipmentPart
import copy
import random
@@ -545,7 +545,7 @@ class OptimumCostModelWithSpareparts:
        await self.session.close()
        self.session = None

-    async def get_failures_prediction(self, simulation_id: str, location_tag: str, birnbaum_importance: float):
+    async def max_flowrate(self, simulation_id: str, location_tag: str):
        """Get failure predictions for equipment from simulation service"""
        plot_result_url = f"{self.api_base_url}/aeros/simulation/result/plot/{simulation_id}/{location_tag}?use_location_tag=1"
@@ -563,17 +563,27 @@ class OptimumCostModelWithSpareparts:
        except (requests.RequestException, ValueError) as e:
            self.logger.error(f"Failed to fetch prediction data for {location_tag}: {e}")
            return None

-        plot_data = prediction_data.get('data', {}).get('timestamp_outs') if prediction_data.get("data") else None
-        if not plot_data:
-            self.logger.warning(f"No plot data available for {location_tag}")
-            return None
+        data = prediction_data.get('data', {})
+        if not data:
+            return None

-        time_series = create_time_series_data(plot_data, 43830)
-        monthly_data = analyze_monthly_metrics(time_series)
-        return monthly_data
+        max_flowrate = data.get("max_flow_rate")
+        return max_flowrate
+
+        # plot_data = prediction_data.get('data', {}).get('timestamp_outs') if prediction_data.get("data") else None
+        # if not plot_data:
+        #     self.logger.warning(f"No plot data available for {location_tag}")
+        #     return None
+        # time_series = create_time_series_data(plot_data, 43830)
+        # monthly_data = analyze_monthly_metrics(time_series)
+        # return monthly_data
    async def get_simulation_results(self, simulation_id: str = "default"):
        """Get simulation results for Birnbaum importance calculations"""
@@ -602,8 +612,8 @@ class OptimumCostModelWithSpareparts:
            "plant_result": plant_data
        }

-    def _calculate_equipment_costs_with_spareparts(self, failures_prediction: Dict, birnbaum_importance: float,
-                                                   preventive_cost: float, failure_replacement_cost: float,
+    def _calculate_equipment_costs_with_spareparts(self, failures_prediction: list, birnbaum_importance: float,
+                                                   preventive_cost: float, failure_replacement_cost: float, max_interval: int,
                                                    location_tag: str, planned_overhauls: List = None) -> List[Dict]:
        """Calculate costs for each month including sparepart costs and availability"""
@@ -611,8 +621,8 @@ class OptimumCostModelWithSpareparts:
            self.logger.warning(f"No failure prediction data for {location_tag}")
            return []

-        months = list(failures_prediction.keys())
-        num_months = len(months)
+        # months = list(failures_prediction.keys())
+        num_months = max_interval

        # Calculate basic costs (same as before)
        risk_costs = []
@@ -621,9 +631,10 @@ class OptimumCostModelWithSpareparts:
        cumulative_risk = 0

-        for month_key in months:
-            data = failures_prediction[month_key]
-            monthly_risk = data['avg_flow_rate'] * birnbaum_importance * data['total_oos_hours'] * 1000000
+        for i in range(num_months):
+            data = failures_prediction[i]
+            monthly_risk = data['avg_flowrate'] * birnbaum_importance * data['total_oos_hours'] * 1000000
            risk_costs.append(monthly_risk)
            cumulative_risk += monthly_risk
@@ -631,6 +642,7 @@ class OptimumCostModelWithSpareparts:
            failure_counts.append(data['cumulative_failures'])

        # Calculate costs for each month including sparepart considerations
        results = []
@@ -638,7 +650,7 @@ class OptimumCostModelWithSpareparts:
            month_index = i + 1

            # Basic failure and preventive costs
-            failure_cost = (failure_counts[i] * failure_replacement_cost) + cumulative_risk_costs[i]
+            failure_cost = (failure_counts[i] * (failure_replacement_cost)) + cumulative_risk_costs[i]
            preventive_cost_month = preventive_cost / month_index

            # Check sparepart availability for this month
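The optimum interval falls out of these formulas as the month that minimizes amortized preventive cost plus accumulated failure and risk cost. A small numeric illustration (editor's sketch, all values invented):

    failure_counts = [0, 0, 1, 1, 2]              # cumulative failures per month
    cumulative_risk_costs = [1e6, 2e6, 4e6, 7e6, 11e6]
    failure_replacement_cost = 50e6
    preventive_cost = 120e6

    totals = []
    for i in range(5):
        month_index = i + 1
        failure_cost = failure_counts[i] * failure_replacement_cost + cumulative_risk_costs[i]
        preventive_cost_month = preventive_cost / month_index  # amortized over the interval
        totals.append(failure_cost + preventive_cost_month)

    best_month = totals.index(min(totals)) + 1
    print(best_month, totals)  # 2 [121e6, 62e6, 94e6, 87e6, 135e6]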
@@ -785,31 +797,42 @@ class OptimumCostModelWithSpareparts:
            self.logger.error(f"Failed to get simulation results: {e}")
            equipment_birnbaum = {}

+        location_tags = [equipment.location_tag for equipment in equipments]
+        reliabity_parameter = {
+            res['location_tag']: res for res in fetch_reliability(location_tags)
+        }
+
        # Phase 1: Calculate individual optimal timings without considering interactions
        individual_results = {}

        for equipment in equipments:
            location_tag = equipment.location_tag
            birnbaum = equipment_birnbaum.get(location_tag, 0.0)
+            asset_reliability = reliabity_parameter.get(location_tag)
+            distribution = asset_reliability.get("distribution")
+            parameters = asset_reliability.get("parameters", {})

            try:
                # Get failure predictions
-                monthly_data = await self.get_failures_prediction(simulation_id, location_tag, birnbaum)
-
-                if not monthly_data:
-                    continue
+                max_flowrate = await self.max_flowrate(simulation_id, location_tag) or 15
+                results = simulate_failures(distribution, parameters, 3, max_flowrate, months=max_interval, runs=500)

                # Calculate costs without considering other equipment (first pass)
                equipment_preventive_cost = equipment.overhaul_cost + equipment.service_cost
                failure_replacement_cost = equipment.material_cost + (3 * 111000 * 3)

                cost_results = self._calculate_equipment_costs_with_spareparts(
                    failures_prediction=results,
                    birnbaum_importance=birnbaum,
                    preventive_cost=equipment_preventive_cost,
                    failure_replacement_cost=failure_replacement_cost,
                    location_tag=location_tag,
-                    planned_overhauls=[]  # Empty in first pass
+                    planned_overhauls=[],  # Empty in first pass
+                    max_interval=max_interval
                )
                if not cost_results:

@@ -826,7 +849,7 @@ class OptimumCostModelWithSpareparts:
            except Exception as e:
                self.logger.error(f"Failed to calculate individual timing for {location_tag}: {e}")
-                continue
+                raise Exception(e)

        # Phase 2: Optimize considering sparepart interactions
        self.logger.info("Phase 2: Optimizing with sparepart interactions...")
@@ -1169,7 +1192,7 @@ async def run_simulation(*, db_session, calculation, token: str, collector_db_se
        token=token,
        last_oh_date=prev_oh_scope.end_date,
        next_oh_date=scope.start_date,
-        time_window_months=time_window_months,
+        time_window_months=60,
        base_url=RBD_SERVICE_API
    )

utils.py

@@ -1,7 +1,11 @@
import datetime
import json
+import numpy as np
import pandas as pd
+import requests
+
+from src.config import REALIBILITY_SERVICE_API

def get_months_between(start_date: datetime.datetime, end_date: datetime.datetime) -> int:
    """
@@ -273,10 +277,272 @@ def get_monthly_risk_analysis(timestamp_outs, birnbaum_importance, energy_price)
        'risk_cost_array': risk_analysis['risk_cost_per_failure_array']
    }

-# Usage example:
-# birnbaum_importance = 0.85  # Example value
-# energy_price = 100  # Example: $100 per unit
-# results = get_monthly_risk_analysis(timestamp_outs, birnbaum_importance, energy_price)
-# risk_cost_array = results['risk_cost_array']
-# print("Risk cost per failure each month:", risk_cost_array)

def fetch_reliability(location_tags):
    url = f"{REALIBILITY_SERVICE_API}/asset/batch"
    resp = requests.get(url, json={"location_tags": location_tags})
    resp.raise_for_status()
    return resp.json().get("data", [])
import math
from scipy.stats import lognorm, norm

def get_reliability(distribution: str, params: dict, t: float) -> float:
    d = (distribution or "").lower()
    if d in ["weibull_2p", "weibull_3p"]:
        eta = params.get("eta"); beta = params.get("beta"); gamma_ = params.get("gamma", 0)
        if eta is None or beta is None: return 1.0
        if t <= gamma_: return 1.0
        return math.exp(-((t - gamma_) / eta) ** beta)
    elif d in ["exponential", "exponential_2p"]:
        lam = params.get("lambda") or params.get("Lambda")
        if lam is None: return 1.0
        return math.exp(-lam * t)
    elif "lognormal" in d:
        mu = params.get("mu"); sigma = params.get("sigma"); gamma_ = params.get("gamma", 0)
        if mu is None or sigma is None: return 1.0
        return 1 - lognorm.cdf(max(t - gamma_, 0), s=sigma, scale=math.exp(mu))
    elif "normal" in d:
        mu = params.get("mu"); sigma = params.get("sigma")
        if mu is None or sigma is None: return 1.0
        return 1 - norm.cdf(t, loc=mu, scale=sigma)
    elif "nhpp" in d:
        eta = params.get("eta")
        beta = params.get("beta")
        lam = params.get("lambda", 1)
        if eta is None or beta is None:
            return 1.0
        if t <= 0:
            return 1.0  # at time 0, survival = 1
        return math.exp(-(t / eta) ** beta)
    else:
        return 1.0
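# Editor's check (illustrative, values arbitrary): a 2-parameter Weibull with
# eta=1000 h and beta=1.5 evaluated at t=720 h gives exp(-(720/1000)**1.5):
# >>> round(get_reliability("weibull_2p", {"eta": 1000, "beta": 1.5}, 720), 3)
# 0.543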
import numpy as np

def failures_per_month(distribution, params, mttr, design_flow_rate=100,
                       population=1, months=24, hours_per_month=720,
                       mode="expected", runs=1):
    """
    Calculate monthly failures, cumulative failures, downtime, and avg flowrate.
    - mode="expected": returns smooth fractional expected values.
    - mode="simulate": returns integer values per run (stochastic).
    - runs: number of Monte Carlo runs (only used if simulate).
    - If simulate with runs>1, returns a percentile (P90) summary across runs.
    """
    all_runs = []
    for r in range(runs):
        results = []
        cumulative = 0
        total_oos_hours = 0
        for m in range(1, months + 1):
            t_start = (m - 1) * hours_per_month
            t_end = m * hours_per_month
            R_start = get_reliability(distribution, params, t_start)
            R_end = get_reliability(distribution, params, t_end)

            # Probability of failure in this month
            prob_failure = max(0.0, R_start - R_end)

            if mode == "expected":
                failures = population * prob_failure  # fractional
            else:  # simulate
                failures = np.random.binomial(population, prob_failure)

            cumulative += failures

            # Downtime (failures × MTTR)
            oos_hours = failures * mttr
            total_oos_hours += oos_hours
            service_hours = hours_per_month - oos_hours
            if service_hours < 0:
                service_hours = 0

            # Availability = service / total
            availability = service_hours / hours_per_month

            # Avg flowrate scaled
            avg_flowrate = design_flow_rate * availability

            results.append({
                "month": m,
                "failures": failures,
                "cumulative_failures": cumulative,
                "oos_hours": oos_hours,
                "total_oos_hours": total_oos_hours,
                "service_hours": service_hours,
                "availability": availability,
                "avg_flowrate": avg_flowrate
            })
        all_runs.append(results)

    # === OUTPUTS ===
    if mode == "expected" or runs == 1:
        return all_runs[0]  # smooth or single trajectory

    # === Summarize multiple runs: take the P90 of monthly failures across runs
    # and derive downtime, availability, and flowrate from it ===
    summary = []
    total_oos_hours = 0
    cumulative = 0
    for m in range(months):
        values = [r[m]["failures"] for r in all_runs]
        failures = float(np.percentile(values, 90))  # P90 across runs
        oos_hours = failures * mttr
        total_oos_hours += oos_hours
        service_hours = max(0, hours_per_month - oos_hours)
        availability = service_hours / hours_per_month
        avg_flowrate = design_flow_rate * availability
        cumulative += failures
        summary.append({
            "month": m + 1,
            "failures": failures,
            "cumulative_failures": cumulative,
            "oos_hours": oos_hours,
            "total_oos_hours": total_oos_hours,
            "service_hours": service_hours,
            "availability": availability,
            "avg_flowrate": avg_flowrate
        })
    return summary
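# Editor's usage sketch for the expected mode (parameters invented):
#   >>> rows = failures_per_month("weibull_2p", {"eta": 8760, "beta": 1.2},
#   ...                           mttr=3, design_flow_rate=100, months=12)
#   >>> rows[0]["failures"]                # fractional expected failures, month 1
#   >>> rows[-1]["cumulative_failures"]    # expected total after a year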
import pandas as pd

def get_reliability_data(location_tags, months=24, design_flow_rate=100):
    # 1. Fetch parameters
    data = fetch_reliability(location_tags)

    all_results = []
    for asset in data:
        distribution = asset.get("distribution")
        params = asset.get("parameters", {})
        mttr = 3
        tag = asset.get("location_tag")

        # 2. Predict monthly
        results = failures_per_month(distribution, params, mttr, design_flow_rate, months=months)

        # 3. Store with location_tag
        for row in results:
            row["location_tag"] = tag
            all_results.append(row)

    return all_results
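# Editor's usage sketch: the pandas import above suggests tabular post-processing,
# e.g. (tags invented):
#   >>> df = pd.DataFrame(get_reliability_data(["P-101", "K-202"], months=12))
#   >>> df.groupby("location_tag")["failures"].sum()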
import numpy as np
import math

def sample_failure_time(distribution, params):
    """Draw one failure time from the reliability distribution."""
    d = (distribution or "").lower()
    u = np.random.rand()
    if d in ["weibull_2p", "weibull_3p"]:
        eta = params.get("eta"); beta = params.get("beta"); gamma_ = params.get("gamma", 0)
        if eta is None or beta is None: return np.inf
        return gamma_ + eta * (-math.log(1 - u)) ** (1 / beta)
    elif "exponential" in d:
        lam = params.get("lambda") or params.get("Lambda")
        if lam is None: return np.inf
        return -math.log(1 - u) / lam
    elif "lognormal" in d:
        mu = params.get("mu"); sigma = params.get("sigma"); gamma_ = params.get("gamma", 0)
        if mu is None or sigma is None: return np.inf
        return gamma_ + np.random.lognormal(mean=mu, sigma=sigma)
    elif "normal" in d:
        mu = params.get("mu"); sigma = params.get("sigma")
        if mu is None or sigma is None: return np.inf
        return max(0, np.random.normal(mu, sigma))
    else:
        return np.inf
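# Editor's note: the Weibull and exponential branches are inverse-transform
# sampling: solve u = F(t) for t. For the Weibull case,
#   u = 1 - exp(-((t - gamma)/eta)**beta)
#   t = gamma + eta * (-ln(1 - u))**(1/beta)
# which is exactly the expression returned above.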
def simulate_failures(distribution, params, mttr, design_flow_rate=100,
                      population=1, months=24, hours_per_month=720,
                      runs=1000):
    """
    Simulate failures over a given horizon using a renewal process.
    Always stochastic; results are aggregated to P50 across runs.
    """
    horizon = months * hours_per_month
    all_runs = []

    for r in range(runs):
        results = []
        failures_by_month = [0] * months

        for _ in range(population):
            # First failure
            t = sample_failure_time(distribution, params)
            while t < horizon:
                month_idx = int(t // hours_per_month)
                if month_idx < months:
                    failures_by_month[month_idx] += 1
                # Renewal: after repair (mttr), draw a new time to failure
                t += mttr + sample_failure_time(distribution, params)

        # Build results for this run
        cumulative = 0
        total_oos_hours = 0
        for m in range(months):
            failures = failures_by_month[m]
            cumulative += failures
            oos_hours = failures * mttr
            total_oos_hours += oos_hours
            service_hours = max(0, hours_per_month - oos_hours)
            availability = service_hours / hours_per_month
            avg_flowrate = design_flow_rate * availability
            results.append({
                "month": m + 1,
                "failures": failures,
                "cumulative_failures": cumulative,
                "oos_hours": oos_hours,
                "total_oos_hours": total_oos_hours,
                "service_hours": service_hours,
                "availability": availability,
                "avg_flowrate": avg_flowrate
            })
        all_runs.append(results)

    # === Aggregate to P50 ===
    summary = []
    for m in range(months):
        row = {"month": m + 1}
        for key in ["failures", "cumulative_failures", "oos_hours",
                    "total_oos_hours", "service_hours", "availability", "avg_flowrate"]:
            values = [r[m][key] for r in all_runs]
            row[key] = float(np.percentile(values, 50))  # median
        summary.append(row)
    return summary
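Taken together, the commit replaces the simulation service's monthly failure metrics with Monte Carlo renewal sampling driven by fitted reliability parameters. A minimal end-to-end sketch (editor's illustration; the Weibull parameters are invented, the functions are the ones added above):

    rows = simulate_failures("weibull_2p", {"eta": 8760, "beta": 1.4},
                             3, design_flow_rate=120, months=60, runs=500)
    # Each row carries failures, cumulative_failures, total_oos_hours, and
    # avg_flowrate at the P50 across runs; the service layer passes this list
    # as failures_prediction= to _calculate_equipment_costs_with_spareparts.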
