diff --git a/src/calculation_time_constrains/flows.py b/src/calculation_time_constrains/flows.py
index 5f140b3..35a5f6b 100644
--- a/src/calculation_time_constrains/flows.py
+++ b/src/calculation_time_constrains/flows.py
@@ -20,8 +20,7 @@ from .service import (create_calculation_result_service, create_param_and_data,
                       get_avg_cost_by_asset,
                       get_calculation_by_reference_and_parameter,
                       get_calculation_data_by_id, get_calculation_result,
-                      get_corrective_cost_time_chart,
-                      get_overhaul_cost_by_time_chart, run_simulation_with_spareparts)
+                      run_simulation_with_spareparts)

 from src.database.core import CollectorDbSession
diff --git a/src/calculation_time_constrains/model.py b/src/calculation_time_constrains/model.py
index b4cd8c2..5380f9d 100644
--- a/src/calculation_time_constrains/model.py
+++ b/src/calculation_time_constrains/model.py
@@ -69,6 +69,8 @@ class CalculationData(Base, DefaultMixin, IdentityMixin):

     optimum_oh_day = Column(Integer, nullable=True)
     max_interval = Column(Integer, nullable=True)
+
+    rbd_simulation_id = Column(UUID(as_uuid=True), nullable=True)

     session = relationship("OverhaulScope", lazy="raise")
diff --git a/src/calculation_time_constrains/router.py b/src/calculation_time_constrains/router.py
index 903f79d..1b3ac6c 100644
--- a/src/calculation_time_constrains/router.py
+++ b/src/calculation_time_constrains/router.py
@@ -84,9 +84,9 @@ async def get_calculation_parameters(
 @router.get(
     "/{calculation_id}", response_model=StandardResponse[CalculationTimeConstrainsRead]
 )
-async def get_calculation_results(db_session: DbSession, calculation_id):
+async def get_calculation_results(db_session: DbSession, calculation_id, token: Token):
     results = await get_calculation_result(
-        db_session=db_session, calculation_id=calculation_id
+        db_session=db_session, calculation_id=calculation_id, token=token
     )

     return StandardResponse(
diff --git a/src/calculation_time_constrains/service.py b/src/calculation_time_constrains/service.py
index 81ee019..d17ce48 100644
--- a/src/calculation_time_constrains/service.py
+++ b/src/calculation_time_constrains/service.py
@@ -28,7 +28,7 @@ from .schema import (CalculationResultsRead,
                      CalculationTimeConstrainsParametersCreate,
                      CalculationTimeConstrainsRead, OptimumResult)

-from .utils import analyze_monthly_metrics, calculate_failures_per_month, calculate_risk_cost_per_failure, create_time_series_data, get_monthly_risk_analysis, get_months_between
+from .utils import analyze_monthly_metrics, calculate_failures_per_month, calculate_risk_cost_per_failure, create_time_series_data, get_monthly_risk_analysis, get_months_between, plant_simulation_metrics
 from src.equipment_sparepart.model import ScopeEquipmentPart
 import copy
 import random
@@ -115,9 +115,9 @@ class OptimumCostModelWithSpareparts:
             await self.session.close()
             self.session = None

-    async def get_failures_prediction(self, simulation_id: str, location_tag: str, birnbaum_importance: float):
+    async def get_failures_prediction(self, simulation_id: str, location_tag: str, birnbaum_importance: float, use_location_tag: int = 1):
         """Get failure predictions for equipment from simulation service"""
-        plot_result_url = f"{self.api_base_url}/aeros/simulation/result/plot/{simulation_id}/{location_tag}?use_location_tag=1"
+        plot_result_url = f"{self.api_base_url}/aeros/simulation/result/plot/{simulation_id}/{location_tag}?use_location_tag={use_location_tag}"

         try:
             response = requests.get(
@@ -140,8 +140,8 @@ class OptimumCostModelWithSpareparts:
                 self.logger.warning(f"No plot data available for {location_tag}")
                 return None

-            time_series = create_time_series_data(plot_data, 43830)
-            monthly_data = analyze_monthly_metrics(time_series)
+            time_series = create_time_series_data(plot_data, (self.time_window_months * 24 * 31))
+            monthly_data = analyze_monthly_metrics(time_series, self.last_oh_date)

             return monthly_data

@@ -362,7 +362,15 @@ class OptimumCostModelWithSpareparts:
             equipment_birnbaum = {
                 eq["Location"]: eq for eq in data
             }

+            plant_monthly_metrics = await self.get_failures_prediction(simulation_id=simulation_id, location_tag="plant", use_location_tag=0, birnbaum_importance=0)
+            REFERENCE_CAPACITY = 630  # or 550
+            COST_PER_MWH = 1_000_000  # rupiah
+
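+            # Price each month's plant-level derated energy (MWh) at the assumed
+            # tariff above, then accumulate it across the simulation horizon.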
+            plant_capacity_loss_money = [metrics['derated_mwh'] * COST_PER_MWH for metrics in plant_monthly_metrics.values()]
+            cumulative_loss_money = np.cumsum(plant_capacity_loss_money)
+
+
         for equipment in equipments:
             location_tag = equipment.location_tag
             birnbaum = equipment_birnbaum.get(location_tag, 0.0)
@@ -418,13 +426,14 @@ class OptimumCostModelWithSpareparts:
         # Phase 3: Generate final results and database objects
         fleet_results = []

-        total_corrective_costs = np.zeros(max_interval)
+        total_corrective_costs = np.zeros(max_interval) + cumulative_loss_money[0:max_interval]
         total_preventive_costs = np.zeros(max_interval)
         total_procurement_costs = np.zeros(max_interval)
         total_costs = np.zeros(max_interval)

         total_fleet_procurement_cost = 0

+
         for equipment in equipments:
             location_tag = equipment.location_tag

@@ -499,6 +508,7 @@ class OptimumCostModelWithSpareparts:
         # Update calculation with results
         calculation.optimum_oh_day = fleet_optimal_index
         calculation.max_interval = max_interval
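+        # Persist the RBD simulation id on the calculation record so stored
+        # results can later be re-read against the same run in get_calculation_result.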
+        calculation.rbd_simulation_id = simulation_id

         # Save all results to database
         db_session.add_all(fleet_results)
@@ -717,190 +727,6 @@ async def run_simulation_with_spareparts(*, db_session, calculation, token: str,
     await optimum_oh_model._close_session()


-async def get_corrective_cost_time_chart(
-    material_cost: float,
-    service_cost: float,
-    location_tag: str,
-    token,
-    start_date: datetime,
-    end_date: datetime
-) -> Tuple[np.ndarray, np.ndarray]:
-    days_difference = (end_date - start_date).days
-
-    today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
-    tomorrow = today + timedelta(days=1)
-
-    # Initialize monthly data dictionary
-    monthly_data = {}
-    latest_num = 1
-
-    # Handle historical data (any portion before or including today)
-    historical_start = start_date if start_date <= today else None
-    historical_end = min(today, end_date)
-
-
-    if historical_start and historical_start <= historical_end:
-        url_history = f"http://192.168.1.82:8000/reliability/main/failures/{location_tag}/{historical_start.strftime('%Y-%m-%d')}/{historical_end.strftime('%Y-%m-%d')}"
-
-        try:
-            response = requests.get(
-                url_history,
-                headers={
-                    "Content-Type": "application/json",
-                    "Authorization": f"Bearer {token}",
-                },
-            )
-            history_data = response.json()
-
-            # Process historical data - accumulate failures by month
-            history_dict = {}
-            monthly_failures = {}
-
-            for item in history_data["data"]:
-                date = datetime.datetime.strptime(item["date"], "%d %b %Y")
-                month_key = datetime.datetime(date.year, date.month, 1)
-
-                # Initialize if first occurrence of this month
-                if month_key not in history_dict:
-                    history_dict[month_key] = 0
-
-                # Accumulate failures for this month
-                if item["num_fail"] is not None:
-                    history_dict[month_key] += item["num_fail"]
-
-
-            # Sort months chronologically
-            sorted_months = sorted(history_dict.keys())
-
-            if sorted_months:
-                failures = np.array([history_dict[month] for month in sorted_months])
-                cum_failure = np.cumsum(failures)
-
-                for month_key in sorted_months:
-                    monthly_failures[month_key] = int(cum_failure[sorted_months.index(month_key)])
-
-                # Update monthly_data with cumulative historical data
-                monthly_data.update(monthly_failures)
-
-                # Get the latest number for predictions if we have historical data
-                if failures.size > 0:
-                    latest_num = max(1, failures[-1])  # Use the last month's failures, minimum 1
-
-        except Exception as e:
-            raise Exception(f"Error fetching historical data: {e}")
-
-    if location_tag == '3TR-TF005':
-        raise Exception("tes",monthly_data)
-
-
-    if end_date >= start_date:
-        url_prediction = f"http://192.168.1.82:8000/reliability/main/number-of-failures/{location_tag}/{start_date.strftime('%Y-%m-%d')}/{end_date.strftime('%Y-%m-%d')}"
-
-        try:
-            response = requests.get(
-                url_prediction,
-                headers={
-                    "Content-Type": "application/json",
-                    "Authorization": f"Bearer {token}",
-                },
-            )
-            prediction_data = response.json()
-
-            # Process prediction data - but only use it for future dates
-            if prediction_data["data"]:
-                for item in prediction_data["data"]:
-                    date = datetime.strptime(item["date"], "%d %b %Y")
-
-                    # Only apply prediction data for dates after today
-                    if date > today:
-                        month_key = datetime(date.year, date.month, 1)
-
-                        monthly_data[month_key] = item["num_fail"] if item["num_fail"] is not None else 0
-
-                # Update latest_num with the last prediction if available
-                last_prediction = prediction_data["data"][-1]["num_fail"]
-                if last_prediction is not None:
-                    latest_num = max(1, round(last_prediction))
-
-        except Exception as e:
-            print(f"Error fetching prediction data: {e}")
-
-
-    # Fill in any missing months in the range
-    current_date = datetime(start_date.year, start_date.month, 1)
-    end_month = datetime(end_date.year, end_date.month, 1)
-
-    while current_date <= end_month:
-        if current_date not in monthly_data:
-            # Try to find the most recent month with data
-            prev_months = [m for m in monthly_data.keys() if m < current_date]
-
-            if prev_months:
-                # Use the most recent previous month's data
-                latest_month = max(prev_months)
-                monthly_data[current_date] = monthly_data[latest_month]
-            else:
-                # If no previous months exist, look for future months
-                future_months = [m for m in monthly_data.keys() if m > current_date]
-
-                if future_months:
-                    # Use the earliest future month's data
-                    earliest_future = min(future_months)
-                    monthly_data[current_date] = monthly_data[earliest_future]
-                else:
-                    # No data available at all, use default
-                    monthly_data[current_date] = latest_num
-
-        # Move to next month
-        if current_date.month == 12:
-            current_date = datetime(current_date.year + 1, 1, 1)
-        else:
-            current_date = datetime(current_date.year, current_date.month + 1, 1)
-
-    # Convert to list maintaining chronological order
-    complete_data = []
-    for month in sorted(monthly_data.keys()):
-        complete_data.append(monthly_data[month])
-
-    if latest_num < 1:
-        raise ValueError("Number of failures cannot be negative", latest_num)
-
-    # Convert to numpy array
-    monthly_failure = np.array(complete_data)
-    cost_per_failure = (material_cost + service_cost) / latest_num
-
-    raise Exception(monthly_data, location_tag)
-
-    try:
-        corrective_costs = monthly_failure * cost_per_failure
-    except Exception as e:
-        raise Exception(f"Error calculating corrective costs: {monthly_failure}", location_tag)
-
-    return corrective_costs, monthly_failure
-
-
-def get_overhaul_cost_by_time_chart(
-    overhaul_cost: float, months_num: int, numEquipments: int, decay_base: float = 1.01
-) -> np.ndarray:
-    if overhaul_cost < 0:
-        raise ValueError("Overhaul cost cannot be negative")
-    if months_num <= 0:
-        raise ValueError("months_num must be positive")
-
-    rate = np.arange(1, months_num + 1)
-
-    cost_per_equipment = overhaul_cost / numEquipments
-
-    # results = cost_per_equipment - ((cost_per_equipment / hours) * rate)
-    results = cost_per_equipment / rate
-
-    return results
-
 async def create_param_and_data(
     *,
     db_session: DbSession,
@@ -927,7 +753,7 @@ async def create_param_and_data(
     return calculationData


-async def get_calculation_result(db_session: DbSession, calculation_id: str):
+async def get_calculation_result(db_session: DbSession, calculation_id: str, token):
     """
     Get calculation results with improved error handling, performance, and sparepart details
     """
@@ -982,12 +808,20 @@ async def get_calculation_result(db_session: DbSession, calculation_id: str):
         'total_procurement_items': 0,
         'critical_procurement_items': 0
     }
+
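+    # Rebuild the plant-level loss series from the RBD simulation stored on this
+    # calculation, mirroring the plant-loss pricing applied when the simulation ran.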
+    plant_monthly_metrics = await plant_simulation_metrics(simulation_id=scope_calculation.rbd_simulation_id, location_tag="plant", use_location_tag=0, token=token, last_oh_date=prev_oh_scope.end_date, max_interval=scope_calculation.max_interval)
+
+    REFERENCE_CAPACITY = 630  # or 550
+    COST_PER_MWH = 1_000_000  # rupiah
+
+    plant_capacity_loss_money = [metrics['derated_mwh'] * COST_PER_MWH for metrics in plant_monthly_metrics.values()]
+    cumulative_loss_money = np.cumsum(plant_capacity_loss_money)

     # Process each month
     for month_index in range(data_num):
         month_result = {
             "overhaul_cost": 0.0,
-            "corrective_cost": 0.0,
+            "corrective_cost": cumulative_loss_money[month_index],
             "procurement_cost": 0.0,
             "num_failures": 0.0,
             "day": month_index + 1,
diff --git a/src/calculation_time_constrains/utils.py b/src/calculation_time_constrains/utils.py
index 9e1802c..2940b09 100644
--- a/src/calculation_time_constrains/utils.py
+++ b/src/calculation_time_constrains/utils.py
@@ -2,6 +2,9 @@ import datetime
 import json

 import pandas as pd
+import requests
+
+from src.config import RBD_SERVICE_API

 def get_months_between(start_date: datetime.datetime, end_date: datetime.datetime) -> int:
     """
@@ -99,7 +102,37 @@ def calculate_failures_per_month(hourly_data):

 import pandas as pd
 import datetime

-def analyze_monthly_metrics(timestamp_outs):
+async def plant_simulation_metrics(simulation_id: str, location_tag: str, max_interval, token, last_oh_date, use_location_tag: int = 1):
+    """Get failure predictions for equipment from simulation service"""
+    plot_result_url = f"{RBD_SERVICE_API}/aeros/simulation/result/plot/{simulation_id}/{location_tag}?use_location_tag={use_location_tag}"
+
+    try:
+        response = requests.get(
+            plot_result_url,
+            headers={
+                "Content-Type": "application/json",
+                "Authorization": f"Bearer {token}",
+            },
+            timeout=30
+        )
+        response.raise_for_status()
+        prediction_data = response.json()
+    except (requests.RequestException, ValueError) as e:
+        raise Exception(f"Error fetching plant simulation data: {e}")
+
+    plot_data = prediction_data.get("data", {}).get("timestamp_outs") if prediction_data.get("data") else None
+
+    if not plot_data:
+        raise Exception("No plot data available for plant simulation")
+
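+    # 24 * 31 over-approximates the hours in any month, so the generated
+    # window always covers the full max_interval horizon.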
+    time_series = create_time_series_data(plot_data, (max_interval * 24 * 31))
+    monthly_data = analyze_monthly_metrics(time_series, last_oh_date)
+
+    return monthly_data
+
+
+def analyze_monthly_metrics(timestamp_outs, start_date, max_flow_rate: float = 600):
     if not timestamp_outs:
         return {}
@@ -108,16 +141,17 @@ def analyze_monthly_metrics(timestamp_outs):
     if not all(col in df.columns for col in required_columns):
         return {}

-    # Reference start date (adjust if needed)
-    start_date = datetime.datetime(2025, 10, 22)
-    df['datetime'] = df['cumulativeTime'].apply(lambda x: start_date + datetime.timedelta(hours=x))
+    start_oh = datetime.datetime(start_date.year, start_date.month, start_date.day)
+
+    # Actual datetime from cumulative hours
+    df['datetime'] = df['cumulativeTime'].apply(lambda x: start_oh + datetime.timedelta(hours=x))
     df['month_year'] = df['datetime'].dt.to_period('M')

     # Duration until next timestamp
     df['duration_hours'] = df['cumulativeTime'].shift(-1) - df['cumulativeTime']
     df['duration_hours'] = df['duration_hours'].fillna(0)

-    # Failure detection (global, not per group)
+    # Failure detection
     df['status_change'] = df['currentEQStatus'].shift() != df['currentEQStatus']
     df['failure'] = (df['currentEQStatus'] == 'OoS') & df['status_change']

@@ -125,6 +159,15 @@ def analyze_monthly_metrics(timestamp_outs):
     df['cumulative_failures'] = df['failure'].cumsum()
     df['cumulative_oos'] = (df['duration_hours'] * (df['currentEQStatus'] == 'OoS')).cumsum()

+    # Derating calculation
+    # Derating = capacity reduction below max, but not an outage
+    df['derating'] = (max_flow_rate - df['flowRate']).clip(lower=0)
+    df['is_derated'] = (df['currentEQStatus'] == 'Svc') & (df['derating'] > 0)
+
+    # Equivalent derated hours (EFDH): sum of derating * hours, then normalized by max capacity
+    df['derated_mwh'] = df['derating'] * df['duration_hours']
+    df['derated_hours_equivalent'] = df['derated_mwh'] / max_flow_rate
+
     monthly_results = {}

     for month_period, group in df.groupby('month_year', sort=True):
@@ -154,6 +197,15 @@ def analyze_monthly_metrics(timestamp_outs):
             (service_hours / total_time * 100) if total_time > 0 else 0
         )

+        # Derating metrics
+        derating_hours = group.loc[group['is_derated'], 'duration_hours'].sum()
+        derated_mwh = group['derated_mwh'].sum()
+        equivalent_derated_hours = group['derated_hours_equivalent'].sum()
+
+        monthly_results[month_str]['derating_hours'] = float(derating_hours)
+        monthly_results[month_str]['derated_mwh'] = float(derated_mwh)
+        monthly_results[month_str]['equivalent_derated_hours'] = float(equivalent_derated_hours)
+
     return monthly_results
diff --git a/src/maximo/service.py b/src/maximo/service.py
index 148b353..21d47cd 100644
--- a/src/maximo/service.py
+++ b/src/maximo/service.py
@@ -62,7 +62,6 @@ filtered_wo AS (
     FROM wo_costs w
     JOIN location_max lm ON w.asset_location = lm.asset_location
     WHERE w.total_wo_cost > 0
-      AND w.total_wo_cost >= lm.max_cost * 0.15 -- keep within 10% of max
 )
 SELECT
     asset_location,