From 22bc48eef2e29027f0a62f7068571ed47bbcc32c Mon Sep 17 00:00:00 2001
From: Cizz22
Date: Mon, 13 Oct 2025 10:44:54 +0700
Subject: [PATCH] fix

---
 src/calculation_target_reliability/router.py  |   9 +-
 src/calculation_target_reliability/schema.py  |   2 +
 src/calculation_target_reliability/service.py | 147 +++++++++++-------
 src/calculation_time_constrains/service.py    |  17 +-
 src/sparepart/service.py                      |   6 +-
 5 files changed, 119 insertions(+), 62 deletions(-)

diff --git a/src/calculation_target_reliability/router.py b/src/calculation_target_reliability/router.py
index 92a2577..617b342 100644
--- a/src/calculation_target_reliability/router.py
+++ b/src/calculation_target_reliability/router.py
@@ -40,7 +40,8 @@ async def get_target_reliability(
     oh_session_id: Optional[str] = Query(None),
     eaf_input: float = Query(99.8),
     duration: int = Query(17520),
-    simulation_id: Optional[str] = Query(None)
+    simulation_id: Optional[str] = Query(None),
+    po_duration: int = Query(1200)
 ):
     """Get all scope pagination."""
     if not oh_session_id:
@@ -66,7 +67,7 @@ async def get_target_reliability(
         )
         simulation_id = simulation.get("data")
 
-        await wait_for_workflow(simulation_id=simulation_id.get("data"))
+        await wait_for_workflow(simulation_id=simulation_id)
     else:
         await wait_for_workflow(simulation_id=simulation_id)
 
@@ -82,7 +83,9 @@ async def get_target_reliability(
         db_session=db_session,
         oh_session_id=oh_session_id,
         collector_db=collector_db,
-        simulation_id=simulation_id
+        simulation_id=simulation_id,
+        duration=duration,
+        po_duration=po_duration
     )
 
 
diff --git a/src/calculation_target_reliability/schema.py b/src/calculation_target_reliability/schema.py
index 59db83d..4d6a628 100644
--- a/src/calculation_target_reliability/schema.py
+++ b/src/calculation_target_reliability/schema.py
@@ -56,9 +56,11 @@ class OptimizationResult(OverhaulBase):
     target_plant_eaf: float
     possible_plant_eaf:float
     eaf_gap: float
+    warning_message: Optional[str] = None
     asset_contributions: List[dict]
     optimization_success: bool = False
     simulation_id: Optional[str] = None
+
 
 
 # {
diff --git a/src/calculation_target_reliability/service.py b/src/calculation_target_reliability/service.py
index 03a929d..fa9989e 100644
--- a/src/calculation_target_reliability/service.py
+++ b/src/calculation_target_reliability/service.py
@@ -1,3 +1,4 @@
+import math
 from typing import Optional, List
 from dataclasses import dataclass
 from sqlalchemy import Delete, Select
@@ -77,57 +78,61 @@ async def get_simulation_results(*, simulation_id: str, token: str):
         "plant_result": plant_data
     }
 
-def calculate_asset_eaf_contributions(plant_result, eq_results, standard_scope, eaf_gap):
+def calculate_asset_eaf_contributions(plant_result, eq_results, standard_scope, eaf_gap, scheduled_outage):
     """
-    Calculate each asset's contribution to plant EAF with realistic improvement potential.
-    Ranking:
-    1. Highest contribution (Birnbaum Importance)
-    2. Tie-breaker: Contribution per downtime (efficiency)
+    Calculate each asset's contribution to plant EAF with realistic, fair improvement allocation.
+    The total EAF gap is distributed among assets proportionally to their contribution potential.
""" - eaf_gap_fraction = eaf_gap / 100.0 if eaf_gap > 1.0 else eaf_gap + + total_hours = plant_result.get("total_uptime") + plant_result.get("total_downtime") + plant_operating_fraction = (total_hours - scheduled_outage) / total_hours - MIN_BIRNBAUM_IMPORTANCE = 0.0005 - REALISTIC_MAX_AVAILABILITY = 0.995 # 99.5% + REALISTIC_MAX_TECHNICAL = 0.995 + REALISTIC_MAX_AVAILABILITY = REALISTIC_MAX_TECHNICAL * plant_operating_fraction MIN_IMPROVEMENT_PERCENT = 0.0001 min_improvement_fraction = MIN_IMPROVEMENT_PERCENT / 100.0 results = [] + weighted_assets = [] + # Step 1: Collect eligible assets and their weights for asset in eq_results: asset_name = asset.get("aeros_node").get("node_name") - - if asset_name not in standard_scope: + num_of_events = asset.get("num_events") + + if asset_name not in standard_scope or num_of_events < 2: continue + + contribution_factor = asset.get("contribution_factor", 0.0) + birbaum = asset.get("contribution", 0.0) + current_availability = asset.get("availability", 0.0) + downtime = asset.get("total_downtime", 0.0) + max_possible_improvement = REALISTIC_MAX_AVAILABILITY - current_availability if REALISTIC_MAX_AVAILABILITY > current_availability else REALISTIC_MAX_TECHNICAL - + raw_weight = birbaum * contribution_factor + weight = math.sqrt(raw_weight) + weighted_assets.append((asset, weight, max_possible_improvement)) + + # Step 2: Compute total weight + total_weight = sum(w for _, w, _ in weighted_assets) or 1.0 + + # Step 3: Distribute improvement proportionally to weight + for asset, weight, max_possible_improvement in weighted_assets: + asset_name = asset.get("aeros_node").get("node_name") contribution_factor = asset.get("contribution_factor", 0.0) birbaum = asset.get("contribution", 0.0) current_availability = asset.get("availability", 0.0) downtime = asset.get("total_downtime", 0.0) - - # Filter 1: Importance too low - if contribution_factor < MIN_BIRNBAUM_IMPORTANCE: - continue + # Proportional improvement share + required_improvement = eaf_gap_fraction * (weight / total_weight) + required_improvement = min(required_improvement, max_possible_improvement) + required_improvement = max(required_improvement, min_improvement_fraction) - # Max possible availability improvement - max_possible_improvement = REALISTIC_MAX_AVAILABILITY - current_availability - if max_possible_improvement <= 0: - continue - - - - # Inject standard each equipment - required_improvement = 0.01 improvement_impact = required_improvement * contribution_factor - - - # Filter 2: Improvement too small - if improvement_impact < MIN_IMPROVEMENT_PERCENT: - continue - # Contribution efficiency (secondary metric) + # Secondary metric: efficiency efficiency = birbaum / downtime if downtime > 0 else birbaum contribution = AssetWeight( @@ -138,30 +143,30 @@ def calculate_asset_eaf_contributions(plant_result, eq_results, standard_scope, improvement_impact=improvement_impact, num_of_failures=asset.get("num_events", 0), down_time=downtime, - efficiency= efficiency, + efficiency=efficiency, birbaum=birbaum ) - results.append(contribution) - # Sort: 1) contribution (desc), 2) efficiency (desc) - results.sort(key=lambda x: (x.birbaum), reverse=True) + # Step 4: Sort by Birnbaum importance + results.sort(key=lambda x: x.birbaum, reverse=True) return results + def project_eaf_improvement(asset: AssetWeight, improvement_factor: float = 0.3) -> float: """ Project EAF improvement after maintenance This is a simplified model - you should replace with your actual prediction logic """ current_downtime_pct = 100 - 
asset.eaf - # Assume maintenance reduces downtime by improvement_factor improved_downtime_pct = current_downtime_pct * (1 - improvement_factor) projected_eaf = 100 - improved_downtime_pct return min(projected_eaf, 99.9) # Cap at 99.9% + async def identify_worst_eaf_contributors( *, simulation_result, @@ -170,24 +175,56 @@ async def identify_worst_eaf_contributors( oh_session_id: str, collector_db: CollectorDbSession, simulation_id: str, + duration:int, + po_duration: int ): """ Identify equipment that contributes most to plant EAF reduction - in order to reach a target EAF. + and evaluate if target EAF is physically achievable. """ # Extract results calc_result = simulation_result["calc_result"] plant_result = simulation_result["plant_result"] - # Ensure list of equipment eq_results = calc_result if isinstance(calc_result, list) else [calc_result] # Current plant EAF and gap current_plant_eaf = plant_result.get("eaf", 0) + total_hours = duration + scheduled_outage = int(po_duration) + max_eaf_possible = (total_hours - scheduled_outage) / total_hours * 100 + + # Check if target EAF exceeds theoretical maximum + warning_message = None + if target_eaf > max_eaf_possible: + impossible_gap = target_eaf - max_eaf_possible + required_scheduled_hours = total_hours * (1 - target_eaf / 100) + required_reduction = scheduled_outage - required_scheduled_hours + + warning_message = ( + f"⚠️ Target EAF {target_eaf:.2f}% exceeds theoretical maximum {max_eaf_possible:.2f}%.\n" + f"To achieve it, planned outage must be reduced by approximately " + f"{required_reduction:.1f} hours (from {scheduled_outage:.0f}h → {required_scheduled_hours:.0f}h)." + ) + + # Cap target EAF to max achievable for calculation + target_eaf = max_eaf_possible + eaf_gap = (target_eaf - current_plant_eaf) / 100.0 + if eaf_gap <= 0: + return OptimizationResult( + current_plant_eaf=current_plant_eaf, + target_plant_eaf=target_eaf, + possible_plant_eaf=current_plant_eaf, + eaf_gap=0, + warning_message=warning_message or "Target already achieved or exceeded.", + asset_contributions=[], + optimization_success=True, + simulation_id=simulation_id, + ) - # Get standard scope (equipment allowed for overhaul/optimization) + # Get standard scope standard_scope = await get_standard_scope_by_session_id( db_session=db_session, overhaul_session_id=oh_session_id, @@ -197,35 +234,39 @@ async def identify_worst_eaf_contributors( # Compute contributions asset_contributions = calculate_asset_eaf_contributions( - plant_result, eq_results, standard_scope_location_tags, eaf_gap=eaf_gap + plant_result, + eq_results, + standard_scope_location_tags, + eaf_gap, + scheduled_outage ) - project_eaf_improvement = 0.0 + # Greedy selection to fill EAF gap + project_eaf_improvement_total = 0.0 selected_eq = [] - # Greedy select until gap is closed for asset in asset_contributions: - if project_eaf_improvement >= eaf_gap: + if project_eaf_improvement_total >= eaf_gap: break - - if (project_eaf_improvement + asset.improvement_impact) <= eaf_gap: + if (project_eaf_improvement_total + asset.improvement_impact) <= eaf_gap: selected_eq.append(asset) - project_eaf_improvement += asset.improvement_impact + project_eaf_improvement_total += asset.improvement_impact else: - # allow overshoot tolerance by skipping large ones, continue with smaller ones continue - - possible_eaf_plant = current_plant_eaf + project_eaf_improvement*100 - - selected_eq.sort(key=lambda x: (x.birbaum), reverse=True) - # Build output with efficiency included + possible_eaf_plant = current_plant_eaf + 
project_eaf_improvement_total * 100 + possible_eaf_plant = min(possible_eaf_plant, max_eaf_possible) + + selected_eq.sort(key=lambda x: x.birbaum, reverse=True) + + # Final return return OptimizationResult( current_plant_eaf=current_plant_eaf, target_plant_eaf=target_eaf, possible_plant_eaf=possible_eaf_plant, eaf_gap=eaf_gap, + warning_message=warning_message, asset_contributions=[ { "node": asset.node, @@ -236,10 +277,10 @@ async def identify_worst_eaf_contributors( "system_impact": asset.improvement_impact, "num_of_failures": asset.num_of_failures, "down_time": asset.down_time, - "efficiency": asset.efficiency, + "efficiency": asset.efficiency, } for asset in selected_eq ], - optimization_success=(current_plant_eaf + project_eaf_improvement) >= target_eaf, + optimization_success=(current_plant_eaf + project_eaf_improvement_total * 100) >= target_eaf, simulation_id=simulation_id, ) \ No newline at end of file diff --git a/src/calculation_time_constrains/service.py b/src/calculation_time_constrains/service.py index b31449e..f1dba5f 100644 --- a/src/calculation_time_constrains/service.py +++ b/src/calculation_time_constrains/service.py @@ -68,7 +68,7 @@ class OptimumCostModelWithSpareparts: self.planned_oh_months = self._get_months_between(last_oh_date, next_oh_date) # Set analysis time window (default: 1.5x planned interval) - self.time_window_months = time_window_months or int(self.planned_oh_months * 1.5) + self.time_window_months = time_window_months or int(self.planned_oh_months * 1.3) # Pre-calculate date range for API calls self.date_range = self._generate_date_range() @@ -174,7 +174,7 @@ class OptimumCostModelWithSpareparts: def _calculate_equipment_costs_with_spareparts(self, failures_prediction: Dict, birnbaum_importance: float, preventive_cost: float, failure_replacement_cost: float, ecs, - location_tag: str, planned_overhauls: List = None) -> List[Dict]: + location_tag: str, planned_overhauls: List = None, loss_production_permonth=0) -> List[Dict]: """Calculate costs for each month including sparepart costs and availability""" if not failures_prediction: @@ -349,6 +349,11 @@ class OptimumCostModelWithSpareparts: imp['aeros_node']['node_name']: imp['contribution_factor'] for imp in importance_results["calc_result"] } + + loss_production_permonth = { + imp['aeros_node']['node_name']: (imp['ideal_production'] - imp['production']) / 60 + for imp in importance_results["calc_result"] + } except Exception as e: self.logger.error(f"Failed to get simulation results: {e}") equipment_birnbaum = {} @@ -376,6 +381,8 @@ class OptimumCostModelWithSpareparts: location_tag = equipment.location_tag contribution_factor = equipment_birnbaum.get(location_tag, 0.0) ecs = ecs_tags.get(location_tag, None) + loss_production = loss_production_permonth.get(location_tag, 0) * 960000 + # try: # # Get failure predictions @@ -395,7 +402,8 @@ class OptimumCostModelWithSpareparts: failure_replacement_cost=failure_replacement_cost, location_tag=location_tag, planned_overhauls=[] , # Empty in first pass - ecs=ecs + ecs=ecs, + loss_production_permonth=loss_production ) if not cost_results: @@ -696,17 +704,18 @@ async def run_simulation_with_spareparts(*, db_session, calculation, token: str, time_window_months = 60 sparepart_manager = await load_sparepart_data_from_db(scope=scope, prev_oh_scope=prev_oh_scope, db_session=collector_db_session, analysis_window_months=time_window_months) + # Initialize optimization model with sparepart management optimum_oh_model = OptimumCostModelWithSpareparts( token=token, 
         last_oh_date=prev_oh_scope.end_date,
         next_oh_date=scope.start_date,
-        time_window_months=time_window_months,
         base_url=RBD_SERVICE_API,
         sparepart_manager=sparepart_manager
     )
 
+
     try:
         # Run fleet optimization with sparepart management
         results = await optimum_oh_model.calculate_cost_all_equipment_with_spareparts(
diff --git a/src/sparepart/service.py b/src/sparepart/service.py
index e0eaf39..a4e96d5 100644
--- a/src/sparepart/service.py
+++ b/src/sparepart/service.py
@@ -792,9 +792,11 @@ async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session, analysis
     # prev_oh_scope = await get_prev_oh(db_session=db_session, overhaul_session=scope)
 
     analysis_start_date = prev_oh_scope.end_date
-    analysis_window_months = int(((scope.start_date - prev_oh_scope.end_date).days / 30) * 1.5) if not analysis_window_months else analysis_window_months
+    analysis_window_months = int(((scope.start_date - prev_oh_scope.end_date).days / 30) * 1.3) if not analysis_window_months else analysis_window_months
 
     sparepart_manager = SparepartManager(analysis_start_date, analysis_window_months)
+    start_date = prev_oh_scope.end_date
+    end_date = scope.start_date
 
     # Load sparepart stocks
     # Example query - adjust based on your schema
@@ -832,7 +834,7 @@ async def load_sparepart_data_from_db(scope, prev_oh_scope, db_session, analysis
             wonum,
             asset_location
         FROM public.wo_staging_maximo_2
-        WHERE worktype = 'OH' AND asset_location IS NOT NULL
+        WHERE worktype = 'OH' AND asset_location IS NOT NULL AND asset_unit IN ('3', '00')
     ),
     sparepart_usage AS (
         -- Get sparepart usage for OH work orders
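
The sketches below are illustrative only and are not part of the patch.

The feasibility check added to identify_worst_eaf_contributors reduces to simple arithmetic: with po_duration hours reserved for planned outage inside a duration-hour window, plant EAF can never exceed (duration - po_duration) / duration * 100, and an over-ambitious target can only be reached by shrinking the outage. A minimal standalone sketch of that arithmetic, with helper names and example numbers chosen here for illustration (the 17520 h / 1200 h values are the router defaults in the patch):

def max_achievable_eaf(duration_hours: float, po_duration_hours: float) -> float:
    # EAF ceiling once po_duration_hours are reserved for the planned outage
    return (duration_hours - po_duration_hours) / duration_hours * 100

def required_po_reduction(duration_hours: float, po_duration_hours: float, target_eaf: float) -> float:
    # hours of planned outage that would have to be cut for target_eaf to be reachable
    required_scheduled_hours = duration_hours * (1 - target_eaf / 100)
    return po_duration_hours - required_scheduled_hours

if __name__ == "__main__":
    duration, po = 17520, 1200                  # two years of hours, 1200 h planned outage
    ceiling = max_achievable_eaf(duration, po)  # ~93.15 %
    target = 99.8
    if target > ceiling:
        cut = required_po_reduction(duration, po, target)  # ~1165 h would have to be cut
        print(f"target {target}% exceeds ceiling {ceiling:.2f}%; reduce outage by {cut:.1f} h")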
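
The rewritten calculate_asset_eaf_contributions splits the EAF gap across in-scope assets in proportion to sqrt(birnbaum * contribution_factor), caps each share at the asset's availability headroom, and floors it at a small minimum. A simplified sketch of just that allocation step, under the same constants as the patch; the asset records and names below are made up:

import math

def allocate_gap(assets, eaf_gap_fraction, max_availability=0.995,
                 min_improvement_fraction=0.0001 / 100.0):
    # weight = sqrt(birnbaum * contribution_factor); the sqrt damps very dominant assets
    weighted = []
    for a in assets:
        headroom = max(max_availability - a["availability"], 0.0)
        weight = math.sqrt(a["birnbaum"] * a["contribution_factor"])
        weighted.append((a, weight, headroom))

    total_weight = sum(w for _, w, _ in weighted) or 1.0

    shares = {}
    for a, weight, headroom in weighted:
        share = eaf_gap_fraction * (weight / total_weight)  # proportional slice of the gap
        share = min(share, headroom)                        # cannot exceed availability headroom
        share = max(share, min_improvement_fraction)        # keep a nominal floor
        shares[a["name"]] = share
    return shares

assets = [
    {"name": "BFP-A", "birnbaum": 0.04, "contribution_factor": 0.9, "availability": 0.96},
    {"name": "CWP-B", "birnbaum": 0.01, "contribution_factor": 0.5, "availability": 0.99},
]
print(allocate_gap(assets, eaf_gap_fraction=0.02))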
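
The selection loop in identify_worst_eaf_contributors is a plain greedy pass: walk the assets in ranked order, keep any whose improvement impact still fits under the remaining gap, and skip ones that would overshoot. A self-contained sketch with made-up impact values:

def greedy_select(contributions, eaf_gap):
    # walk the ranked list; keep an asset only if its impact still fits under the gap
    selected, total = [], 0.0
    for name, impact in contributions:
        if total >= eaf_gap:
            break
        if total + impact <= eaf_gap:
            selected.append(name)
            total += impact
        # otherwise skip this asset and keep trying smaller ones
    return selected, total

contributions = [("BFP-A", 0.012), ("CWP-B", 0.007), ("FDF-C", 0.004), ("AH-D", 0.002)]
selected, covered = greedy_select(contributions, eaf_gap=0.015)
print(selected, covered)  # ['BFP-A', 'AH-D'] 0.014 -- CWP-B and FDF-C would overshoot the gap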