@@ -50,435 +50,6 @@ client = httpx.AsyncClient(timeout=300.0)
log = logging.getLogger(__name__)
setup_logging(logger=log)


class OptimumCostModel:
    def __init__(self, token: str, last_oh_date: date, next_oh_date: date,
                 time_window_months: Optional[int] = None,
                 base_url: str = "http://192.168.1.82:8000"):
        """
        Initialize the Optimum Cost Model for overhaul timing optimization.

        Args:
            token: API authentication token
            last_oh_date: Date of last overhaul
            next_oh_date: Planned date of next overhaul
            time_window_months: Analysis window in months (default: 1.5x planned interval)
            base_url: API base URL
        """
        self.api_base_url = base_url
        self.token = token
        self.last_oh_date = last_oh_date
        self.next_oh_date = next_oh_date
        self.session = None

        # Calculate planned overhaul interval in months
        self.planned_oh_months = self._get_months_between(last_oh_date, next_oh_date)

        # Set analysis time window (default: 1.5x planned interval)
        self.time_window_months = time_window_months or int(self.planned_oh_months * 1.5)
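        # For example, a 24-month planned interval gives int(24 * 1.5) = 36 months of analysis by default.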

        # Pre-calculate date range for API calls
        self.date_range = self._generate_date_range()

        # Setup logging
        logging.basicConfig(level=logging.INFO)
        self.logger = logging.getLogger(__name__)

        self.logger.info("OptimumCostModel initialized:")
        self.logger.info(f" - Planned OH interval: {self.planned_oh_months} months")
        self.logger.info(f" - Analysis window: {self.time_window_months} months")

    def _get_months_between(self, start_date: date, end_date: date) -> int:
        """Calculate number of months between two dates"""
        return (end_date.year - start_date.year) * 12 + (end_date.month - start_date.month)

    def _generate_date_range(self) -> List[datetime]:
        """Generate date range for analysis based on time window"""
        dates = []
        current_date = datetime.combine(self.last_oh_date, datetime.min.time())
        end_date = current_date + timedelta(days=self.time_window_months * 30)
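        # Note: months are approximated here (a 30-day month for the window length and
        # 31-day steps below), so the generated dates drift slightly from true calendar months.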

        while current_date <= end_date:
            dates.append(current_date)
            current_date += timedelta(days=31)

        return dates

    async def _create_session(self):
        """Create aiohttp session with connection pooling"""
        if self.session is None:
            timeout = aiohttp.ClientTimeout(total=300)
            connector = aiohttp.TCPConnector(
                limit=500,
                limit_per_host=200,
                ttl_dns_cache=300,
                use_dns_cache=True,
                force_close=False,
                enable_cleanup_closed=True
            )
            self.session = aiohttp.ClientSession(
                timeout=timeout,
                connector=connector,
                headers={'Authorization': f'Bearer {self.token}'}
            )

    async def _close_session(self):
        """Close aiohttp session"""
        if self.session:
            await self.session.close()
            self.session = None

    async def get_failures_prediction(self, simulation_id: str, location_tag: str, birnbaum_importance: float):
        """Get failure predictions for equipment from simulation service"""
        plot_result_url = f"{self.api_base_url}/aeros/simulation/result/plot/{simulation_id}/{location_tag}?use_location_tag=1"

        try:
            response = requests.get(
                plot_result_url,
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {self.token}",
                },
                timeout=30
            )
            response.raise_for_status()
            prediction_data = response.json()
        except (requests.RequestException, ValueError) as e:
            self.logger.error(f"Failed to fetch prediction data for {location_tag}: {e}")
            return None

        plot_data = prediction_data.get('data', {}).get('timestamp_outs') if prediction_data.get("data") else None

        if not plot_data:
            self.logger.warning(f"No plot data available for {location_tag}")
            return None

        time_series = create_time_series_data(plot_data, 43830)
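        # The 43830 constant appears to be a horizon in hours: 24 * 365.25 * 5 = 43830,
        # i.e. roughly five years of simulated operation.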
        monthly_data = analyze_monthly_metrics(time_series)

        return monthly_data

    async def get_simulation_results(self, simulation_id: str = "default"):
        """Get simulation results for Birnbaum importance calculations"""
        headers = {
            "Authorization": f"Bearer {self.token}",
            "Content-Type": "application/json"
        }

        calc_result_url = f"{self.api_base_url}/aeros/simulation/result/calc/{simulation_id}?nodetype=RegularNode"
        plant_result_url = f"{self.api_base_url}/aeros/simulation/result/calc/{simulation_id}/plant"

        async with httpx.AsyncClient(timeout=300.0) as client:
            calc_task = client.get(calc_result_url, headers=headers)
            plant_task = client.get(plant_result_url, headers=headers)

            calc_response, plant_response = await asyncio.gather(calc_task, plant_task)

            calc_response.raise_for_status()
            plant_response.raise_for_status()

            calc_data = calc_response.json()["data"]
            plant_data = plant_response.json()["data"]

        return {
            "calc_result": calc_data,
            "plant_result": plant_data
        }

    def _calculate_equipment_costs(self, failures_prediction: Dict, birnbaum_importance: float,
                                   preventive_cost: float, failure_replacement_cost: float,
                                   location_tag: str) -> List[Dict]:
        """Calculate costs for each month for a single equipment"""

        if not failures_prediction:
            self.logger.warning(f"No failure prediction data for {location_tag}")
            return []

        months = list(failures_prediction.keys())
        num_months = len(months)

        # Calculate risk costs and failure costs
        risk_costs = []
        cumulative_risk_costs = []
        failure_counts = []

        cumulative_risk = 0

        for month_key in months:
            data = failures_prediction[month_key]

            # Risk cost = flow_rate × birnbaum_importance × downtime_hours × energy_price
            monthly_risk = data['avg_flow_rate'] * birnbaum_importance * data['total_oos_hours'] * 1000000
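            # Per the comment above, the 1000000 multiplier appears to stand in for the
            # energy price (a fixed currency-per-unit constant); it is not configurable here.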
            risk_costs.append(monthly_risk)

            cumulative_risk += monthly_risk
            cumulative_risk_costs.append(cumulative_risk)

            failure_counts.append(data['cumulative_failures'])

        # Calculate costs for each month
        results = []

        for i in range(num_months):
            month_index = i + 1

            # Failure cost = cumulative failures × replacement cost + cumulative risk cost
            failure_cost = (failure_counts[i] * failure_replacement_cost) + cumulative_risk_costs[i]

            # Preventive cost = overhaul cost distributed over months
            preventive_cost_month = preventive_cost / month_index

            # Total cost = failure cost + preventive cost
            total_cost = failure_cost + preventive_cost_month
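            # Illustrative numbers only: with 2 cumulative failures, a replacement cost of 5e6,
            # a cumulative risk cost of 1e6 and a preventive (overhaul) cost of 12e6 at month 4:
            # failure_cost = 2 * 5e6 + 1e6 = 11e6, preventive_cost_month = 12e6 / 4 = 3e6,
            # total_cost = 14e6. Delaying spreads the preventive cost but accumulates risk.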

            results.append({
                'month': month_index,
                'number_of_failures': failure_counts[i],
                'failure_cost': failure_cost,
                'preventive_cost': preventive_cost_month,
                'total_cost': total_cost,
                'is_after_planned_oh': month_index > self.planned_oh_months,
                'delay_months': max(0, month_index - self.planned_oh_months),
                'risk_cost': cumulative_risk_costs[i],
                'monthly_risk_cost': risk_costs[i],
                'procurement_cost': 0,  # For database compatibility
                'procurement_details': []  # For database compatibility
            })

        return results

    def _find_optimal_timing(self, cost_results: List[Dict], location_tag: str) -> Optional[Dict]:
        """Find optimal timing for equipment overhaul"""
        if not cost_results:
            return None

        # Find month with minimum total cost
        min_cost = float('inf')
        optimal_result = None
        optimal_index = -1

        for i, result in enumerate(cost_results):
            if result['total_cost'] < min_cost:
                min_cost = result['total_cost']
                optimal_result = result
                optimal_index = i

        if optimal_result is None:
            return None

        # Calculate cost comparison with planned timing
        planned_cost = None
        cost_vs_planned = None

        if self.planned_oh_months <= len(cost_results):
            planned_cost = cost_results[self.planned_oh_months - 1]['total_cost']
            cost_vs_planned = optimal_result['total_cost'] - planned_cost
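            # A negative cost_vs_planned means the optimal month is cheaper than overhauling
            # at the planned month; that difference is reported below as savings_from_delay.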

        return {
            'location_tag': location_tag,
            'optimal_month': optimal_result['month'],
            'optimal_index': optimal_index,
            'optimal_cost': optimal_result['total_cost'],
            'failure_cost': optimal_result['failure_cost'],
            'preventive_cost': optimal_result['preventive_cost'],
            'number_of_failures': optimal_result['number_of_failures'],
            'is_delayed': optimal_result['is_after_planned_oh'],
            'delay_months': optimal_result['delay_months'],
            'planned_oh_month': self.planned_oh_months,
            'planned_cost': planned_cost,
            'cost_vs_planned': cost_vs_planned,
            'savings_from_delay': -cost_vs_planned if cost_vs_planned and cost_vs_planned < 0 else 0,
            'cost_of_delay': cost_vs_planned if cost_vs_planned and cost_vs_planned > 0 else 0,
            'all_monthly_costs': cost_results
        }

    async def calculate_optimal_timing_single_equipment(self, equipment, birnbaum_importance: float,
                                                        simulation_id: str = "default") -> Optional[Dict]:
        """Calculate optimal overhaul timing for a single equipment"""

        location_tag = equipment.location_tag
        self.logger.info(f"Calculating optimal timing for {location_tag}")

        # Get failure predictions
        monthly_data = await self.get_failures_prediction(simulation_id, location_tag, birnbaum_importance)

        if not monthly_data:
            self.logger.warning(f"No monthly data available for {location_tag}")
            return None

        # Calculate costs
        preventive_cost = equipment.overhaul_cost + equipment.service_cost
        failure_replacement_cost = equipment.material_cost + (3 * 111000 * 3)  # Material + Labor
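        # Assumption: the (3 * 111000 * 3) term is a fixed labour estimate per corrective
        # replacement (e.g. crew size x rate x duration); the constants are not documented here.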

        cost_results = self._calculate_equipment_costs(
            failures_prediction=monthly_data,
            birnbaum_importance=birnbaum_importance,
            preventive_cost=preventive_cost,
            failure_replacement_cost=failure_replacement_cost,
            location_tag=location_tag
        )

        # Find optimal timing
        optimal_timing = self._find_optimal_timing(cost_results, location_tag)

        if optimal_timing:
            self.logger.info(f"Optimal timing for {location_tag}: Month {optimal_timing['optimal_month']} "
                             f"(Cost: ${optimal_timing['optimal_cost']:,.2f})")

            if optimal_timing['is_delayed']:
                self.logger.info(f" - Delay recommended: {optimal_timing['delay_months']} months")
                self.logger.info(f" - Savings from delay: ${optimal_timing['savings_from_delay']:,.2f}")

        return optimal_timing

    async def calculate_cost_all_equipment(self, db_session, equipments: List, calculation,
                                           preventive_cost: float, simulation_id: str = "default") -> Dict:
        """
        Calculate optimal overhaul timing for entire fleet and save to database
        """

        self.logger.info(f"Starting fleet optimization for {len(equipments)} equipment items")
        max_interval = self.time_window_months

        # Get Birnbaum importance values
        try:
            importance_results = await self.get_simulation_results(simulation_id)
            equipment_birnbaum = {
                imp['aeros_node']['node_name']: imp['contribution']
                for imp in importance_results["calc_result"]
            }
        except Exception as e:
            self.logger.error(f"Failed to get simulation results: {e}")
            equipment_birnbaum = {}

        # Initialize fleet aggregation arrays
        fleet_results = []
        total_corrective_costs = np.zeros(max_interval)
        total_preventive_costs = np.zeros(max_interval)
        total_procurement_costs = np.zeros(max_interval)
        total_costs = np.zeros(max_interval)

        for equipment in equipments:
            location_tag = equipment.location_tag
            birnbaum = equipment_birnbaum.get(location_tag, 0.0)

            if birnbaum == 0.0:
                self.logger.warning(f"No Birnbaum importance found for {location_tag}, using 0.0")

            try:
                # Get failure predictions
                monthly_data = await self.get_failures_prediction(simulation_id, location_tag, birnbaum)

                if not monthly_data:
                    continue

                # Calculate costs
                equipment_preventive_cost = equipment.overhaul_cost + equipment.service_cost
                failure_replacement_cost = equipment.material_cost + (3 * 111000 * 3)

                cost_results = self._calculate_equipment_costs(
                    failures_prediction=monthly_data,
                    birnbaum_importance=birnbaum,
                    preventive_cost=equipment_preventive_cost,
                    failure_replacement_cost=failure_replacement_cost,
                    location_tag=location_tag
                )

                if not cost_results:
                    continue

                # Find optimal timing
                optimal_timing = self._find_optimal_timing(cost_results, location_tag)

                if not optimal_timing:
                    continue

                # Prepare arrays for database (pad to max_interval length)
                corrective_costs = [r["failure_cost"] for r in cost_results]
                preventive_costs = [r["preventive_cost"] for r in cost_results]
                procurement_costs = [r["procurement_cost"] for r in cost_results]
                failures = [r["number_of_failures"] for r in cost_results]
                total_costs_equipment = [r['total_cost'] for r in cost_results]
                procurement_details = [r["procurement_details"] for r in cost_results]

                # Pad arrays to max_interval length
                def pad_array(arr, target_length):
                    if len(arr) < target_length:
                        return arr + [arr[-1]] * (target_length - len(arr))  # Use last value for padding
                    return arr[:target_length]
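                # For example, pad_array([1, 2, 3], 5) returns [1, 2, 3, 3, 3] and
                # pad_array([1, 2, 3], 2) returns [1, 2].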

                corrective_costs = pad_array(corrective_costs, max_interval)
                preventive_costs = pad_array(preventive_costs, max_interval)
                procurement_costs = pad_array(procurement_costs, max_interval)
                failures = pad_array(failures, max_interval)
                total_costs_equipment = pad_array(total_costs_equipment, max_interval)
                procurement_details = pad_array(procurement_details, max_interval)

                # Create database result object
                equipment_result = CalculationEquipmentResult(
                    corrective_costs=corrective_costs,
                    overhaul_costs=preventive_costs,
                    procurement_costs=procurement_costs,
                    daily_failures=failures,
                    location_tag=equipment.location_tag,
                    material_cost=equipment.material_cost,
                    service_cost=equipment.service_cost,
                    optimum_day=optimal_timing['optimal_index'],
                    calculation_data_id=calculation.id,
                    procurement_details=procurement_details
                )

                fleet_results.append(equipment_result)

                # Aggregate costs for fleet analysis
                total_corrective_costs += np.array(corrective_costs)
                total_preventive_costs += np.array(preventive_costs)
                total_procurement_costs += np.array(procurement_costs)
                total_costs += np.array(total_costs_equipment)

                self.logger.info(f"Processed {location_tag}: Optimal month {optimal_timing['optimal_month']}")

            except Exception as e:
                self.logger.error(f"Failed to calculate timing for {location_tag}: {e}")
                continue

        # Calculate fleet optimal interval
        fleet_optimal_index = np.argmin(total_costs)
        fleet_optimal_cost = total_costs[fleet_optimal_index]
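        # fleet_optimal_index is the 0-based index of the cheapest month for the fleet as a
        # whole, so the month reported below (and fleet_optimal_interval) is index + 1.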

        # Update calculation with results
        calculation.optimum_oh_day = fleet_optimal_index
        calculation.max_interval = max_interval

        # Save all results to database
        db_session.add_all(fleet_results)
        await db_session.commit()

        self.logger.info("Fleet optimization completed:")
        self.logger.info(f" - Fleet optimal month: {fleet_optimal_index + 1}")
        self.logger.info(f" - Fleet optimal cost: ${fleet_optimal_cost:,.2f}")
        self.logger.info(f" - Results saved to database for {len(fleet_results)} equipment")

        return {
            'id': calculation.id,
            'fleet_results': fleet_results,
            'fleet_optimal_interval': fleet_optimal_index + 1,
            'fleet_optimal_cost': fleet_optimal_cost,
            'total_corrective_costs': total_corrective_costs.tolist(),
            'total_preventive_costs': total_preventive_costs.tolist(),
            'total_procurement_costs': total_procurement_costs.tolist(),
            'analysis_parameters': {
                'planned_oh_months': self.planned_oh_months,
                'analysis_window_months': self.time_window_months,
                'last_oh_date': self.last_oh_date.isoformat(),
                'next_oh_date': self.next_oh_date.isoformat()
            }
        }


class OptimumCostModelWithSpareparts:
    def __init__(self, token: str, last_oh_date: date, next_oh_date: date,
                 sparepart_manager,
@@ -603,7 +174,7 @@ class OptimumCostModelWithSpareparts:
        }

    def _calculate_equipment_costs_with_spareparts(self, failures_prediction: Dict, birnbaum_importance: float,
                                                   preventive_cost: float, failure_replacement_cost: float,
                                                   preventive_cost: float, failure_replacement_cost: float, ecs,
                                                   location_tag: str, planned_overhauls: List = None) -> List[Dict]:
        """Calculate costs for each month including sparepart costs and availability"""

@@ -614,21 +185,20 @@ class OptimumCostModelWithSpareparts:
        months = list(failures_prediction.keys())
        num_months = len(months)

        # Calculate basic costs (same as before)
        risk_costs = []
        cumulative_risk_costs = []
        failure_counts = []

        cumulative_risk = 0
        monthly_risk_cost_per_failure = 0

        if ecs:
            is_trip = 1 if ecs.get("Diskripsi Operasional Akibat Equip. Failure") == "Trip" else 0
            if is_trip:
                downtime = ecs.get("Estimasi Waktu Maint. / Downtime / Gangguan (Jam)")
                monthly_risk_cost_per_failure = 660 * 1000000 * is_trip * downtime
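                # The ECS keys above are Indonesian: "Diskripsi Operasional Akibat Equip. Failure"
                # is roughly "operational description (impact) of an equipment failure", and
                # "Estimasi Waktu Maint. / Downtime / Gangguan (Jam)" is "estimated maintenance /
                # downtime / disturbance time (hours)". The 660 * 1000000 factor appears to be an
                # assumed production-loss cost per downtime hour when a failure trips the unit.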
                self.logger.debug(f"ECS Trip {location_tag}: {monthly_risk_cost_per_failure}")

        for month_key in months:
            data = failures_prediction[month_key]
            monthly_risk = data['avg_flow_rate'] * birnbaum_importance * data['total_oos_hours'] * 1000000
            risk_costs.append(monthly_risk)

            cumulative_risk += monthly_risk
            cumulative_risk_costs.append(cumulative_risk)

            failure_counts.append(data['cumulative_failures'])

        # Calculate costs for each month including sparepart considerations
@@ -638,7 +208,7 @@ class OptimumCostModelWithSpareparts:
            month_index = i + 1

            # Basic failure and preventive costs
            failure_cost = (failure_counts[i] * failure_replacement_cost) + cumulative_risk_costs[i]
            failure_cost = (failure_counts[i] * (failure_replacement_cost + monthly_risk_cost_per_failure))
            preventive_cost_month = preventive_cost / month_index

            # Check sparepart availability for this month
@@ -667,8 +237,6 @@
                'total_cost': total_cost,
                'is_after_planned_oh': month_index > self.planned_oh_months,
                'delay_months': max(0, month_index - self.planned_oh_months),
                'risk_cost': cumulative_risk_costs[i],
                'monthly_risk_cost': risk_costs[i],
                'procurement_details': procurement_details,
                'sparepart_available': sparepart_analysis['available'],
                'sparepart_status': sparepart_analysis['message'],
@@ -788,45 +356,55 @@ class OptimumCostModelWithSpareparts:
        # Phase 1: Calculate individual optimal timings without considering interactions
        individual_results = {}

        with open('src/calculation_time_constrains/full_equipment_with_downtime_opdesc.json', 'r') as f:
            data = json.load(f)

        ecs_tags = {
            eq["Location"]: eq
            for eq in data
        }

        for equipment in equipments:
            location_tag = equipment.location_tag
            birnbaum = equipment_birnbaum.get(location_tag, 0.0)
            ecs = ecs_tags.get(location_tag, None)

            try:
                # Get failure predictions
                monthly_data = await self.get_failures_prediction(simulation_id, location_tag, birnbaum)

                if not monthly_data:
                    continue

                # Calculate costs without considering other equipment (first pass)
                equipment_preventive_cost = equipment.overhaul_cost + equipment.service_cost
                failure_replacement_cost = equipment.material_cost + (3 * 111000 * 3)

                cost_results = self._calculate_equipment_costs_with_spareparts(
                    failures_prediction=monthly_data,
                    birnbaum_importance=birnbaum,
                    preventive_cost=equipment_preventive_cost,
                    failure_replacement_cost=failure_replacement_cost,
                    location_tag=location_tag,
                    planned_overhauls=[]  # Empty in first pass
                )

                if not cost_results:
                    continue

                # Find individual optimal timing
                optimal_timing = self._find_optimal_timing_with_spareparts(cost_results, location_tag)
            # try:
            #     # Get failure predictions
            monthly_data = await self.get_failures_prediction(simulation_id, location_tag, birnbaum)

            if not monthly_data:
                continue

            # Calculate costs without considering other equipment (first pass)
            equipment_preventive_cost = equipment.overhaul_cost + equipment.service_cost
            failure_replacement_cost = equipment.material_cost + (3 * 111000 * 3)

            cost_results = self._calculate_equipment_costs_with_spareparts(
                failures_prediction=monthly_data,
                birnbaum_importance=birnbaum,
                preventive_cost=equipment_preventive_cost,
                failure_replacement_cost=failure_replacement_cost,
                location_tag=location_tag,
                planned_overhauls=[],  # Empty in first pass
                ecs=ecs
            )

            if not cost_results:
                continue

            # Find individual optimal timing
            optimal_timing = self._find_optimal_timing_with_spareparts(cost_results, location_tag)

                if optimal_timing:
                    optimal_timing['all_monthly_costs'] = cost_results
                    individual_results[location_tag] = optimal_timing

            if optimal_timing:
                optimal_timing['all_monthly_costs'] = cost_results
                individual_results[location_tag] = optimal_timing

                    self.logger.info(f"Individual optimal for {location_tag}: Month {optimal_timing['optimal_month']}")
                self.logger.info(f"Individual optimal for {location_tag}: Month {optimal_timing['optimal_month']}")

            except Exception as e:
                self.logger.error(f"Failed to calculate individual timing for {location_tag}: {e}")
                continue
            # except Exception as e:
            #     self.logger.error(f"Failed to calculate individual timing for {location_tag}: {e}")
            #     raise Exception(e)

        # Phase 2: Optimize considering sparepart interactions
        self.logger.info("Phase 2: Optimizing with sparepart interactions...")
@@ -1105,7 +683,9 @@ async def run_simulation_with_spareparts(*, db_session, calculation, token: str,
        db_session=db_session, calculation_id=calculation.id
    )

    sparepart_manager = await load_sparepart_data_from_db(scope=scope, prev_oh_scope=prev_oh_scope, db_session=collector_db_session)
    time_window_months = 60
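    # The sparepart-aware run pins the analysis window to 60 months (five years) and passes it
    # to the sparepart loader as analysis_window_months, presumably so that sparepart data
    # covers the same horizon as the cost optimization.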

    sparepart_manager = await load_sparepart_data_from_db(scope=scope, prev_oh_scope=prev_oh_scope, db_session=collector_db_session, analysis_window_months=time_window_months)

    # Initialize optimization model with sparepart management
    optimum_oh_model = OptimumCostModelWithSpareparts(
@@ -1138,57 +718,6 @@ async def run_simulation_with_spareparts(*, db_session, calculation, token: str,
        await optimum_oh_model._close_session()


async def run_simulation(*, db_session, calculation, token: str, collector_db_session,
                         time_window_months: Optional[int] = None,
                         simulation_id: str = "default") -> Dict:
    """
    Run complete overhaul optimization simulation

    Args:
        time_window_months: Analysis window in months (default: 1.5x planned interval)
        simulation_id: Simulation ID for failure predictions
    """

    # Get equipment and scope data
    equipments = await get_standard_scope_by_session_id(
        db_session=db_session,
        overhaul_session_id=calculation.overhaul_session_id,
        collector_db=collector_db_session
    )

    scope = await get_scope(db_session=db_session, overhaul_session_id=calculation.overhaul_session_id)
    prev_oh_scope = await get_prev_oh(db_session=db_session, overhaul_session=scope)

    calculation_data = await get_calculation_data_by_id(
        db_session=db_session, calculation_id=calculation.id
    )

    # Initialize optimization model
    optimum_oh_model = OptimumCostModel(
        token=token,
        last_oh_date=prev_oh_scope.end_date,
        next_oh_date=scope.start_date,
        time_window_months=time_window_months,
        base_url=RBD_SERVICE_API
    )

    try:
        # Run fleet optimization and save to database
        results = await optimum_oh_model.calculate_cost_all_equipment(
            db_session=db_session,
            equipments=equipments,
            calculation=calculation_data,
            preventive_cost=calculation_data.parameter.overhaul_cost,
            simulation_id=simulation_id
        )

        return results

    finally:
        await optimum_oh_model._close_session()


async def get_corrective_cost_time_chart(
    material_cost: float,
    service_cost: float,