@@ -27,6 +27,9 @@ from .schema import CalculationTimeConstrainsRead
from .schema import OptimumResult
from .schema import CalculationSelectedEquipmentUpdate

+import requests
+import datetime


def get_overhaul_cost_by_time_chart(overhaul_cost: float, days: int,numEquipments:int ,decay_base: float = 1.01) -> np.ndarray:
    if overhaul_cost < 0:
@@ -58,25 +61,65 @@ def get_overhaul_cost_by_time_chart(overhaul_cost: float, days: int,numEquipment
    # results = np.where(np.isfinite(results), results, 0)
    # return results

-def get_corrective_cost_time_chart(material_cost: float, service_cost: float, days: int) -> Tuple[np.ndarray, np.ndarray]:
-    day_points = np.arange(0, days)
+async def get_corrective_cost_time_chart(material_cost: float, service_cost: float, location_tag: str, token: str) -> Tuple[np.ndarray, np.ndarray]:
+    """
+    Fetch failure data from the API and calculate corrective costs, ensuring 365 days of data.
+
+    Args:
+        material_cost: Cost of materials per failure
+        service_cost: Cost of service per failure
+        location_tag: Location tag of the equipment
+        token: Authorization token
+
+    Returns:
+        Tuple of (corrective_costs, daily_failure)
+    """
+    url = f'http://192.168.1.82:8000/reliability/main/number-of-failures/{location_tag}/2024-01-01/2024-12-31'
+
+    try:
+        response = requests.get(
+            url,
+            headers={
+                'Content-Type': 'application/json',
+                'Authorization': f'Bearer {token}'
+            },
+        )
+        data = response.json()
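+        # Illustrative payload shape assumed by the parsing below:
+        #   {"data": [{"date": "01 Jan 2024", "num_fail": 2}, ...]}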
+
+        # Create a complete date range for 2024
+        start_date = datetime.datetime(2024, 1, 1)
+        date_range = [start_date + datetime.timedelta(days=x) for x in range(365)]
+
+        # Create a dictionary of existing data
+        data_dict = {
+            datetime.datetime.strptime(item['date'], '%d %b %Y'): item['num_fail']
+            for item in data['data']
+        }
+
+        # Fill in missing dates by carrying forward the last known value
+        complete_data = []
+        last_known_value = 0  # Default value if no data is available
+
+        # Parameters for failure rate
+        base_rate = 0.2       # Base failure rate per day
+        acceleration = 2.4    # How quickly failure rate increases
+        grace_period = 170    # Days before failures start increasing significantly
+        for date in date_range:
+            if date in data_dict:
+                if data_dict[date] is not None:
+                    last_known_value = data_dict[date]
+                complete_data.append(last_known_value)
+            else:
+                complete_data.append(last_known_value)
+
+        # Calculate daily failure rate using a sigmoid function
+        days = 365  # horizon matches the one-year window fetched above
+        day_points = np.arange(0, days)
+        daily_failure_rate = base_rate / (1 + np.exp(-acceleration * (day_points - grace_period) / days))
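+        # With these defaults the modelled rate starts near base_rate / 4
+        # (about 0.05 failures/day), crosses base_rate / 2 exactly at the
+        # grace period, and climbs to roughly 0.16 failures/day by day 365.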
+
+        # Convert to numpy array
+        daily_failure = np.array(complete_data)
+
+        # Calculate cumulative failures
+        failure_counts = np.cumsum(daily_failure_rate)
+
+        # Calculate corrective costs based on cumulative failures and combined costs
+        cost_per_failure = material_cost + service_cost
+        corrective_costs = failure_counts * cost_per_failure
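+        # Illustrative numbers only: with material_cost=300_000 and
+        # service_cost=200_000, cost_per_failure is 500_000, so a cumulative
+        # failure count of 4.2 on a given day gives a corrective cost of
+        # 2_100_000 for that day.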
+
+        return corrective_costs, daily_failure
+    except Exception as e:
+        print(f"Error fetching or processing data: {str(e)}")
+        raise

# def get_corrective_cost_time_chart(material_cost: float, service_cost: float, days: int, numEquipments: int) -> Tuple[np.ndarray, np.ndarray]:
# day_points = np.arange(0, days)

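Not part of the diff: requests.get() inside an async def blocks the event loop for the duration of the HTTP call. A minimal non-blocking sketch of the same fetch, assuming an async client such as httpx were adopted (the helper name is illustrative):

import httpx

async def fetch_number_of_failures(location_tag: str, token: str) -> dict:
    url = f'http://192.168.1.82:8000/reliability/main/number-of-failures/{location_tag}/2024-01-01/2024-12-31'
    async with httpx.AsyncClient() as client:
        # Same endpoint and bearer token as above, awaited instead of blocking
        response = await client.get(url, headers={'Authorization': f'Bearer {token}'})
        response.raise_for_status()
        return response.json()
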
@@ -246,6 +289,7 @@ async def get_calculation_data_by_id(db_session: DbSession, calculation_id) -> C
async def create_calculation_result_service(
    db_session: DbSession,
    calculation: CalculationData,
+    token: str
) -> CalculationTimeConstrainsRead:
    days = 365  # Changed to 365 days as per requirement

@@ -265,15 +309,16 @@ async def create_calculation_result_service(

    # Calculate for each equipment
    for eq in equipments:
-        corrective_costs, daily_failures = get_corrective_cost_time_chart(
+        corrective_costs, daily_failures = await get_corrective_cost_time_chart(
            material_cost=eq.material_cost,
            service_cost=eq.service_cost,
-            days=days
+            token=token,
+            location_tag=eq.equipment.location_tag
        )

        overhaul_cost_points = get_overhaul_cost_by_time_chart(
            calculation_data.parameter.overhaul_cost,
-            days=days,
+            days=len(corrective_costs),
            numEquipments=len(equipments)
        )
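
Not part of the diff: a minimal usage sketch for the new helper, assuming a reachable API and a valid bearer token; the location tag and cost figures are made up for illustration.

import asyncio

async def main() -> None:
    costs, daily_failures = await get_corrective_cost_time_chart(
        material_cost=300_000,
        service_cost=200_000,
        location_tag="P-101",
        token="<bearer token>",
    )
    # costs is the cumulative corrective-cost curve, daily_failures the per-day counts
    print(costs[-1], int(daily_failures.sum()))

asyncio.run(main())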