| field | value | date |
|---|---|---|
| author | Radu Nicolae <rnicolae04@gmail.com> | 2025-06-16 18:01:07 +0200 |
| committer | GitHub <noreply@github.com> | 2025-06-16 18:01:07 +0200 |
| commit | 0df3d9ced743ac3385dd710c7133a6cf369b051c (patch) | |
| tree | eff5d6d67c275643e229731ba08c5fe7dc4ccd0a /opendc-experiments/opendc-experiments-m3sa/src/main/python/models | |
| parent | c7e303ad1b5217e2ff24cee9538ac841d6149706 (diff) | |
integrated M3SA, updated with tests and CpuPowerModels
Diffstat (limited to 'opendc-experiments/opendc-experiments-m3sa/src/main/python/models')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | opendc-experiments/opendc-experiments-m3sa/src/main/python/models/Model.py | 70 |
| -rw-r--r-- | opendc-experiments/opendc-experiments-m3sa/src/main/python/models/MultiModel.py | 501 |
| -rw-r--r-- | opendc-experiments/opendc-experiments-m3sa/src/main/python/models/__init__.py | 3 |
| -rw-r--r-- | opendc-experiments/opendc-experiments-m3sa/src/main/python/models/meta_model.py (renamed from opendc-experiments/opendc-experiments-m3sa/src/main/python/models/MetaModel.py) | 142 |
| -rw-r--r-- | opendc-experiments/opendc-experiments-m3sa/src/main/python/models/model.py | 32 |
| -rw-r--r-- | opendc-experiments/opendc-experiments-m3sa/src/main/python/models/multi_model.py | 410 |
6 files changed, 495 insertions, 663 deletions
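For orientation before the full diff: the central data-reduction step shared by the removed `MultiModel.py` and the new `multi_model.py` is windowed aggregation (`mean_of_chunks`), in which the raw per-timestamp series is split into fixed-size windows and each window is replaced by its mean. A minimal, self-contained sketch of that idea (NumPy only; the sample data is illustrative and not taken from this commit):

```python
import numpy as np

def mean_of_chunks(values: np.ndarray, window_size: int) -> np.ndarray:
    """Split `values` into consecutive windows of `window_size` and average each window."""
    if window_size == 1:
        return values
    chunks = [values[i:i + window_size] for i in range(0, len(values), window_size)]
    return np.array([np.mean(chunk) for chunk in chunks])

raw = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
print(mean_of_chunks(raw, 3))  # -> [2. 5. 7.]; the trailing window may be shorter
```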
diff --git a/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/Model.py b/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/Model.py deleted file mode 100644 index f60f0bb0..00000000 --- a/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/Model.py +++ /dev/null @@ -1,70 +0,0 @@ -""" -A model is the output of simulator. It contains the data the simulator output, under a certain topology, seed, -workload, datacenter configuration, etc. A model is further used in the analyzer as part of the MultiModel class, -and further in the MetaModel class. - -:param sim: the simulation data of the model -""" -import json -from dataclasses import dataclass, field - -@dataclass -class Model: - """ - Represents a single simulation output containing various data metrics collected under specific simulation conditions. - A Model object stores raw and processed simulation data and is designed to interact with higher-level structures like - MultiModel and MetaModel for complex data analysis. - - Attributes: - raw_sim_data (list): Initial raw data from the simulator output. - processed_sim_data (list): Data derived from raw_sim_data after applying certain processing operations like aggregation or smoothing. - cumulative_time_series_values (list): Stores cumulative data values useful for time series analysis. - id (int): Unique identifier for the model, typically used for tracking and referencing within analysis tools. - path (str): Base path for storing or accessing related data files. - cumulated (float): Cumulative sum of processed data, useful for quick summaries and statistical analysis. - experiment_name (str): A descriptive name for the experiment associated with this model, potentially extracted from external metadata. - margins_of_error (list): Stores error margins associated with the data, useful for uncertainty analysis. - topologies (list): Describes the network or system topologies used during the simulation. - workloads (list): Lists the types of workloads applied during the simulation, affecting the simulation's applicability and scope. - allocation_policies (list): Details the resource allocation policies used, which influence the simulation outcomes. - carbon_trace_paths (list): Paths to data files containing carbon output or usage data, important for environmental impact studies. - - Methods: - parse_trackr(): Reads additional configuration and metadata from a JSON file named 'trackr.json', enhancing the model with detailed context information. - - Usage: - Model objects are typically instantiated with raw data from simulation outputs and an identifier. After instantiation, - the 'parse_trackr' method can be called to load additional experimental details from a corresponding JSON file. - """ - - path: str - raw_sim_data: list - id: int - processed_sim_data: list = field(default_factory=list) - cumulative_time_series_values: list = field(default_factory=list) - cumulated: float = 0.0 - experiment_name: str = "" - margins_of_error: list = field(default_factory=list) - topologies: list = field(default_factory=list) - workloads: list = field(default_factory=list) - allocation_policies: list = field(default_factory=list) - carbon_trace_paths: list = field(default_factory=list) - - def parse_trackr(self): - """ - Parses the 'trackr.json' file located in the model's base path to extract and store detailed experimental metadata. - This method enhances the model with comprehensive contextual information about the simulation environment. 
- - :return: None - :side effect: Updates model attributes with data from the 'trackr.json' file, such as experiment names, topologies, and policies. - :raises FileNotFoundError: If the 'trackr.json' file does not exist at the specified path. - :raises json.JSONDecodeError: If there is an error parsing the JSON data. - """ - trackr_path = self.path + "/trackr.json" - with open(trackr_path) as f: - trackr = json.load(f) - self.experiment_name = trackr.get(self.id, {}).get('name', "") - self.topologies = trackr.get(self.id, {}).get('topologies', []) - self.workloads = trackr.get(self.id, {}).get('workloads', []) - self.allocation_policies = trackr.get(self.id, {}).get('allocationPolicies', []) - self.carbon_trace_paths = trackr.get(self.id, {}).get('carbonTracePaths', []) diff --git a/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/MultiModel.py b/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/MultiModel.py deleted file mode 100644 index 17a92765..00000000 --- a/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/MultiModel.py +++ /dev/null @@ -1,501 +0,0 @@ -import matplotlib.pyplot as plt -import numpy as np -import os -import pyarrow.parquet as pq -import time -from matplotlib.ticker import MaxNLocator, FuncFormatter - -from simulator_specifics import * -from .MetaModel import MetaModel -from .Model import Model - - -def is_meta_model(model): - """ - Check if the given model is a MetaModel based on its ID. A metamodel will always have an id of -101. - - Args: - model (Model): The model to check. - - Returns: - bool: True if model is MetaModel, False otherwise. - """ - return model.id == MetaModel.META_MODEL_ID - - -class MultiModel: - """ - Handles multiple simulation models, aggregates their data based on user-defined parameters, - and generates plots and statistics. - - Attributes: - user_input (dict): Configuration dictionary containing user settings for model processing. - path (str): The base directory path where output files and analysis results are stored. - window_size (int): The size of the window for data aggregation, which affects how data smoothing and granularity are handled. - models (list of Model): A list of Model instances that store the simulation data. - metric (str): The specific metric to be analyzed and plotted, as defined by the user. - measure_unit (str): The unit of measurement for the simulation data, adjusted according to the user's specifications. - output_folder_path (str): Path to the folder where output files are saved. - raw_output_path (str): Directory path where raw simulation data is stored. - analysis_file_path (str): Path to the file where detailed analysis results are recorded. - plot_type (str): The type of plot to generate, which can be 'time_series', 'cumulative', or 'cumulative_time_series'. - plot_title (str): The title of the plot. - x_label (str), y_label (str): Labels for the x and y axes of the plot. - x_min (float), x_max (float), y_min (float), y_max (float): Optional parameters to define axis limits for the plots. - - Methods: - parse_user_input(window_size): Parses and sets the class attributes based on the provided user input. - adjust_unit(): Adjusts the unit of measurement based on user settings, applying appropriate metric prefixes. - set_paths(): Initializes the directory paths for storing outputs and analysis results. - init_models(): Reads simulation data from Parquet files and initializes Model instances. 
- compute_windowed_aggregation(): Processes the raw data by applying a windowed aggregation function for smoothing. - generate_plot(): Orchestrates the generation of the specified plot type by calling the respective plotting functions. - generate_time_series_plot(): Generates a time series plot of the aggregated data. - generate_cumulative_plot(): Creates a bar chart showing cumulative data for each model. - generate_cumulative_time_series_plot(): Produces a plot that displays cumulative data over time for each model. - save_plot(): Saves the generated plot to a PDF file in the specified directory. - output_stats(): Writes detailed statistics of the simulation to an analysis file for record-keeping. - mean_of_chunks(np_array, window_size): Calculates the mean of data segments for smoothing and processing. - get_cumulative_limits(model_sums): Determines appropriate x-axis limits for cumulative plots based on the model data. - - Usage: - To use this class, instantiate it with a dictionary of user settings, a path for outputs, and optionally a window size. - Call the `generate_plot` method to process the data and generate plots as configured by the user. - """ - - def __init__(self, user_input, path, window_size=-1): - """ - Initializes the MultiModel with provided user settings and prepares the environment. - - :param user_input (dict): Configurations and settings from the user. - :param path (str): Path where output and analysis will be stored. - :param window_size (int): The size of the window to aggregate data; uses user input if -1. - :return: None - """ - - self.starting_time = time.time() - self.end_time = None - self.workload_time = None - - self.user_input = user_input - - self.metric = None - self.measure_unit = None - self.path = path - self.models = [] - - self.folder_path = None - self.output_folder_path = None - self.raw_output_path = None - self.analysis_file_path = None - self.unit_scaling = 1 - self.window_size = -1 - self.window_function = "median" - self.max_model_len = 0 - self.seed = 0 - - self.plot_type = None - self.plot_title = None - self.x_label = None - self.y_label = None - self.x_min = None - self.x_max = None - self.y_min = None - self.y_max = None - self.plot_path = None - - self.parse_user_input(window_size) - self.set_paths() - self.init_models() - - self.compute_windowed_aggregation() - - def parse_user_input(self, window_size): - """ - Parses and sets attributes based on user input. - - :param window_size (int): Specified window size for data aggregation, defaults to user_input if -1. - :return: None - """ - if window_size == -1: - self.window_size = self.user_input["window_size"] - else: - self.window_size = window_size - self.metric = self.user_input["metric"] - self.measure_unit = self.adjust_unit() - self.window_function = self.user_input["window_function"] - self.seed = self.user_input["seed"] - - self.plot_type = self.user_input["plot_type"] - self.plot_title = self.user_input["plot_title"] - if self.user_input["x_label"] == "": - self.x_label = "Samples" - else: - self.x_label = self.user_input["x_label"] - - if self.user_input["y_label"] == "": - self.y_label = self.metric + " [" + self.measure_unit + "]" - else: - self.y_label = self.user_input["y_label"] - - self.y_min = self.user_input["y_min"] - self.y_max = self.user_input["y_max"] - self.x_min = self.user_input["x_min"] - self.x_max = self.user_input["x_max"] - - def adjust_unit(self): - """ - Adjusts the unit of measurement according to the scaling magnitude specified by the user. 
- This method translates the given measurement scale into a scientifically accepted metric prefix. - - :return str: The metric prefixed by the appropriate scale (e.g., 'kWh' for kilo-watt-hour if the scale is 3). - :raise ValueError: If the unit scaling magnitude provided by the user is not within the accepted range of scaling factors. - """ - prefixes = ['n', 'μ', 'm', '', 'k', 'M', 'G', 'T'] - scaling_factors = [-9, -6, -3, 1, 3, 6, 9] - given_metric = self.user_input["current_unit"] - self.unit_scaling = self.user_input["unit_scaling_magnitude"] - - if self.unit_scaling not in scaling_factors: - raise ValueError( - "Unit scaling factor not found. Please enter a valid unit from [-9, -6, -3, 1, 3, 6, 9].") - - if self.unit_scaling == 1: - return given_metric - - for i in range(len(scaling_factors)): - if self.unit_scaling == scaling_factors[i]: - self.unit_scaling = 10 ** self.unit_scaling - result = prefixes[i] + given_metric - return result - - def set_paths(self): - """ - Configures and initializes the directory paths for output and analysis based on the base directory provided. - This method sets paths for the raw output and detailed analysis results, ensuring directories are created if - they do not already exist, and prepares a base file for capturing analytical summaries. - - :return: None - :side effect: Creates necessary directories and files for output and analysis. - """ - self.output_folder_path = os.getcwd() + "/" + self.path - self.raw_output_path = os.getcwd() + "/" + self.path + "/raw-output" - self.analysis_file_path = os.getcwd() + "/" + self.path + "/simulation-analysis/" - os.makedirs(self.analysis_file_path, exist_ok=True) - self.analysis_file_path = os.path.join(self.analysis_file_path, "analysis.txt") - if not os.path.exists(self.analysis_file_path): - with open(self.analysis_file_path, "w") as f: - f.write("Analysis file created.\n") - - def init_models(self): - """ - Initializes models from the simulation output stored in Parquet files. This method reads each Parquet file, - processes the relevant data, and initializes Model instances which are stored in the model list. - - :return: None - :raise ValueError: If the unit scaling has not been set prior to model initialization. - """ - model_id = 0 - - for simulation_folder in os.listdir(self.raw_output_path): - if simulation_folder == "metamodel": - continue - path_of_parquet_file = f"{self.raw_output_path}/{simulation_folder}/seed={self.seed}/{SIMULATION_DATA_FILE}.parquet" - parquet_file = pq.read_table(path_of_parquet_file).to_pandas() - raw = parquet_file.select_dtypes(include=[np.number]).groupby("timestamp") - raw = raw[self.metric].sum().values - - if self.unit_scaling is None: - raise ValueError("Unit scaling factor is not set. Please ensure it is set correctly.") - - raw = np.divide(raw, self.unit_scaling) - - if self.user_input["samples_per_minute"] > 0: - MINUTES_IN_DAY = 1440 - self.workload_time = len(raw) * self.user_input["samples_per_minute"] / MINUTES_IN_DAY - - model = Model(raw_sim_data=raw, id=model_id, path=self.output_folder_path) - self.models.append(model) - model_id += 1 - - self.max_model_len = min([len(model.raw_sim_data) for model in self.models]) - - def compute_windowed_aggregation(self): - """ - Applies a windowed aggregation function to each model's dataset. This method is typically used for smoothing - or reducing data granularity. It involves segmenting the dataset into windows of specified size and applying - an aggregation function to each segment. 
- - :return: None - :side effect: Modifies each model's processed_sim_data attribute to contain aggregated data. - """ - if self.plot_type != "cumulative": - for model in self.models: - numeric_values = model.raw_sim_data - model.processed_sim_data = self.mean_of_chunks(numeric_values, self.window_size) - - def generate_plot(self): - """ - Creates and saves plots based on the processed data from multiple models. This method determines - the type of plot to generate based on user input and invokes the appropriate plotting function. - - The plotting options supported are 'time_series', 'cumulative', and 'cumulative_time_series'. - Depending on the type specified, this method delegates to specific plot-generating functions. - - :return: None - :raises ValueError: If the plot type specified is not recognized or supported by the system. - :side effect: - - Generates and saves a plot to the file system. - - Updates the plot attributes based on the generated plot. - - Displays the plot on the matplotlib figure canvas. - """ - plt.figure(figsize=(12, 10)) - plt.xticks(size=22) - plt.yticks(size=22) - plt.ylabel(self.y_label, size=26) - plt.xlabel(self.x_label, size=26) - plt.title(self.plot_title, size=26) - plt.grid() - - formatter = FuncFormatter(lambda x, _: '{:,}'.format(int(x)) if x >= 1000 else int(x)) - ax = plt.gca() - ax.xaxis.set_major_formatter(formatter) - # ax.yaxis.set_major_formatter(formatter) yaxis has formatting issues - to solve in a future iteration - - if self.user_input['x_ticks_count'] is not None: - ax = plt.gca() - ax.xaxis.set_major_locator(MaxNLocator(self.user_input['x_ticks_count'])) - - if self.user_input['y_ticks_count'] is not None: - ax = plt.gca() - ax.yaxis.set_major_locator(MaxNLocator(self.user_input['y_ticks_count'])) - - self.set_x_axis_lim() - self.set_y_axis_lim() - - if self.plot_type == "time_series": - self.generate_time_series_plot() - elif self.plot_type == "cumulative": - self.generate_cumulative_plot() - elif self.plot_type == "cumulative_time_series": - self.generate_cumulative_time_series_plot() - else: - raise ValueError( - "Plot type not recognized. Please enter a valid plot type. The plot can be either " - "'time_series', 'cumulative', or 'cumulative_time_series'." - ) - - plt.tight_layout() - plt.subplots_adjust(right=0.85) - plt.legend(fontsize=12, bbox_to_anchor=(1, 1)) - self.save_plot() - self.output_stats() - - def generate_time_series_plot(self): - """ - Plots time series data for each model. This function iterates over each model, applies the defined - windowing function to smooth the data, and plots the resulting series. - - :return: None - :side effect: Plots are displayed on the matplotlib figure canvas. - """ - for model in self.models: - label = "Meta-Model" if is_meta_model(model) else "Model " + str(model.id) - if is_meta_model(model): - repeated_means = np.repeat(means, self.window_size)[:len(model.processed_sim_data) * self.window_size] - plt.plot( - repeated_means, - drawstyle='steps-mid', - label=label, - color="red", - linestyle="--", - marker="o", - markevery=max(1, len(repeated_means) // 50), - linewidth=2 - ) - else: - means = self.mean_of_chunks(model.raw_sim_data, self.window_size) - repeated_means = np.repeat(means, self.window_size)[:len(model.raw_sim_data)] - plt.plot(repeated_means, drawstyle='steps-mid', label=label) - - def generate_cumulative_plot(self): - """ - Generates a horizontal bar chart showing cumulative data for each model. 
This function - aggregates total values per model and displays them in a bar chart, providing a visual - comparison of total values across models. - - :return: None - :side effect: Plots are displayed on the matplotlib figure canvas. - """ - plt.xlim(self.get_cumulative_limits(model_sums=self.sum_models_entries())) - plt.ylabel("Model ID", size=20) - plt.xlabel("Total " + self.metric + " [" + self.measure_unit + "]") - plt.yticks(range(len(self.models)), [model.id for model in self.models]) - plt.grid(False) - - cumulated_energies = self.sum_models_entries() - for i, model in enumerate(self.models): - label = "Meta-Model" if is_meta_model(model) else "Model " + str(model.id) - if is_meta_model(model): - plt.barh(label=label, y=i, width=cumulated_energies[i], color="red") - else: - plt.barh(label=label, y=i, width=cumulated_energies[i]) - plt.text(cumulated_energies[i], i, str(cumulated_energies[i]), ha='left', va='center', size=26) - - def generate_cumulative_time_series_plot(self): - """ - Generates a plot showing the cumulative data over time for each model. This visual representation is - useful for analyzing trends and the accumulation of values over time. - - :return: None - :side effect: Displays the cumulative data over time on the matplotlib figure canvas. - """ - self.compute_cumulative_time_series() - - for model in self.models: - if is_meta_model(model): - cumulative_repeated = np.repeat(model.cumulative_time_series_values, self.window_size)[ - :len(model.processed_sim_data) * self.window_size] - plt.plot( - cumulative_repeated, - drawstyle='steps-mid', - label=("Meta-Model"), - color="red", - linestyle="--", - marker="o", - markevery=max(1, len(cumulative_repeated) // 10), - linewidth=3 - ) - else: - cumulative_repeated = np.repeat(model.cumulative_time_series_values, self.window_size)[ - :len(model.raw_sim_data)] - plt.plot(cumulative_repeated, drawstyle='steps-mid', label=("Model " + str(model.id))) - - def compute_cumulative_time_series(self): - """ - Computes the cumulative sum of processed data over time for each model, storing the result for use in plotting. - - :return: None - :side effect: Updates each model's 'cumulative_time_series_values' attribute with the cumulative sums. - """ - for model in self.models: - cumulative_array = [] - _sum = 0 - for value in model.processed_sim_data: - _sum += value - cumulative_array.append(_sum * self.window_size) - model.cumulative_time_series_values = cumulative_array - - def save_plot(self): - """ - Saves the current plot to a PDF file in the specified directory, constructing the file path from the - plot attributes and ensuring that the directory exists before saving. - - :return: None - :side effect: Creates or overwrites a PDF file containing the plot in the designated folder. - """ - folder_prefix = self.output_folder_path + "/simulation-analysis/" + self.metric + "/" - self.plot_path = folder_prefix + self.plot_type + "_plot_multimodel_metric=" + self.metric + "_window=" + str( - self.window_size) + ".pdf" - plt.savefig(self.plot_path) - - def set_x_axis_lim(self): - """ - Sets the x-axis limits for the plot based on user-defined minimum and maximum values. If values - are not specified, the axis limits will default to encompassing all data points. - - :return: None - :side effect: Adjusts the x-axis limits of the current matplotlib plot. 
- """ - if self.x_min is not None: - plt.xlim(left=self.x_min) - - if self.x_max is not None: - plt.xlim(right=self.x_max) - - def set_y_axis_lim(self): - """ - Dynamically sets the y-axis limits to be slightly larger than the range of the data, enhancing - the readability of the plot by ensuring all data points are comfortably within the view. - - :return: None - :side effect: Adjusts the y-axis limits of the current matplotlib plot. - """ - if self.y_min is not None: - plt.ylim(bottom=self.y_min) - if self.y_max is not None: - plt.ylim(top=self.y_max) - - def sum_models_entries(self): - """ - Computes the total values from each model for use in cumulative plotting. This method aggregates - the data across all models and prepares it for cumulative display. - - :return: List of summed values for each model, useful for plotting and analysis. - """ - models_sums = [] - for (i, model) in enumerate(self.models): - if is_meta_model(model): - models_sums.append(model.cumulated) - else: - cumulated_energy = model.raw_sim_data.sum() - cumulated_energy = round(cumulated_energy, 2) - models_sums.append(cumulated_energy) - - return models_sums - - def output_stats(self): - """ - Records and writes detailed simulation statistics to an analysis file. This includes time stamps, - performance metrics, and other relevant details. - - :return: None - :side effect: Appends detailed simulation statistics to an existing file for record-keeping and analysis. - """ - self.end_time = time.time() - with open(self.analysis_file_path, "a") as f: - f.write("\n\n========================================\n") - f.write("Simulation made at " + time.strftime("%Y-%m-%d %H:%M:%S") + "\n") - f.write("Metric: " + self.metric + "\n") - f.write("Unit: " + self.measure_unit + "\n") - f.write("Window size: " + str(self.window_size) + "\n") - f.write("Sample count in raw sim data: " + str(self.max_model_len) + "\n") - f.write("Computing time " + str(round(self.end_time - self.starting_time, 1)) + "s\n") - if (self.user_input["samples_per_minute"] > 0): - f.write("Workload time: " + str(round(self.workload_time, 2)) + " days\n") - f.write("Plot path" + self.plot_path + "\n") - f.write("========================================\n") - - def mean_of_chunks(self, np_array, window_size): - """ - Calculates the mean of data within each chunk for a given array. This method helps in smoothing the data by - averaging over specified 'window_size' segments. - - :param np_array (np.array): Array of numerical data to be chunked and averaged. - :param window_size (int): The size of each segment to average over. - :return: np.array: An array of mean values for each chunk. - :side effect: None - """ - if window_size == 1: - return np_array - - chunks = [np_array[i:i + window_size] for i in range(0, len(np_array), window_size)] - means = [np.mean(chunk) for chunk in chunks] - return np.array(means) - - def get_cumulative_limits(self, model_sums): - """ - Calculates the appropriate x-axis limits for cumulative plots based on the summarized data from each model. - - :param model_sums (list of float): The total values for each model. - :return: tuple: A tuple containing the minimum and maximum x-axis limits. 
- """ - axis_min = min(model_sums) * 0.9 - axis_max = max(model_sums) * 1.1 - - if self.user_input["x_min"] is not None: - axis_min = self.user_input["x_min"] - if self.user_input["x_max"] is not None: - axis_max = self.user_input["x_max"] - - return [axis_min * 0.9, axis_max * 1.1] diff --git a/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/__init__.py b/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/__init__.py new file mode 100644 index 00000000..e2d5aaee --- /dev/null +++ b/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/__init__.py @@ -0,0 +1,3 @@ +from .model import Model +from .multi_model import MultiModel +from .meta_model import MetaModel diff --git a/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/MetaModel.py b/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/meta_model.py index 49930d25..a6d0fded 100644 --- a/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/MetaModel.py +++ b/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/meta_model.py @@ -1,8 +1,8 @@ -import numpy as np import os import pandas as pd - -from .Model import Model +from models import Model, MultiModel +from typing import Callable +from util import PlotType class MetaModel: @@ -20,43 +20,32 @@ class MetaModel: function_map (dict): Mapping of aggregation function names to function implementations. """ - META_MODEL_ID = -101 + META_MODEL_ID = 'M' - def __init__(self, multimodel, meta_function=None): + def __init__(self, multi_model: MultiModel, meta_function: Callable[[any], float] = None): """ Initializes the Metamodel with a MultiModel instance and prepares aggregation functions based on configuration. - :param multimodel: MultiModel instance containing the models to aggregate. + :param multi_model: MultiModel instance containing the models to aggregate. :raise ValueError: If metamodel functionality is not enabled in the configuration. """ - if not multimodel.user_input.get('metamodel', False): + if not multi_model.config.is_metamodel: raise ValueError("Metamodel is not enabled in the config file") - self.function_map = { - 'mean': self.mean, - 'median': self.median, - 'meta_equation1': self.meta_equation1, - } - - self.multi_model = multimodel - self.meta_model = Model( + self.multi_model = multi_model + self.meta_model: Model = Model( raw_sim_data=[], - id=self.META_MODEL_ID, - path=self.multi_model.output_folder_path + identifier=self.META_MODEL_ID, ) - if meta_function is not None: - self.meta_function = meta_function - else: - self.meta_function = self.function_map.get(multimodel.user_input['meta_function'], self.mean) + self.meta_function: Callable[ + [any], float] = self.multi_model.config.meta_function if meta_function is None else meta_function self.min_raw_model_len = min([len(model.raw_sim_data) for model in self.multi_model.models]) self.min_processed_model_len = min([len(model.processed_sim_data) for model in self.multi_model.models]) self.number_of_models = len(self.multi_model.models) - self.compute() - self.output() - def output(self): + def output(self) -> None: """ Generates outputs by plotting the aggregated results and exporting the metamodel data to a file. :return: None @@ -65,34 +54,32 @@ class MetaModel: self.plot() self.output_metamodel() - def compute(self): + def compute(self) -> None: """ Computes aggregated data based on the specified plot type from the configuration. :raise ValueError: If an unsupported plot type is specified in the configuration. 
""" - if self.multi_model.plot_type == 'time_series': - self.compute_time_series() - elif self.multi_model.plot_type == 'cumulative': - self.compute_cumulative() - elif self.multi_model.plot_type == 'cumulative_time_series': - self.compute_cumulative_time_series() - else: - raise ValueError("Invalid plot type in config file") + match self.multi_model.config.plot_type: + case PlotType.TIME_SERIES: + self.compute_time_series() + case PlotType.CUMULATIVE: + self.compute_cumulative() + case PlotType.CUMULATIVE_TIME_SERIES: + self.compute_cumulative_time_series() - def plot(self): + def plot(self) -> None: """ Plots the aggregated data according to the specified plot type from the configuration. :raise ValueError: If an unsupported plot type is specified. """ - if self.multi_model.plot_type == 'time_series': - self.plot_time_series() - elif self.multi_model.plot_type == 'cumulative': - self.plot_cumulative() - elif self.multi_model.plot_type == 'cumulative_time_series': - self.plot_cumulative_time_series() - else: - raise ValueError("Invalid plot type in config file") + match self.multi_model.config.plot_type: + case PlotType.TIME_SERIES: + self.plot_time_series() + case PlotType.CUMULATIVE: + self.plot_cumulative() + case PlotType.CUMULATIVE_TIME_SERIES: + self.plot_cumulative_time_series() def compute_time_series(self): """ @@ -102,8 +89,8 @@ class MetaModel: """ for i in range(0, self.min_processed_model_len): data_entries = [] - for j in range(self.number_of_models): - data_entries.append(self.multi_model.models[j].processed_sim_data[i]) + for model in self.multi_model.models: + data_entries.append(model.processed_sim_data[i]) self.meta_model.processed_sim_data.append(self.meta_function(data_entries)) self.meta_model.raw_sim_data = self.meta_model.processed_sim_data @@ -122,14 +109,14 @@ class MetaModel: :return: None :side effect: Updates the meta_model's cumulative data with aggregated results. """ - for i in range(0, self.min_raw_model_len): data_entries = [] - for j in range(self.number_of_models): - sim_data = self.multi_model.models[j].raw_sim_data + for model in self.multi_model.models: + sim_data = model.raw_sim_data ith_element = sim_data[i] data_entries.append(ith_element) - self.meta_model.cumulated += self.mean(data_entries) + self.meta_model.cumulated += self.meta_function(data_entries) + self.meta_model.cumulated = round(self.meta_model.cumulated, 2) def plot_cumulative(self): @@ -149,8 +136,8 @@ class MetaModel: """ for i in range(0, self.min_processed_model_len): data_entries = [] - for j in range(self.number_of_models): - data_entries.append(self.multi_model.models[j].processed_sim_data[i]) + for model in self.multi_model.models: + data_entries.append(model.processed_sim_data[i]) self.meta_model.processed_sim_data.append(self.meta_function(data_entries)) def plot_cumulative_time_series(self): @@ -168,47 +155,18 @@ class MetaModel: :return: None :side effect: Writes data to a parquet file at the specified directory path. 
""" - directory_path = os.path.join(self.multi_model.output_folder_path, "raw-output/metamodel/seed=0") - os.makedirs(directory_path, exist_ok=True) - current_path = os.path.join(directory_path, f"{self.multi_model.metric}.parquet") - df = pd.DataFrame({'processed_sim_data': self.meta_model.processed_sim_data}) - df.to_parquet(current_path, index=False) + directory_path = os.path.join(self.multi_model.config.output_path, "raw-output/metamodel/seed=0") + try: + os.makedirs(directory_path, exist_ok=True) + except OSError as e: + print(f"Error creating directory: {e}") + exit(1) - def mean(self, chunks): - """ - Calculates the mean of a list of numerical data. - - :param chunks (list): The data over which to calculate the mean. - :return: float: The mean of the provided data. - """ - return np.mean(chunks) + current_path = os.path.join(directory_path, f"{self.multi_model.config.metric}.parquet") + minimum = min(len(self.multi_model.timestamps), len(self.meta_model.processed_sim_data)) - def median(self, chunks): - """ - Calculates the median of a list of numerical data. - - :param chunks (list): The data over which to calculate the median. - :return: float: The median of the provided data. - """ - return np.median(chunks) - - def meta_equation1(self, chunks): - """ - Calculates a weighted mean where the weights are inversely proportional to the absolute difference from the median value. - :param chunks (list): Data chunks from which to calculate the weighted mean. - :return: float: The calculated weighted mean. - """ - - """Attempt 1""" - # median_val = np.median(chunks) - # proximity_weights = 1 / (1 + np.abs(chunks - median_val)) # Avoid division by zero - # weighted_mean = np.sum(proximity_weights * chunks) / np.sum(proximity_weights) - # return weighted_mean - - """Attempt 2 Inter-Quartile Mean (same accuracy as mean)""" - # sorted_preds = np.sort(chunks, axis=0) - # Q1 = int(np.floor(0.25 * len(sorted_preds))) - # Q3 = int(np.floor(0.75 * len(sorted_preds))) - # - # iqm = np.mean(sorted_preds[Q1:Q3], axis=0) - # return iqm + df = pd.DataFrame({ + "timestamp": self.multi_model.timestamps[:minimum], + self.multi_model.config.metric: self.meta_model.processed_sim_data[:minimum] + }) + df.to_parquet(current_path, index=False) diff --git a/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/model.py b/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/model.py new file mode 100644 index 00000000..bfffd090 --- /dev/null +++ b/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/model.py @@ -0,0 +1,32 @@ +""" +A model is the output of simulator. It contains the data the simulator output, under a certain topology, seed, +workload, datacenter configuration, etc. A model is further used in the analyzer as part of the MultiModel class, +and further in the MetaModel class. + +:param sim: the simulation data of the model +""" +import json + + +class Model: + """ + Represents a single simulation output containing various data metrics collected under specific simulation conditions. + A Model object stores raw and processed simulation data and is designed to interact with higher-level structures like + MultiModel and MetaModel for complex data analysis. 
+ """ + + def __init__(self, raw_sim_data, identifier: str): + self.raw_sim_data = raw_sim_data + self.id: str = str(identifier) + self.processed_sim_data = [] + self.cumulative_time_series_values = [] + self.cumulated: float = 0.0 + self.experiment_name: str = "" + self.margins_of_error = [] + self.topologies = [] + self.workloads = [] + self.allocation_policies = [] + self.carbon_trace_paths = [] + + def is_meta_model(self) -> bool: + return self.id == "M" diff --git a/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/multi_model.py b/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/multi_model.py new file mode 100644 index 00000000..4f993fee --- /dev/null +++ b/opendc-experiments/opendc-experiments-m3sa/src/main/python/models/multi_model.py @@ -0,0 +1,410 @@ +import matplotlib.pyplot as plt +import numpy as np +import os +import pyarrow.parquet as pq +from time import time, strftime +from matplotlib.ticker import MaxNLocator, FuncFormatter +from matplotlib.ticker import AutoMinorLocator +from typing import IO +from textwrap import dedent +from models import Model +from util import SimulationConfig, adjust_unit, PlotType, SIMULATION_DATA_FILE + + +class MultiModel: + """ + Handles multiple simulation models, aggregates their data based on user-defined parameters, + and generates plots and statistics. + + Attributes: + window_size (int): The size of the window for data aggregation, which affects how data smoothing and granularity are handled. + models (list of Model): A list of Model instances that store the simulation data. + measure_unit (str): The unit of measurement for the simulation data, adjusted according to the user's specifications. + unit_scaling (int): The scaling factor applied to the unit of measurement. + max_model_len (int): The length of the shortest model's raw data, used for consistency in processing. + plot_path (str): The path where the generated plot will be saved. + analysis_file (IO): The file object for writing detailed analysis statistics. + COLOR_PALETTE (list of str): A list of color codes for plotting multiple models. + + Methods: + parse_user_input(window_size): Parses and sets the class attributes based on the provided user input. + adjust_unit(): Adjusts the unit of measurement based on user settings, applying appropriate metric prefixes. + set_paths(): Initializes the directory paths for storing outputs and analysis results. + init_models(): Reads simulation data from Parquet files and initializes Model instances. + compute_windowed_aggregation(): Processes the raw data by applying a windowed aggregation function for smoothing. + generate_plot(): Orchestrates the generation of the specified plot type by calling the respective plotting functions. + generate_time_series_plot(): Generates a time series plot of the aggregated data. + generate_cumulative_plot(): Creates a bar chart showing cumulative data for each model. + generate_cumulative_time_series_plot(): Produces a plot that displays cumulative data over time for each model. + save_plot(): Saves the generated plot to a PDF file in the specified directory. + output_stats(): Writes detailed statistics of the simulation to an analysis file for record-keeping. + mean_of_chunks(np_array, window_size): Calculates the mean of data segments for smoothing and processing. + get_cumulative_limits(model_sums): Determines appropriate x-axis limits for cumulative plots based on the model data. 
+ + Usage: + To use this class, instantiate it with a dictionary of user settings, a path for outputs, and optionally a window size. + Call the `generate_plot` method to process the data and generate plots as configured by the user. + """ + + COLOR_PALETTE: list[str] = [ + # Colorblind-friendly palette + "#0072B2", "#E69F00", "#009E73", "#D55E00", "#CC79A7", "#F0E442", "#8B4513", + "#56B4E9", "#F0A3FF", "#FFB400", "#00BFFF", "#90EE90", "#FF6347", "#8A2BE2", "#CD5C5C", + "#4682B4", "#FFDEAD", "#32CD32", "#D3D3D3", "#999999" + ] + + def __init__(self, config: SimulationConfig, window_size: int = -1): + """ + Initializes the MultiModel with provided user settings and prepares the environment. + + :param user_input (dict): Configurations and settings from the user. + :param path (str): Path where output and analysis will be stored. + :param window_size (int): The size of the window to aggregate data; uses user input if -1. + :return: None + """ + + self.config: SimulationConfig = config + self.starting_time: float = time() + self.workload_time = None + self.timestamps = None + self.plot_path: str | None = None + + self.window_size = config.window_size if window_size == -1 else window_size + self.measure_unit: str + self.unit_scaling: int + self.measure_unit, self.unit_scaling = adjust_unit(config.current_unit, config.unit_scaling_magnitude) + + self.models: list[Model] = [] + self.max_model_len = 0 + + try: + os.makedirs(self.config.output_path, exist_ok=True) + self.analysis_file: IO = open(config.output_path + "/analysis.txt", "w") + except Exception as e: + print(f"Error handling output directory: {e}") + exit(1) + + self.analysis_file.write("Analysis file create\n") + + self.init_models() + if self.config.is_metamodel: + self.COLOR_PALETTE = ["#b3b3b3" for _ in range(len(self.models))] + if len(self.config.plot_colors) > 0: + self.COLOR_PALETTE = self.config.plot_colors + self.compute_windowed_aggregation() + + def get_model_path(self, dir: str) -> str: + return ( + f"{self.config.simulation_path}/" + f"{dir}/" + f"seed={self.config.seed}/" + f"{SIMULATION_DATA_FILE}.parquet" + ) + + def init_models(self): + """ + Initializes models from the simulation output stored in Parquet files. This method reads each Parquet file, + processes the relevant data, and initializes Model instances which are stored in the model list. + + :return: None + :raise ValueError: If the unit scaling has not been set prior to model initialization. + """ + if self.unit_scaling is None: + raise ValueError("Unit scaling factor is not set. 
Please ensure it is set correctly.") + + simulation_directories = os.listdir(self.config.simulation_path) + simulation_directories.sort() + + for sim_dir in simulation_directories: + print("Processing simulation: ", sim_dir) + if sim_dir == "metamodel": + continue + + simulation_id: str = os.path.basename(sim_dir) + columns_to_read = ['timestamp', self.config.metric] + parquet_file = pq.read_table(self.get_model_path(sim_dir), columns=columns_to_read).to_pandas() + + grouped_data = parquet_file.groupby('timestamp')[self.config.metric].sum() + # Apply unit scaling to the raw data + raw = np.divide(grouped_data.values, self.unit_scaling) + timestamps = parquet_file['timestamp'].unique() + + model = Model(raw_sim_data=raw, identifier=simulation_id) + self.models.append(model) + + if self.timestamps is None or len(self.timestamps) > len(timestamps): + self.timestamps = timestamps + + self.max_model_len = min([len(model.raw_sim_data) for model in self.models]) + + def compute_windowed_aggregation(self) -> None: + """ + Applies a windowed aggregation function to each model's dataset. This method is typically used for smoothing + or reducing data granularity. It involves segmenting the dataset into windows of specified size and applying + an aggregation function to each segment. + + :return: None + :side effect: Modifies each model's processed_sim_data attribute to contain aggregated data. + """ + if self.config.plot_type == PlotType.CUMULATIVE: + return + + for model in self.models: + numeric_values = model.raw_sim_data + model.processed_sim_data = self.mean_of_chunks(numeric_values, self.config.window_size) + + def generate_plot(self): + """ + Creates and saves plots based on the processed data from multiple models. This method determines + the type of plot to generate based on user input and invokes the appropriate plotting function. + + The plotting options supported are 'time_series', 'cumulative', and 'cumulative_time_series'. + Depending on the type specified, this method delegates to specific plot-generating functions. + + :return: None + :raises ValueError: If the plot type specified is not recognized or supported by the system. + :side effect: + - Generates and saves a plot to the file system. + - Updates the plot attributes based on the generated plot. + - Displays the plot on the matplotlib figure canvas. + """ + plt.figure(figsize=self.config.fig_size) + + plt.xticks(size=32) + plt.yticks(size=32) + plt.ylabel(self.config.y_axis.label, size=26) + plt.xlabel(self.config.x_axis.label, size=26) + plt.title(self.config.plot_title, size=26) + plt.grid() + + formatter = FuncFormatter(lambda x, _: '{:,}'.format(int(x)) if x >= 1000 else int(x)) + ax = plt.gca() + ax.xaxis.set_major_formatter(formatter) + + if self.config.x_axis.has_ticks(): + ax = plt.gca() + ax.xaxis.set_major_locator(MaxNLocator(self.config.x_axis.ticks)) + + if self.config.y_axis.has_ticks(): + ax = plt.gca() + ax.yaxis.set_major_locator(MaxNLocator(self.config.y_axis.ticks)) + + self.set_axis_limits() + + match self.config.plot_type: + case PlotType.TIME_SERIES: + self.generate_time_series_plot() + case PlotType.CUMULATIVE: + self.generate_cumulative_plot() + case PlotType.CUMULATIVE_TIME_SERIES: + self.generate_cumulative_time_series_plot() + + plt.tight_layout() + plt.subplots_adjust(right=0.85) + self.save_plot() + self.output_stats() + + def generate_time_series_plot(self): + """ + Plots time series data for each model. 
This function iterates over each model, applies the defined + windowing function to smooth the data, and plots the resulting series. + + :return: None + :side effect: Plots are displayed on the matplotlib figure canvas. + """ + + for i, model in enumerate(self.models): + label = "Meta-Model" if model.is_meta_model() else "Model " + str(model.id) + + if model.is_meta_model(): + repeated_means = np.repeat(model.processed_sim_data, self.window_size) + plt.plot(repeated_means, drawstyle='steps-mid', label=label, color="#228B22", linestyle="solid", + linewidth=2) + else: + means = self.mean_of_chunks(model.raw_sim_data, self.window_size) + repeated_means = np.repeat(means, self.window_size)[:len(model.raw_sim_data)] + plt.plot(repeated_means, drawstyle='steps-mid', label=label, color=self.COLOR_PALETTE[i]) + + def generate_cumulative_plot(self): + """ + Generates a horizontal bar chart showing cumulative data for each model. This function + aggregates total values per model and displays them in a bar chart, providing a visual + comparison of total values across models. + + :return: None + :side effect: Plots are displayed on the matplotlib figure canvas. + """ + plt.xlim(self.get_cumulative_limits(model_sums=self.sum_models_entries())) + plt.ylabel("Model ID", size=30) + plt.xlabel(self.config.x_axis.label, size=30) + + ax = plt.gca() + ax.tick_params(axis='x', which='major', length=12) # Set length of the ticks + ax.set_xticklabels([]) # Hide x-axis numbers + ax.xaxis.set_minor_locator(AutoMinorLocator(5)) # Set two minor ticks between majors + ax.tick_params(axis='x', which='minor', length=7, color='black') + plt.yticks(range(len(self.models)), [model.id for model in self.models]) + + plt.grid(False) + + cumulated_energies = self.sum_models_entries() + + for i, model in (enumerate(self.models)): + label = "Meta-Model" if model.is_meta_model() else "Model " + str(model.id) + if model.is_meta_model(): + plt.barh(i, cumulated_energies[i], label=label, color='#009E73', hatch='//') + plt.text(cumulated_energies[i], i, str(int(round(cumulated_energies[i], 0))), ha='left', va='center', + size=26) + else: + round_decimals = 0 if cumulated_energies[i] > 500 else 1 + plt.barh(label=label, y=i, width=cumulated_energies[i], color=self.COLOR_PALETTE[i]) + plt.text(cumulated_energies[i], i, str(int(round(cumulated_energies[i], round_decimals))), ha='left', + va='center', size=26) + + def generate_cumulative_time_series_plot(self): + """ + Generates a plot showing the cumulative data over time for each model. This visual representation is + useful for analyzing trends and the accumulation of values over time. + + :return: None + :side effect: Displays the cumulative data over time on the matplotlib figure canvas. 
+ """ + self.compute_cumulative_time_series() + + for i, model in enumerate(self.models): + label = "Meta-Model" if model.is_meta_model() else "Model " + str(model.id) + if model.is_meta_model(): + cumulative_repeated = np.repeat(model.cumulative_time_series_values, self.window_size)[ + :len(model.processed_sim_data) * self.window_size] + plt.plot(cumulative_repeated, label=label, drawstyle='steps-mid', color="#228B22", linestyle="solid", + linewidth=2) + else: + cumulative_repeated = np.repeat(model.cumulative_time_series_values, self.window_size)[ + :len(model.raw_sim_data)] + plt.plot(cumulative_repeated, drawstyle='steps-mid', label=("Model " + str(model.id)), + color=self.COLOR_PALETTE[i]) + + def compute_cumulative_time_series(self): + """ + Computes the cumulative sum of processed data over time for each model, storing the result for use in plotting. + + :return: None + :side effect: Updates each model's 'cumulative_time_series_values' attribute with the cumulative sums. + """ + for model in self.models: + cumulative_array = [] + _sum = 0 + for value in model.processed_sim_data: + _sum += value + cumulative_array.append(_sum * self.window_size) + model.cumulative_time_series_values = cumulative_array + + def save_plot(self): + """ + Saves the current plot to a PDF file in the specified directory, constructing the file path from the + plot attributes and ensuring that the directory exists before saving. + + :return: None + :side effect: Creates or overwrites a PDF file containing the plot in the designated folder. + """ + output_dir = f"{self.config.output_path}/simulation-analysis/{self.config.metric}" + try: + os.makedirs(output_dir, exist_ok=True) + except OSError as e: + print(f"Error handling output directory: {e}") + exit(1) + + self.plot_path: str = ( + f"{output_dir}/" + f"{self.config.plot_type}" + f"_plot_multimodel_metric={self.config.metric}" + f"_window={self.window_size}" + f".pdf" + ) if self.config.figure_export_name is None \ + else f"{output_dir}/{self.config.figure_export_name}.pdf" + + plt.savefig(self.plot_path) + + def set_axis_limits(self) -> None: + """ + Sets the x-axis and y-axis limits for the current plot based on the user-defined configuration. + This method ensures that the plot displays the data within the specified range, enhancing readability. + """ + if self.config.x_axis.has_range(): + plt.xlim(left=self.config.x_axis.value_range[0], right=self.config.x_axis.value_range[1]) + + if self.config.y_axis.has_range(): + plt.ylim(bottom=self.config.y_axis.value_range[0], top=self.config.y_axis.value_range[1]) + + def sum_models_entries(self): + """ + Computes the total values from each model for use in cumulative plotting. This method aggregates + the data across all models and prepares it for cumulative display. + + :return: List of summed values for each model, useful for plotting and analysis. + """ + models_sums = [] + for i, model in enumerate(self.models): + if model.is_meta_model(): + models_sums.append(model.cumulated) + else: + cumulated_energy = model.raw_sim_data.sum() + cumulated_energy = round(cumulated_energy, 2) + models_sums.append(cumulated_energy) + + return models_sums + + def output_stats(self) -> None: + """ + Records and writes detailed simulation statistics to an analysis file. This includes time stamps, + performance metrics, and other relevant details. + + :return: None + :side effect: Appends detailed simulation statistics to an existing file for record-keeping and analysis. 
+ """ + end_time: float = time() + self.analysis_file.write(dedent( + f""" + ========================================================= + Simulation made at {strftime("%Y-%m-%d %H:%M:%S")} + Metric: {self.config.metric} + Unit: {self.measure_unit} + Window size: {self.window_size} + Sample count in raw sim data: {self.max_model_len} + Computing time {round(end_time - self.starting_time, 1)}s + Plot path: {self.plot_path} + ========================================================= + """ + )) + + def mean_of_chunks(self, np_array: np.array, window_size: int) -> np.array: + """ + Calculates the mean of data within each chunk for a given array. This method helps in smoothing the data by + averaging over specified 'window_size' segments. + + :param np_array: Array of numerical data to be chunked and averaged. + :param window_size: The size of each segment to average over. + :return: np.array: An array of mean values for each chunk. + """ + if window_size == 1: + return np_array + + chunks: list[np.array] = [np_array[i:i + window_size] for i in range(0, len(np_array), window_size)] + means: list[float] = [np.mean(chunk) for chunk in chunks] + return np.array(means) + + def get_cumulative_limits(self, model_sums: list[float]) -> list[float]: + """ + Calculates the appropriate x-axis limits for cumulative plots based on the summarized data from each model. + + :param model_sums: List of summed values for each model. + :return: list[float]: A list containing the minimum and maximum values for the x-axis limits. + """ + axis_min = min(model_sums) * 0.9 + axis_max = max(model_sums) * 1.1 + + if self.config.x_axis.value_range is not None: + axis_min = self.config.x_axis.value_range[0] + axis_max = self.config.x_axis.value_range[1] + + return [axis_min * 0.9, axis_max * 1.1] |
