author Dante Niewenhuis <d.niewenhuis@hotmail.com> 2025-05-16 10:32:08 +0200
committer GitHub <noreply@github.com> 2025-05-16 10:32:08 +0200
commit d70312f122d9ef7c31b05757239ffc66af832dee (patch)
tree c8eb5d86ce751b783c3f15744bcda35861eed65d
parent 1bc17abd7691bc81f11ee125e2eeb4cb08da5245 (diff)
Updated website documentation (#334)
* Updated website documentation
* Updated some documentation and fixed links
* small updates
* small updates
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ComputeService.java 17
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/ComputeSchedulers.kt 9
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/MemorizingScheduler.kt 6
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/ComputeExportConfig.kt 2
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltBatteryExportColumns.kt 10
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltHostExportColumns.kt 32
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltPowerSourceExportColumns.kt 10
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltServiceExportColumns.kt 25
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltTaskExportColumns.kt 18
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/host/HostTableReader.kt 2
-rw-r--r-- opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/host/HostTableReaderImpl.kt 12
-rw-r--r-- opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology/TopologyFactories.kt 4
-rw-r--r-- opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology/specs/TopologySpecs.kt 44
-rw-r--r-- opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ExperimentSpec.kt 17
-rw-r--r-- opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ExportModelSpec.kt 11
-rw-r--r-- opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ScenarioSpec.kt 1
-rw-r--r-- opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/AllocationPolicySpec.kt 10
-rw-r--r-- opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostFilterSpec.kt 14
-rw-r--r-- opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostWeigherSpec.kt 22
-rw-r--r-- opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt 14
-rw-r--r-- site/docs/documentation/Input/AllocationPolicy.md 265
-rw-r--r-- site/docs/documentation/Input/CheckpointModel.md 25
-rw-r--r-- site/docs/documentation/Input/Experiment.md 136
-rw-r--r-- site/docs/documentation/Input/ExperimentSchema.md 81
-rw-r--r-- site/docs/documentation/Input/ExportModel.md 50
-rw-r--r-- site/docs/documentation/Input/FailureModel.md 8
-rw-r--r-- site/docs/documentation/Input/Topology.md 220
-rw-r--r-- site/docs/documentation/Input/Topology/Battery.md 37
-rw-r--r-- site/docs/documentation/Input/Topology/Host.md 55
-rw-r--r-- site/docs/documentation/Input/Topology/PowerModel.md 31
-rw-r--r-- site/docs/documentation/Input/Topology/PowerSource.md 20
-rw-r--r-- site/docs/documentation/Input/Topology/Topology.md 183
-rw-r--r-- site/docs/documentation/Input/TopologySchema.md 160
-rw-r--r-- site/docs/documentation/Input/Workload.md 47
-rw-r--r-- site/docs/documentation/M3SA/M3SA.md (renamed from site/docs/documentation/Input/M3SA.md) 0
-rw-r--r-- site/docs/documentation/M3SA/M3SASchema.md (renamed from site/docs/documentation/Input/M3SASchema.md) 0
-rw-r--r-- site/docs/documentation/Output.md 173
-rw-r--r-- site/docs/getting-started/1-start-using-intellij.md (renamed from site/docs/getting-started/4-start-using-intellij.md) 0
-rw-r--r-- site/docs/getting-started/2-first-experiment.md (renamed from site/docs/getting-started/1-first-experiment.md) 19
-rw-r--r-- site/docs/getting-started/3-whats-next.md 2
-rw-r--r-- site/static/img/failureModels.png (renamed from site/docs/documentation/Input/img.png) bin 110177 -> 110177 bytes
41 files changed, 972 insertions, 820 deletions
diff --git a/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ComputeService.java b/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ComputeService.java
index 6d973b3f..11394ce9 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ComputeService.java
+++ b/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ComputeService.java
@@ -418,7 +418,8 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
LOGGER.debug("Enqueueing task {} to be assigned to host", task.getUid());
if (task.getNumFailures() >= maxNumFailures) {
- LOGGER.warn("task {} has been terminated because it failed {} times", task, task.getNumFailures());
+ LOGGER.warn("task {} has been terminated because it failed {} times", (Object) task, (Object)
+ task.getNumFailures());
tasksTerminated++;
task.setState(TaskState.TERMINATED);
@@ -493,20 +494,6 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
final ServiceFlavor flavor = task.getFlavor();
- // if (task.getNumFailures() >= maxNumFailures) {
- // LOGGER.warn("task {} has been terminated because it failed {} times", task,
- // task.getNumFailures());
- //
- // taskQueue.remove(req);
- // tasksPending--;
- // tasksTerminated++;
- // task.setState(TaskState.TERMINATED);
- //
- // scheduler.removeTask(task, hv);
- // this.setTaskToBeRemoved(task);
- // continue;
- // }
-
if (result.getResultType() == SchedulingResultType.FAILURE) {
LOGGER.trace("Task {} selected for scheduling but no capacity available for it at the moment", task);
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/ComputeSchedulers.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/ComputeSchedulers.kt
index 35c66e44..e70cec58 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/ComputeSchedulers.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/ComputeSchedulers.kt
@@ -48,7 +48,6 @@ public enum class ComputeSchedulerEnum {
Random,
TaskNumMemorizing,
Timeshift,
- TimeshiftNoPeak,
}
public fun createPrefabComputeScheduler(
@@ -129,13 +128,5 @@ public fun createPrefabComputeScheduler(
clock = clock,
random = SplittableRandom(seeder.nextLong()),
)
- ComputeSchedulerEnum.TimeshiftNoPeak ->
- TimeshiftScheduler(
- filters = listOf(ComputeFilter(), VCpuFilter(cpuAllocationRatio), RamFilter(ramAllocationRatio)),
- weighers = listOf(RamWeigher(multiplier = 1.0)),
- windowSize = 168,
- clock = clock,
- random = SplittableRandom(seeder.nextLong()),
- )
}
}
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/MemorizingScheduler.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/MemorizingScheduler.kt
index 4108ed3d..72d9199f 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/MemorizingScheduler.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/MemorizingScheduler.kt
@@ -35,7 +35,7 @@ public class MemorizingScheduler(
private val filters: List<HostFilter>,
private val maxTimesSkipped: Int = 7,
) : ComputeScheduler {
- // We assume that there will be max 200 tasks per host.
+ // We assume that there will be max 100 tasks per host.
// The index of a host list is the number of tasks on that host.
private val hostsQueue = List(100, { mutableListOf<HostView>() })
private var minAvailableHost = 0
@@ -78,6 +78,10 @@ public class MemorizingScheduler(
return SchedulingResult(SchedulingResultType.FAILURE)
}
+// if (minAvailableHost == 1) {
+// return SchedulingResult(SchedulingResultType.EMPTY);
+// }
+
val maxIters = 10000
var numIters = 0
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/ComputeExportConfig.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/ComputeExportConfig.kt
index 7c753ebf..c7549433 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/ComputeExportConfig.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/ComputeExportConfig.kt
@@ -106,7 +106,7 @@ public data class ComputeExportConfig(
/**
* Config that includes all columns defined in [DfltHostExportColumns], [DfltTaskExportColumns],
- * [DfltPowerSourceExportColumns], [batteryExportColumns], [DfltServiceExportColumns] among all other loaded
+ * [DfltPowerSourceExportColumns], [DfltBatteryExportColumns], [DfltServiceExportColumns] among all other loaded
* columns for [HostTableReader], [TaskTableReader] and [ServiceTableReader].
*/
public val ALL_COLUMNS: ComputeExportConfig by lazy {
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltBatteryExportColumns.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltBatteryExportColumns.kt
index 594d518c..29e0f899 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltBatteryExportColumns.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltBatteryExportColumns.kt
@@ -57,12 +57,12 @@ public object DfltBatteryExportColumns {
field = Types.required(INT64).named("timestamp_absolute"),
) { it.timestampAbsolute.toEpochMilli() }
- public val NAME: ExportColumn<BatteryTableReader> =
+ public val BATTERY_NAME: ExportColumn<BatteryTableReader> =
ExportColumn(
field =
Types.required(BINARY)
.`as`(LogicalTypeAnnotation.stringType())
- .named("name"),
+ .named("battery_name"),
) { Binary.fromString(it.batteryInfo.name) }
public val CLUSTER_NAME: ExportColumn<BatteryTableReader> =
@@ -111,9 +111,9 @@ public object DfltBatteryExportColumns {
*/
internal val BASE_EXPORT_COLUMNS =
setOf(
- TIMESTAMP_ABS,
- TIMESTAMP,
- NAME,
+ BATTERY_NAME,
CLUSTER_NAME,
+ TIMESTAMP,
+ TIMESTAMP_ABS,
)
}
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltHostExportColumns.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltHostExportColumns.kt
index a691bc45..00f7854d 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltHostExportColumns.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltHostExportColumns.kt
@@ -58,12 +58,12 @@ public object DfltHostExportColumns {
field = Types.required(INT64).named("timestamp_absolute"),
) { it.timestampAbsolute.toEpochMilli() }
- public val NAME: ExportColumn<HostTableReader> =
+ public val HOST_NAME: ExportColumn<HostTableReader> =
ExportColumn(
field =
Types.required(BINARY)
.`as`(LogicalTypeAnnotation.stringType())
- .named("name"),
+ .named("host_name"),
) { Binary.fromString(it.hostInfo.name) }
public val CLUSTER_NAME: ExportColumn<HostTableReader> =
@@ -84,30 +84,30 @@ public object DfltHostExportColumns {
field = Types.required(INT64).named("mem_capacity"),
) { it.hostInfo.memCapacity }
- public val GUESTS_TERMINATED: ExportColumn<HostTableReader> =
+ public val TASKS_TERMINATED: ExportColumn<HostTableReader> =
ExportColumn(
- field = Types.required(INT32).named("guests_terminated"),
+ field = Types.required(INT32).named("tasks_terminated"),
) { it.tasksTerminated }
- public val GUESTS_RUNNING: ExportColumn<HostTableReader> =
+ public val TASKS_RUNNING: ExportColumn<HostTableReader> =
ExportColumn(
- field = Types.required(INT32).named("guests_running"),
+ field = Types.required(INT32).named("tasks_running"),
) { it.tasksActive }
- public val GUESTS_ERROR: ExportColumn<HostTableReader> =
+ public val TASKS_ERROR: ExportColumn<HostTableReader> =
ExportColumn(
- field = Types.required(INT32).named("guests_error"),
+ field = Types.required(INT32).named("tasks_error"),
) { it.guestsError }
- public val GUESTS_INVALID: ExportColumn<HostTableReader> =
+ public val TASKS_INVALID: ExportColumn<HostTableReader> =
ExportColumn(
- field = Types.required(INT32).named("guests_invalid"),
+ field = Types.required(INT32).named("tasks_invalid"),
) { it.guestsInvalid }
- public val CPU_LIMIT: ExportColumn<HostTableReader> =
+ public val CPU_CAPACITY: ExportColumn<HostTableReader> =
ExportColumn(
- field = Types.required(FLOAT).named("cpu_limit"),
- ) { it.cpuLimit }
+ field = Types.required(FLOAT).named("cpu_capacity"),
+ ) { it.cpuCapacity }
public val CPU_USAGE: ExportColumn<HostTableReader> =
ExportColumn(
@@ -179,9 +179,9 @@ public object DfltHostExportColumns {
*/
internal val BASE_EXPORT_COLUMNS =
setOf(
- TIMESTAMP_ABS,
- TIMESTAMP,
- NAME,
+ HOST_NAME,
CLUSTER_NAME,
+ TIMESTAMP,
+ TIMESTAMP_ABS,
)
}
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltPowerSourceExportColumns.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltPowerSourceExportColumns.kt
index 192667b9..e221a41a 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltPowerSourceExportColumns.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltPowerSourceExportColumns.kt
@@ -57,12 +57,12 @@ public object DfltPowerSourceExportColumns {
field = Types.required(INT64).named("timestamp_absolute"),
) { it.timestampAbsolute.toEpochMilli() }
- public val NAME: ExportColumn<PowerSourceTableReader> =
+ public val SOURCE_NAME: ExportColumn<PowerSourceTableReader> =
ExportColumn(
field =
Types.required(BINARY)
.`as`(LogicalTypeAnnotation.stringType())
- .named("name"),
+ .named("source_name"),
) { Binary.fromString(it.powerSourceInfo.name) }
public val CLUSTER_NAME: ExportColumn<PowerSourceTableReader> =
@@ -98,9 +98,9 @@ public object DfltPowerSourceExportColumns {
*/
internal val BASE_EXPORT_COLUMNS =
setOf(
- TIMESTAMP_ABS,
- TIMESTAMP,
- NAME,
+ SOURCE_NAME,
CLUSTER_NAME,
+ TIMESTAMP,
+ TIMESTAMP_ABS,
)
}
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltServiceExportColumns.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltServiceExportColumns.kt
index 374b2d31..5f3dccf4 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltServiceExportColumns.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltServiceExportColumns.kt
@@ -59,16 +59,21 @@ public object DfltServiceExportColumns {
field = Types.required(INT32).named("hosts_up"),
) { it.hostsUp }
- public val TASKS_PENDING: ExportColumn<ServiceTableReader> =
+ public val HOSTS_DOWN: ExportColumn<ServiceTableReader> =
ExportColumn(
- field = Types.required(INT32).named("tasks_pending"),
- ) { it.tasksPending }
+ field = Types.required(INT32).named("hosts_down"),
+ ) { it.hostsDown }
public val TASKS_TOTAL: ExportColumn<ServiceTableReader> =
ExportColumn(
field = Types.required(INT32).named("tasks_total"),
) { it.tasksTotal }
+ public val TASKS_PENDING: ExportColumn<ServiceTableReader> =
+ ExportColumn(
+ field = Types.required(INT32).named("tasks_pending"),
+ ) { it.tasksPending }
+
public val TASKS_ACTIVE: ExportColumn<ServiceTableReader> =
ExportColumn(
field = Types.required(INT32).named("tasks_active"),
@@ -79,27 +84,17 @@ public object DfltServiceExportColumns {
field = Types.required(INT32).named("tasks_completed"),
) { it.tasksCompleted }
- public val TASKS_FAILED: ExportColumn<ServiceTableReader> =
+ public val TASKS_TERMINATED: ExportColumn<ServiceTableReader> =
ExportColumn(
field = Types.required(INT32).named("tasks_terminated"),
) { it.tasksTerminated }
- public val ATTEMPTS_SUCCESS: ExportColumn<ServiceTableReader> =
- ExportColumn(
- field = Types.required(INT32).named("attempts_success"),
- ) { it.attemptsSuccess }
-
- public val ATTEMPTS_FAILURE: ExportColumn<ServiceTableReader> =
- ExportColumn(
- field = Types.required(INT32).named("attempts_failure"),
- ) { it.attemptsFailure }
-
/**
* The columns that are always included in the output file.
*/
internal val BASE_EXPORT_COLUMNS =
setOf(
- TIMESTAMP_ABS,
TIMESTAMP,
+ TIMESTAMP_ABS,
)
}
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltTaskExportColumns.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltTaskExportColumns.kt
index 6f6b5bdd..5c3ef3bf 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltTaskExportColumns.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltTaskExportColumns.kt
@@ -74,16 +74,24 @@ public object DfltTaskExportColumns {
.named("task_name"),
) { Binary.fromString(it.taskInfo.name) }
- public val CPU_COUNT: ExportColumn<TaskTableReader> =
+ public val HOST_NAME: ExportColumn<TaskTableReader> =
ExportColumn(
- field = Types.required(INT32).named("cpu_count"),
- ) { it.taskInfo.cpuCount }
+ field =
+ Types.required(BINARY)
+ .`as`(LogicalTypeAnnotation.stringType())
+ .named("host_name"),
+ ) { Binary.fromString(it.host?.name) }
public val MEM_CAPACITY: ExportColumn<TaskTableReader> =
ExportColumn(
field = Types.required(INT64).named("mem_capacity"),
) { it.taskInfo.memCapacity }
+ public val CPU_COUNT: ExportColumn<TaskTableReader> =
+ ExportColumn(
+ field = Types.required(INT32).named("cpu_count"),
+ ) { it.taskInfo.cpuCount }
+
public val CPU_LIMIT: ExportColumn<TaskTableReader> =
ExportColumn(
field = Types.required(FLOAT).named("cpu_limit"),
@@ -167,9 +175,9 @@ public object DfltTaskExportColumns {
*/
internal val BASE_EXPORT_COLUMNS =
setOf(
- TIMESTAMP_ABS,
- TIMESTAMP,
TASK_ID,
TASK_NAME,
+ TIMESTAMP,
+ TIMESTAMP_ABS,
)
}
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/host/HostTableReader.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/host/HostTableReader.kt
index 5cbdcd28..ff0115df 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/host/HostTableReader.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/host/HostTableReader.kt
@@ -75,7 +75,7 @@ public interface HostTableReader : Exportable {
/**
* The capacity of the CPUs in the host (in MHz).
*/
- public val cpuLimit: Double
+ public val cpuCapacity: Double
/**
* The usage of all CPUs in the host (in MHz).
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/host/HostTableReaderImpl.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/host/HostTableReaderImpl.kt
index 913e0a17..6e1dac48 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/host/HostTableReaderImpl.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/host/HostTableReaderImpl.kt
@@ -49,7 +49,7 @@ public class HostTableReaderImpl(
_tasksActive = table.tasksActive
_guestsError = table.guestsError
_guestsInvalid = table.guestsInvalid
- _cpuLimit = table.cpuLimit
+ _cpuCapacity = table.cpuCapacity
_cpuDemand = table.cpuDemand
_cpuUsage = table.cpuUsage
_cpuUtilization = table.cpuUtilization
@@ -99,9 +99,9 @@ public class HostTableReaderImpl(
get() = _guestsInvalid
private var _guestsInvalid = 0
- override val cpuLimit: Double
- get() = _cpuLimit
- private var _cpuLimit = 0.0
+ override val cpuCapacity: Double
+ get() = _cpuCapacity
+ private var _cpuCapacity = 0.0
override val cpuUsage: Double
get() = _cpuUsage
@@ -176,7 +176,7 @@ public class HostTableReaderImpl(
_tasksActive = hostSysStats.guestsRunning
_guestsError = hostSysStats.guestsError
_guestsInvalid = hostSysStats.guestsInvalid
- _cpuLimit = hostCpuStats.capacity
+ _cpuCapacity = hostCpuStats.capacity
_cpuDemand = hostCpuStats.demand
_cpuUsage = hostCpuStats.usage
_cpuUtilization = hostCpuStats.utilization
@@ -211,7 +211,7 @@ public class HostTableReaderImpl(
_guestsError = 0
_guestsInvalid = 0
- _cpuLimit = 0.0
+ _cpuCapacity = 0.0
_cpuUsage = 0.0
_cpuDemand = 0.0
_cpuUtilization = 0.0
diff --git a/opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology/TopologyFactories.kt b/opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology/TopologyFactories.kt
index cc2c4b4e..afc3ee62 100644
--- a/opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology/TopologyFactories.kt
+++ b/opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology/TopologyFactories.kt
@@ -129,7 +129,7 @@ private fun ClusterJSONSpec.toClusterSpec(): ClusterSpec {
val powerSourceSpec =
PowerSourceSpec(
createUniqueName(this.powerSource.name, powerSourceNames),
- totalPower = this.powerSource.totalPower,
+ totalPower = this.powerSource.maxPower,
carbonTracePath = this.powerSource.carbonTracePath,
)
@@ -140,8 +140,8 @@ private fun ClusterJSONSpec.toClusterSpec(): ClusterSpec {
createUniqueName(this.battery.name, batteryNames),
this.battery.capacity,
this.battery.chargingSpeed,
- this.battery.batteryPolicy,
this.battery.initialCharge,
+ this.battery.batteryPolicy,
this.battery.embodiedCarbon,
this.battery.expectedLifetime,
)
diff --git a/opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology/specs/TopologySpecs.kt b/opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology/specs/TopologySpecs.kt
index 920d8373..f9675b31 100644
--- a/opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology/specs/TopologySpecs.kt
+++ b/opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology/specs/TopologySpecs.kt
@@ -44,7 +44,6 @@ import org.opendc.simulator.engine.engine.FlowEngine
@Serializable
public data class TopologySpec(
val clusters: List<ClusterJSONSpec>,
- val schemaVersion: Int = 1,
)
/**
@@ -52,7 +51,6 @@ public data class TopologySpec(
*
* @param name The name of the cluster.
* @param hosts List of the different hosts (nodes) available in this cluster
- * @param location Location of the cluster. This can impact the carbon intensity
*/
@Serializable
public data class ClusterJSONSpec(
@@ -61,7 +59,6 @@ public data class ClusterJSONSpec(
val hosts: List<HostJSONSpec>,
val powerSource: PowerSourceJSONSpec = PowerSourceJSONSpec.DFLT,
val battery: BatteryJSONSpec? = null,
- val location: String = "NL",
)
/**
@@ -77,46 +74,47 @@ public data class ClusterJSONSpec(
public data class HostJSONSpec(
val name: String = "Host",
val cpu: CPUJSONSpec,
+ val count: Int = 1,
val memory: MemoryJSONSpec,
val powerModel: PowerModelSpec = PowerModelSpec.DFLT,
- val count: Int = 1,
)
/**
* Definition of a compute CPU modeled in the simulation.
*
- * @param vendor The vendor of the storage device.
* @param modelName The model name of the device.
+ * @param vendor The vendor of the storage device.
* @param arch The micro-architecture of the processor node.
- * @param coreCount The number of cores in the CPU
- * @param coreSpeed The speed of the cores
+ * @param count The number of CPUs of this type in the host.
+ * @param coreCount The number of cores in the CPU.
+ * @param coreSpeed The speed of the cores.
*/
@Serializable
public data class CPUJSONSpec(
- val vendor: String = "unknown",
val modelName: String = "unknown",
+ val vendor: String = "unknown",
val arch: String = "unknown",
+ val count: Int = 1,
val coreCount: Int,
val coreSpeed: Frequency,
- val count: Int = 1,
)
/**
* Definition of a compute Memory modeled in the simulation.
*
- * @param vendor The vendor of the storage device.
* @param modelName The model name of the device.
+ * @param vendor The vendor of the storage device.
* @param arch The micro-architecture of the processor node.
- * @param memorySpeed The speed of the cores
* @param memorySize The size of the memory Unit
+ * @param memorySpeed The speed of the cores
*/
@Serializable
public data class MemoryJSONSpec(
- val vendor: String = "unknown",
val modelName: String = "unknown",
+ val vendor: String = "unknown",
val arch: String = "unknown",
- val memorySpeed: Frequency = Frequency.ofMHz(-1),
val memorySize: DataSize,
+ val memorySpeed: Frequency = Frequency.ofMHz(-1),
)
@Serializable
@@ -145,25 +143,17 @@ public data class PowerModelSpec(
/**
* Definition of a power source used for JSON input.
*
- * @property vendor
- * @property modelName
- * @property arch
- * @property totalPower
+ * @property maxPower in Watt
*/
@Serializable
public data class PowerSourceJSONSpec(
val name: String = "PowerSource",
- val vendor: String = "unknown",
- val modelName: String = "unknown",
- val arch: String = "unknown",
- val totalPower: Long = Long.MAX_VALUE,
+ val maxPower: Long = Long.MAX_VALUE,
val carbonTracePath: String? = null,
) {
public companion object {
public val DFLT: PowerSourceJSONSpec =
- PowerSourceJSONSpec(
- totalPower = Long.MAX_VALUE,
- )
+ PowerSourceJSONSpec()
}
}
@@ -172,9 +162,9 @@ public data class PowerSourceJSONSpec(
*
* @property name The name of the battery
* @property capacity The capacity of the battery in kWh
- * @property chargingSpeed The charging speed of the battery in J
- * @property batteryPolicy The policy used to decide when the battery charges and discharges
+ * @property chargingSpeed The charging speed of the battery in W
* @property initialCharge The initial charge in the battery
+ * @property batteryPolicy The policy used to decide when the battery charges and discharges
* @property embodiedCarbon The embodied carbon needed to create the battery in gram
* @property expectedLifetime The expected lifetime of the battery in years
*
@@ -184,8 +174,8 @@ public data class BatteryJSONSpec(
val name: String = "Battery",
var capacity: Double,
val chargingSpeed: Double,
- val batteryPolicy: BatteryPolicyJSONSpec,
var initialCharge: Double = 0.0,
+ val batteryPolicy: BatteryPolicyJSONSpec,
var embodiedCarbon: Double = 0.0,
var expectedLifetime: Double = 0.0,
)
diff --git a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ExperimentSpec.kt b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ExperimentSpec.kt
index df0a862d..71ab4002 100644
--- a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ExperimentSpec.kt
+++ b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ExperimentSpec.kt
@@ -23,10 +23,7 @@
package org.opendc.experiments.base.experiment.specs
import kotlinx.serialization.Serializable
-import org.opendc.common.logger.infoNewLine
-import org.opendc.common.logger.logger
import org.opendc.compute.simulator.scheduler.ComputeSchedulerEnum
-import org.opendc.compute.simulator.telemetry.parquet.ComputeExportConfig
import org.opendc.experiments.base.experiment.specs.allocation.AllocationPolicySpec
import org.opendc.experiments.base.experiment.specs.allocation.PrefabAllocationPolicySpec
import java.util.UUID
@@ -42,25 +39,22 @@ import java.util.UUID
* @property outputFolder
* @property initialSeed
* @property runs
- * @property computeExportConfig configures which parquet columns are to
* be included in the output files.
*/
@Serializable
public data class ExperimentSpec(
- var id: Int = -1,
var name: String = "",
val outputFolder: String = "output",
val initialSeed: Int = 0,
val runs: Int = 1,
- val exportModels: Set<ExportModelSpec> = setOf(ExportModelSpec()),
- val computeExportConfig: ComputeExportConfig = ComputeExportConfig.ALL_COLUMNS,
- val maxNumFailures: Set<Int> = setOf(10),
val topologies: Set<ScenarioTopologySpec>,
val workloads: Set<WorkloadSpec>,
val allocationPolicies: Set<AllocationPolicySpec> = setOf(PrefabAllocationPolicySpec(ComputeSchedulerEnum.Mem)),
val failureModels: Set<FailureModelSpec?> = setOf(null),
+ val maxNumFailures: Set<Int> = setOf(10),
val checkpointModels: Set<CheckpointModelSpec?> = setOf(null),
+ val exportModels: Set<ExportModelSpec> = setOf(ExportModelSpec()),
) {
init {
require(runs > 0) { "The number of runs should always be positive" }
@@ -71,8 +65,6 @@ public data class ExperimentSpec(
name = "unnamed-simulation-${UUID.randomUUID().toString().substring(0, 4)}"
// "workload=${workloads[0].name}_topology=${topologies[0].name}_allocationPolicy=${allocationPolicies[0].name}"
}
-
- LOG.infoNewLine(computeExportConfig.fmt())
}
public fun getCartesian(): Sequence<ScenarioSpec> {
@@ -96,7 +88,6 @@ public data class ExperimentSpec(
for (i in 0 until numScenarios) {
yield(
ScenarioSpec(
- id,
name,
outputFolder,
topologyList[(i / topologyDiv) % topologyList.size],
@@ -111,8 +102,4 @@ public data class ExperimentSpec(
}
}
}
-
- internal companion object {
- private val LOG by logger()
- }
}
diff --git a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ExportModelSpec.kt b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ExportModelSpec.kt
index 6d1ee0f0..4230f9aa 100644
--- a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ExportModelSpec.kt
+++ b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ExportModelSpec.kt
@@ -23,8 +23,11 @@
package org.opendc.experiments.base.experiment.specs
import kotlinx.serialization.Serializable
+import org.opendc.common.logger.infoNewLine
+import org.opendc.common.logger.logger
import org.opendc.compute.simulator.telemetry.OutputFiles
import org.opendc.compute.simulator.telemetry.parquet.ComputeExportConfig
+import kotlin.getValue
/**
* specification describing how the results should be exported
@@ -34,10 +37,10 @@ import org.opendc.compute.simulator.telemetry.parquet.ComputeExportConfig
@Serializable
public data class ExportModelSpec(
val exportInterval: Long = 5 * 60,
+ var printFrequency: Int? = 24,
val computeExportConfig: ComputeExportConfig = ComputeExportConfig.ALL_COLUMNS,
val filesToExport: List<OutputFiles> = OutputFiles.entries.toList(),
var filesToExportDict: MutableMap<OutputFiles, Boolean> = OutputFiles.entries.associateWith { false }.toMutableMap(),
- var printFrequency: Int? = 24,
) {
init {
require(exportInterval > 0) { "The Export interval has to be higher than 0" }
@@ -47,5 +50,11 @@ public data class ExportModelSpec(
for (file in filesToExport) {
filesToExportDict[file] = true
}
+
+ LOG.infoNewLine(computeExportConfig.fmt())
+ }
+
+ internal companion object {
+ private val LOG by logger()
}
}
diff --git a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ScenarioSpec.kt b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ScenarioSpec.kt
index d9df6511..6025eaa8 100644
--- a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ScenarioSpec.kt
+++ b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/ScenarioSpec.kt
@@ -30,7 +30,6 @@ import org.opendc.experiments.base.experiment.specs.allocation.TaskStopperSpec
@Serializable
public data class ScenarioSpec(
- var id: Int = -1,
var name: String = "",
val outputFolder: String = "output",
val topology: ScenarioTopologySpec,
diff --git a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/AllocationPolicySpec.kt b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/AllocationPolicySpec.kt
index bc96562c..8ea0a31e 100644
--- a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/AllocationPolicySpec.kt
+++ b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/AllocationPolicySpec.kt
@@ -52,16 +52,16 @@ public data class PrefabAllocationPolicySpec(
@Serializable
@SerialName("filter")
public data class FilterAllocationPolicySpec(
- val filters: List<HostFilterSpec>,
- val weighers: List<HostWeigherSpec>,
+ val filters: List<HostFilterSpec> = listOf(ComputeFilterSpec()),
+ val weighers: List<HostWeigherSpec> = emptyList(),
val subsetSize: Int = 1,
) : AllocationPolicySpec
@Serializable
@SerialName("timeshift")
public data class TimeShiftAllocationPolicySpec(
- val filters: List<HostFilterSpec>,
- val weighers: List<HostWeigherSpec>,
+ val filters: List<HostFilterSpec> = listOf(ComputeFilterSpec()),
+ val weighers: List<HostWeigherSpec> = emptyList(),
val windowSize: Int = 168,
val subsetSize: Int = 1,
val forecast: Boolean = true,
@@ -110,10 +110,10 @@ public fun createComputeScheduler(
@Serializable
@SerialName("taskstopper")
public data class TaskStopperSpec(
+ val windowSize: Int = 168,
val forecast: Boolean = true,
val forecastThreshold: Double = 0.6,
val forecastSize: Int = 24,
- val windowSize: Int = 168,
)
public fun createTaskStopper(
diff --git a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostFilterSpec.kt b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostFilterSpec.kt
index 6a0ed2b8..1ae88d3c 100644
--- a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostFilterSpec.kt
+++ b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostFilterSpec.kt
@@ -60,6 +60,12 @@ public data class ComputeFilterSpec(
) : HostFilterSpec()
@Serializable
+@SerialName("SameHost")
+public data class SameHostHostFilterSpec(
+ val filterName: HostFilterEnum = HostFilterEnum.SameHost,
+) : HostFilterSpec()
+
+@Serializable
@SerialName("DifferentHost")
public data class DifferentHostFilterSpec(
val filterName: HostFilterEnum = HostFilterEnum.DifferentHost,
@@ -80,12 +86,6 @@ public data class RamHostFilterSpec(
) : HostFilterSpec()
@Serializable
-@SerialName("SameHost")
-public data class SameHostHostFilterSpec(
- val filterName: HostFilterEnum = HostFilterEnum.SameHost,
-) : HostFilterSpec()
-
-@Serializable
@SerialName("VCpuCapacity")
public data class VCpuCapacityHostFilterSpec(
val filterName: HostFilterEnum = HostFilterEnum.VCpuCapacity,
@@ -101,10 +101,10 @@ public data class VCpuHostFilterSpec(
public fun createHostFilter(filterSpec: HostFilterSpec): HostFilter {
return when (filterSpec) {
is ComputeFilterSpec -> ComputeFilter()
+ is SameHostHostFilterSpec -> SameHostFilter()
is DifferentHostFilterSpec -> DifferentHostFilter()
is InstanceCountHostFilterSpec -> InstanceCountFilter(filterSpec.limit)
is RamHostFilterSpec -> RamFilter(filterSpec.allocationRatio)
- is SameHostHostFilterSpec -> SameHostFilter()
is VCpuCapacityHostFilterSpec -> VCpuCapacityFilter()
is VCpuHostFilterSpec -> VCpuFilter(filterSpec.allocationRatio)
}
diff --git a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostWeigherSpec.kt b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostWeigherSpec.kt
index 60f5ff8b..819f3ae2 100644
--- a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostWeigherSpec.kt
+++ b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostWeigherSpec.kt
@@ -42,40 +42,40 @@ import org.opendc.compute.simulator.scheduler.weights.VCpuWeigher
public sealed class HostWeigherSpec
@Serializable
+@SerialName("Ram")
+public data class RamWeigherSpec(
+ val multiplier: Double = 1.0,
+) : HostWeigherSpec()
+
+@Serializable
@SerialName("CoreRam")
public data class CoreRamWeigherSpec(
- val multiplier: Double,
+ val multiplier: Double = 1.0,
) : HostWeigherSpec()
@Serializable
@SerialName("InstanceCount")
public data class InstanceCountWeigherSpec(
- val multiplier: Double,
-) : HostWeigherSpec()
-
-@Serializable
-@SerialName("Ram")
-public data class RamWeigherSpec(
- val multiplier: Double,
+ val multiplier: Double = 1.0,
) : HostWeigherSpec()
@Serializable
@SerialName("VCpuCapacity")
public data class VCpuCapacityWeigherSpec(
- val multiplier: Double,
+ val multiplier: Double = 1.0,
) : HostWeigherSpec()
@Serializable
@SerialName("VCpu")
public data class VCpuWeigherSpec(
- val multiplier: Double,
+ val multiplier: Double = 1.0,
) : HostWeigherSpec()
public fun createHostWeigher(weigherSpec: HostWeigherSpec): HostWeigher {
return when (weigherSpec) {
+ is RamWeigherSpec -> RamWeigher(weigherSpec.multiplier)
is CoreRamWeigherSpec -> CoreRamWeigher(weigherSpec.multiplier)
is InstanceCountWeigherSpec -> InstanceCountWeigher(weigherSpec.multiplier)
- is RamWeigherSpec -> RamWeigher(weigherSpec.multiplier)
is VCpuCapacityWeigherSpec -> VCpuCapacityWeigher(weigherSpec.multiplier)
is VCpuWeigherSpec -> VCpuWeigher(weigherSpec.multiplier)
}
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt
index 2fb5ece8..d4729350 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt
@@ -25,6 +25,10 @@ package org.opendc.experiments.base
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.assertAll
+import org.opendc.compute.simulator.scheduler.MemorizingScheduler
+import org.opendc.compute.simulator.scheduler.filters.ComputeFilter
+import org.opendc.compute.simulator.scheduler.filters.RamFilter
+import org.opendc.compute.simulator.scheduler.filters.VCpuFilter
import org.opendc.compute.workload.Task
import org.opendc.simulator.compute.workload.trace.TraceFragment
import java.util.ArrayList
@@ -101,7 +105,15 @@ class ExperimentTest {
val topology = createTopology("single_1_2000.json")
- val monitor = runTest(topology, workload)
+ val monitor =
+ runTest(
+ topology,
+ workload,
+ computeScheduler =
+ MemorizingScheduler(
+ filters = listOf(ComputeFilter(), VCpuFilter(1.0), RamFilter(1.0)),
+ ),
+ )
assertAll(
{ assertEquals(15 * 60 * 1000, monitor.maxTimestamp) { "Total runtime incorrect" } },
diff --git a/site/docs/documentation/Input/AllocationPolicy.md b/site/docs/documentation/Input/AllocationPolicy.md
new file mode 100644
index 00000000..96aacc9c
--- /dev/null
+++ b/site/docs/documentation/Input/AllocationPolicy.md
@@ -0,0 +1,265 @@
+Allocation policies define how, when and where a task is executed.
+
+There are two types of allocation policies:
+1. **[Filter](#filter-policy)** - The basic allocation policy that selects a host for each task based on filters and weighters
+2. **[TimeShift](#timeshift-policy)** - Extends the Filter policy by allowing tasks to be delayed to better align with the availability of low-carbon power.
+
+In the following sections, we discuss the different allocation policies and how to define them in an experiment file.
+
+## Filter policy
+To use a filter scheduler, the user has to set the type of the policy to "filter".
+A filter policy requires a list of filters and weighters which characterize the policy.
+
+A filter policy consists of two main components:
+1. **[Filters](#filters)** - Filters select all hosts that are eligible to execute the given task.
+2. **[Weighters](#weighters)** - Weighters are used to rank the eligible hosts. The host with the highest weight is selected to execute the task.
+
+:::info Code
+All code related to reading Allocation policies can be found [here](https://github.com/atlarge-research/opendc/blob/master/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/AllocationPolicySpec.kt)
+:::
+
+### Filters
+Filters select all hosts that are eligible to execute the given task.
+Filters are defined as JSON objects in the experiment file.
+
+The user defines which filter to use by setting the "type".
+OpenDC currently supports the following 7 filters:
+
+#### ComputeFilter
+Returns a host if it is running.
+Does not require any additional parameters.
+
+```json
+{
+ "type": "Compute"
+}
+```
+
+#### SameHostHostFilter
+Ensures that after a failure, a task is executed on the same host again.
+Does not require any additional parameters.
+
+```json
+{
+ "type": "SameHost"
+}
+```
+
+#### DifferentHostFilter
+Ensures that after a failure, a task is *not* executed on the same host again.
+Does not require any additional parameters.
+
+```json
+{
+ "type": "DifferentHost"
+}
+```
+
+#### InstanceCountHostFilter
+Returns a host if the number of instances running on the host is less than the maximum number of instances allowed.
+The user needs to provide the maximum number of instances that can be run on a host.
+
+```json
+{
+ "type": "InstanceCount",
+ "limit": 1
+}
+```
+
+#### RamHostFilter
+Returns a host if the amount of RAM available on the host is greater than the amount of RAM required by the task.
+The user can provide an allocationRatio, which is multiplied with the amount of RAM available on the host.
+This can be used to allow for oversubscription.
+
+```json
+{
+ "type": "Ram",
+ "allocationRatio": 2.5
+}
+```
+
+#### VCpuCapacityHostFilter
+Returns a host if the CPU capacity available on the host is greater than the CPU capacity required by the task.
+
+```json
+{
+ "type": "VCpuCapacity"
+}
+```
+
+#### VCpuHostFilter
+Returns a host if the number of cores available on the host is greater than the number of cores required by the task.
+The user can provide an allocationRatio, which is multiplied with the number of cores available on the host.
+This can be used to allow for oversubscription.
+
+```json
+{
+ "type": "VCpu",
+ "allocationRatio": 2.5
+}
+```
+
+:::info Code
+All code related to reading Filters can be found [here](https://github.com/atlarge-research/opendc/blob/master/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostFilterSpec.kt)
+:::
+
+### Weighters
+Weighters are used to rank the eligible hosts. The host with the highest weight is selected to execute the task.
+Weighters are defined as JSON objects in the experiment file, under the "weighers" field of a policy.
+
+The user defines which weighter to use by setting the "type".
+The user can also provide a multiplier that is multiplied with the weight of the host.
+This can be used to increase or decrease the importance of a host.
+Negative multipliers are also allowed, and can be used to invert the ranking of the hosts.
+OpenDC currently supports the following 5 weighters:
+
+#### RamWeigher
+Orders the hosts by the amount of RAM available on the host.
+
+```json
+{
+ "type": "Ram",
+ "multiplier": 2.0
+}
+```
+
+#### CoreRamWeigher
+Orders the hosts by the amount of RAM available per core on the host.
+
+```json
+{
+ "type": "CoreRam",
+ "multiplier": 0.5
+}
+```
+
+#### InstanceCountWeigher
+Orders the hosts by the number of instances running on the host.
+
+```json
+{
+ "type": "InstanceCount",
+ "multiplier": -1.0
+}
+```
+
+#### VCpuCapacityWeigher
+Orders the hosts by the CPU capacity per core on the host.
+
+```json
+{
+ "type": "VCpuCapacity",
+ "multiplier": 0.5
+}
+```
+
+#### VCpuWeigher
+Orders the hosts by the number of cores available on the host.
+
+```json
+{
+ "type": "VCpu",
+ "multiplier": 2.5
+}
+```
+
+:::info Code
+All code related to reading Weighters can be found [here](https://github.com/atlarge-research/opendc/blob/master/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment/specs/allocation/HostWeigherSpec.kt)
+:::
+
+### Examples
+Following is an example of a Filter policy:
+```json
+{
+ "type": "filter",
+ "filters": [
+ {
+ "type": "Compute"
+ },
+ {
+ "type": "VCpu",
+ "allocationRatio": 1.0
+ },
+ {
+ "type": "Ram",
+ "allocationRatio": 1.5
+ }
+ ],
+ "weighers": [
+ {
+ "type": "Ram",
+ "multiplier": 1.0
+ }
+ ]
+}
+```
+
+## TimeShift policy
+Timeshift extends the Filter policy by allowing tasks to be delayed to better align with the availability of low-carbon power.
+A user can define a timeshift policy by setting the type to "timeshift".
+
+Tasks are scheduled when the current carbon intensity is below the carbon threshold; otherwise, they are delayed. The
+carbon threshold is determined by taking the 35th percentile of next week’s carbon forecast. When a [TaskStopper](#taskstopper) is used, tasks can be interrupted
+when the carbon intensity exceeds the threshold during execution. All tasks have a maximum delay time defined in the workload; once the maximum delay is reached,
+a task cannot be delayed any further.
+
+
+Similar to the Filter policy, the user can define a list of filters and weighters.
+In addition, the user can provide parameters that influence how tasks are delayed, as shown in the example after the table:
+
+| Variable | Type | Required? | Default | Description |
+|------------------------|-----------------------------|-----------|-----------------|-----------------------------------------------------------------------------------|
+| filters | List[Filter] | no | [ComputeFilter] | Filters used to select eligible hosts. |
+| weighers               | List[Weighter]              | no        | []              | Weighters used to rank the eligible hosts.                                         |
+| windowSize             | integer                     | no        | 168             | How far back the scheduler looks to determine the carbon intensity threshold.      |
+| forecast               | boolean                     | no        | true            | Whether the policy uses carbon forecasts.                                           |
+| shortForecastThreshold | double                      | no        | 0.2             | The threshold used for short tasks (< 2 hours).                                     |
+| longForecastThreshold  | double                      | no        | 0.35            | The threshold used for long tasks (> 2 hours).                                      |
+| forecastSize           | integer                     | no        | 24              | The number of hours of forecast that are taken into account.                        |
+| taskStopper            | [TaskStopper](#taskstopper) | no        | null            | Policy for interrupting tasks. If not provided, tasks are never interrupted.        |
+
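+Below is a sketch of a TimeShift policy definition. The field names follow the table above; the filter, weighter, and threshold values shown are illustrative defaults, not a recommended configuration:
+
+```json
+{
+  "type": "timeshift",
+  "filters": [
+    {
+      "type": "Compute"
+    },
+    {
+      "type": "VCpu",
+      "allocationRatio": 1.0
+    },
+    {
+      "type": "Ram",
+      "allocationRatio": 1.5
+    }
+  ],
+  "weighers": [
+    {
+      "type": "Ram",
+      "multiplier": 1.0
+    }
+  ],
+  "windowSize": 168,
+  "forecast": true,
+  "shortForecastThreshold": 0.2,
+  "longForecastThreshold": 0.35,
+  "forecastSize": 24
+}
+```
+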
+### TaskStopper
+
+Aside from delaying tasks, users might want to interrupt tasks that are running.
+For example, if a task is running when only high-carbon energy is available, it can be interrupted and rescheduled to a later time.
+
+A TaskStopper is defined as a JSON object inside the Timeshift policy.
+A TaskStopper consists of the following components (an example follows the table):
+
+| Variable | Type | Required? | Default | Description |
+|-----------------------|-----------------------------|-----------|---------|-----------------------------------------------------------------------------------|
+| windowSize            | integer                     | no        | 168     | How far back the scheduler looks to determine the carbon intensity threshold.       |
+| forecast              | boolean                     | no        | true    | Whether the policy uses carbon forecasts.                                            |
+| forecastThreshold     | double                      | no        | 0.6     | The threshold used to decide whether a running task should be interrupted.          |
+| forecastSize          | integer                     | no        | 24      | The number of hours of forecast that are taken into account.                         |
+
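+A sketch of a Timeshift policy with a TaskStopper attached, assuming the nested object is provided under the "taskStopper" field from the TimeShift table (values are the defaults above):
+
+```json
+{
+  "type": "timeshift",
+  "taskStopper": {
+    "windowSize": 168,
+    "forecast": true,
+    "forecastThreshold": 0.6,
+    "forecastSize": 24
+  }
+}
+```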
+
+## Prefabs
+Aside from custom policies, OpenDC also provides a set of pre-defined policies that can be used.
+A prefab can be defined by setting the type to "prefab" and providing the name of the prefab.
+
+Example:
+```json
+{
+ "type": "prefab",
+ "policyName": "Mem"
+}
+```
+
+The following prefabs are available:
+
+| Name | Filters | Weighters | Timeshifting |
+|---------------------|----------------------------------------------|----------------------------|--------------|
+| Mem | ComputeFilter <br/>VCpuFilter<br/> RamFilter | RamWeigher(1.0) | No |
+| MemInv | ComputeFilter <br/>VCpuFilter<br/> RamFilter | RamWeigher(-1.0) | No |
+| CoreMem | ComputeFilter <br/>VCpuFilter<br/> RamFilter | CoreRamWeigher(1.0) | No |
+| CoreMemInv | ComputeFilter <br/>VCpuFilter<br/> RamFilter | CoreRamWeigher(-1.0) | No |
+| ActiveServers | ComputeFilter <br/>VCpuFilter<br/> RamFilter | InstanceCountWeigher(1.0) | No |
+| ActiveServersInv | ComputeFilter <br/>VCpuFilter<br/> RamFilter | InstanceCountWeigher(-1.0) | No |
+| ProvisionedCores | ComputeFilter <br/>VCpuFilter<br/> RamFilter | VCpuWeigher(1.0) | No |
+| ProvisionedCoresInv | ComputeFilter <br/>VCpuFilter<br/> RamFilter | VCpuWeigher(-1.0) | No |
+| Random | ComputeFilter <br/>VCpuFilter<br/> RamFilter | [] | No |
+| TimeShift | ComputeFilter <br/>VCpuFilter<br/> RamFilter | RamWeigher(1.0) | Yes |
+
+:::info Code
+All code related to prefab schedulers can be found [here](https://github.com/atlarge-research/opendc/blob/master/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/ComputeSchedulers.kt)
+:::
+
diff --git a/site/docs/documentation/Input/CheckpointModel.md b/site/docs/documentation/Input/CheckpointModel.md
new file mode 100644
index 00000000..7c622ea0
--- /dev/null
+++ b/site/docs/documentation/Input/CheckpointModel.md
@@ -0,0 +1,25 @@
+Checkpointing is a technique to reduce the impact of machine failure.
+When using checkpointing, tasks make periodic snapshots of their state.
+If a task fails, it can be restarted from the last snapshot instead of starting from the beginning.
+
+A user can define a checkpoint model using the following parameters:
+
+| Variable | Type | Required? | Default | Description |
+|---------------------------|--------|-----------|---------|----------------------------------------------------------------------------------------------------------------------|
+| checkpointInterval | Int64 | no | 3600000 | The time between checkpoints in ms |
+| checkpointDuration | Int64 | no | 300000 | The time to create a snapshot in ms |
+| checkpointIntervalScaling | Double | no | 1.0 | The scaling of the checkpointInterval after each successful checkpoint. The default of 1.0 means no scaling happens. |
+
+### Example
+
+```json
+{
+ "checkpointInterval": 3600000,
+ "checkpointDuration": 300000,
+ "checkpointIntervalScaling": 1.5
+}
+```
+
+In this example, a snapshot is created every hour, and the snapshot creation takes 5 minutes.
+The checkpointIntervalScaling is set to 1.5, which means that after each successful checkpoint,
+the interval between checkpoints will be increased by 50% (for example from 1 to 1.5 hours).
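+To illustrate the scaling: with these values, the time before each successive checkpoint grows from 60 minutes to 90, then 135, then 202.5 minutes.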
diff --git a/site/docs/documentation/Input/Experiment.md b/site/docs/documentation/Input/Experiment.md
index a4212ddf..8d3462a9 100644
--- a/site/docs/documentation/Input/Experiment.md
+++ b/site/docs/documentation/Input/Experiment.md
@@ -5,113 +5,40 @@ In this page, we will discuss how to properly define experiments for OpenDC.
:::info Code
All code related to reading and processing Experiment files can be found [here](https://github.com/atlarge-research/opendc/tree/master/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/experiment)
-
-The code used to run a given experiment can be found [here](https://github.com/atlarge-research/opendc/tree/master/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner)
+The code used to run experiments can be found [here](https://github.com/atlarge-research/opendc/tree/master/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner)
:::
## Schema
-The schema for the scenario file is provided in [schema](ExperimentSchema)
-In the following section, we describe the different components of the schema.
-Some components of an experiment are not single values, but lists. This is used to run multiple scenarios using
-a single experiment file. OpenDC will execute all permutations of the different values.
-This means that if all list based values have a single value, only one Scenario will be run.
-
-| Variable | Type | Required? | Default | Description |
-|---------------------|----------------------------------------------|-----------|----------|-------------------------------------------------------------------|
-| name | string | no | "" | Name of the scenario, used for identification and referencing. |
-| outputFolder | string | no | "output" | Directory where the simulation outputs will be stored. |
-| initialSeed | integer | no | 0 | Seed used for random number generation to ensure reproducibility. |
-| runs | integer | no | 1 | Number of times the scenario should be run. |
-| exportModels | List[[ExportModel](#exportmodel)] | no | Default | Specifications for exporting data from the simulation. |
-| maxNumFailures | List[integer] | no | [10] | The max number of times a task can fail before being terminated. |
-| topologies | List[[Topology](#topology)] | yes | N/A | List of topologies used in the scenario. |
-| workloads | List[[Workload](#workload)] | yes | N/A | List of workloads to be executed within the scenario. |
-| allocationPolicies | List[[AllocationPolicy](#allocation-policy)] | yes | N/A | Allocation policies used for resource management in the scenario. |
-| failureModels | List[[FailureModel](#failuremodel)] | no | Default | List of failure models to simulate various types of failures. |
-| checkpointModels | List[[CheckpointModel](#checkpointmodel)] | no | null | Paths to carbon footprint trace files. |
-| carbonTracePaths | List[string] | no | null | Paths to carbon footprint trace files. |
-
-
-Many of the input fields of the experiment file are complex objects themselves. Next, we will describe the required input
-type of each of these fields.
-
-### ExportModel
-
-| Variable | Type | Required? | Default | Description |
-|---------------------|-----------------------------------------|-----------|-----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| exportInterval | Int64 | no | 300 | The duration between two exports in seconds |
-| computeExportConfig | [ComputeExportConfig](#checkpointmodel) | no | Default | The features that should be exported during the simulation |
-| filesToExport | List[string] | no | all files | List of the files that should be exported during simulation. The elements should be picked from the set ("host", "task", "powerSource", "battery", "service") |
-
-
-
-### ComputeExportConfig
-The features that should be exported by OpenDC
-
-| Variable | Type | Required? | Default | Description |
-|--------------------------|--------------|-----------|--------------|-----------------------------------------------------------------------|
-| hostExportColumns | List[String] | no | All features | The features that should be exported to the host output file. |
-| taskExportColumns | List[String] | no | All features | The features that should be exported to the task output file. |
-| powerSourceExportColumns | List[String] | no | All features | The features that should be exported to the power source output file. |
-| serviceExportColumns | List[String] | no | All features | The features that should be exported to the service output file. |
-
-
-### Topology
-Defines the topology on which the workload will be run.
-
-:::info
-For more information about the Topology go [here](Topology)
-:::
-
-| Variable | Type | Required? | Default | Description |
-|-------------|--------|-----------|---------|---------------------------------------------------------------------|
-| pathToFile | string | yes | N/A | Path to the JSON file defining the topology. |
-
-### Workload
-Defines the workload that needs to be executed.
-
-:::info
-For more information about workloads go [here](Workload)
-:::
-
-| Variable | Type | Required? | Default | Description |
-|-------------|--------|-----------|---------|---------------------------------------------------------------------|
-| pathToFile | string | yes | N/A | Path to the file containing the workload trace. |
-| type | string | yes | N/A | Type of the workload (e.g., "ComputeWorkload"). |
-
-### Allocation Policy
-Defines the allocation policy that should be used to decide on which host each task should be executed
-
-:::info Code
-The different allocation policies that can be used can be found [here](https://github.com/atlarge-research/opendc/blob/master/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/ComputeSchedulers.kt)
-:::
-
-| Variable | Type | Required? | Default | Description |
-|------------|--------|-----------|---------|----------------------------|
-| policyType | string | yes | N/A | Type of allocation policy. |
-
-### FailureModel
-The failure model that should be used during the simulation
-See [FailureModels](FailureModel) for detailed instructions.
-
-### CheckpointModel
-The checkpoint model that should be used to create snapshots.
-
-| Variable | Type | Required? | Default | Description |
-|---------------------------|--------|-----------|---------|---------------------------------------------------------------------------------------------------------------------|
-| checkpointInterval | Int64 | no | 3600000 | The time between checkpoints in ms |
-| checkpointDuration | Int64 | no | 300000 | The time to create a snapshot in ms |
-| checkpointIntervalScaling | Double | no | 1.0 | The scaling of the checkpointInterval after each succesful checkpoint. The default of 1.0 means no scaling happens. |
-
+In the following section, we describe the different components of an experiment. The table below lists all experiment components:
+
+| Variable | Type | Required? | Default | Description |
+|--------------------|----------------------------------------------------------------------|-----------|---------------|-------------------------------------------------------------------------------------------------------|
+| name | string | no | "" | Name of the scenario, used for identification and referencing. |
+| outputFolder | string | no | "output" | Directory where the simulation outputs will be stored. |
+| runs | integer | no | 1 | Number of times the same scenario should be run. Each scenario is run with a different seed. |
+| initialSeed        | integer                                                                | no        | 0             | The seed used for random number generation during a scenario. Setting a seed ensures reproducibility.  |
+| topologies | List[path/to/file] | yes | N/A | Paths to the JSON files defining the topologies. |
+| workloads | List[[Workload](/docs/documentation/Input/Workload)] | yes | N/A | Paths to the files defining the workloads executed. |
+| allocationPolicies | List[[AllocationPolicy](/docs/documentation/Input/AllocationPolicy)] | yes | N/A | Allocation policies used for resource management in the scenario. |
+| failureModels | List[[FailureModel](/docs/documentation/Input/FailureModel)] | no | List[null] | List of failure models to simulate various types of failures. |
+| maxNumFailures | List[integer] | no | [10] | The max number of times a task can fail before being terminated. |
+| checkpointModels   | List[[CheckpointModel](/docs/documentation/Input/CheckpointModel)]   | no        | List[null]    | List of checkpoint models that determine how snapshots are created during the simulation.              |
+| exportModels | List[[ExportModel](/docs/documentation/Input/ExportModel)] | no | List[default] | Specifications for exporting data from the simulation. |
+
+Most components of an experiment are not single values, but lists of values.
+This allows users to run multiple scenarios using a single experiment file.
+OpenDC will generate and execute all permutations of the different values.
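+For example, an experiment that defines two topologies and three allocation policies will generate and run 2 × 3 = 6 scenarios.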
+
+Some of the components in an experiment file are paths to files, or complex objects. The format of each of these
+components is defined on its respective page.
## Examples
-In the following section, we discuss several examples of Scenario files. Any scenario file can be verified using the
-JSON schema defined in [schema](TopologySchema).
+In the following section, we discuss several examples of experiment files.
### Simple
-The simplest scneario that can be provided to OpenDC is shown below:
+The simplest experiment that can be provided to OpenDC is shown below:
```json
{
"topologies": [
@@ -127,18 +54,19 @@ The simplest scneario that can be provided to OpenDC is shown below:
],
"allocationPolicies": [
{
- "policyType": "Mem"
+ "type": "prefab",
+ "policyName": "Mem"
}
]
}
```
-This scenario creates a simulation from file topology1, located in the topologies folder, with a workload trace from the
+This experiment creates a simulation from the file topology1, located in the topologies folder, with a workload trace from the
bitbrains-small file, and an allocation policy of type Mem. The simulation is run once (by default), and the default
name is "".
### Complex
-Following is an example of a more complex topology:
+Following is an example of a more complex experiment:
```json
{
"topologies": [
@@ -164,10 +92,12 @@ Following is an example of a more complex topology:
],
"allocationPolicies": [
{
- "policyType": "Mem"
+ "type": "prefab",
+ "policyName": "Mem"
},
{
- "policyType": "Mem-Inv"
+ "type": "prefab",
+ "policyName": "Mem-Inv"
}
]
}
diff --git a/site/docs/documentation/Input/ExperimentSchema.md b/site/docs/documentation/Input/ExperimentSchema.md
deleted file mode 100644
index 78ec55f7..00000000
--- a/site/docs/documentation/Input/ExperimentSchema.md
+++ /dev/null
@@ -1,81 +0,0 @@
-Below is the schema for the Scenario JSON file. This schema can be used to validate a scenario file.
-A scenario file can be validated using a JSON schema validator, such as https://www.jsonschemavalidator.net/.
-
-```json
-{
- "$schema": "OpenDC/Scenario",
- "$defs": {
- "topology": {
- "type": "object",
- "properties": {
- "pathToFile": {
- "type": "string"
- }
- },
- "required": [
- "pathToFile"
- ]
- },
- "workload": {
- "type": "object",
- "properties": {
- "pathToFile": {
- "type": "string"
- },
- "type": {
- "type": "string"
- }
- },
- "required": [
- "pathToFile",
- "type"
- ]
- },
- "allocationPolicy": {
- "type": "object",
- "properties": {
- "policyType": {
- "type": "string"
- }
- },
- "required": [
- "policyType"
- ]
- }
- },
- "properties": {
- "name": {
- "type": "string"
- },
- "topologies": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/topology"
- },
- "minItems": 1
- },
- "workloads": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/workload"
- },
- "minItems": 1
- },
- "allocationPolicies": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/allocationPolicy"
- },
- "minItems": 1
- },
- "runs": {
- "type": "integer"
- }
- },
- "required": [
- "topologies",
- "workloads",
- "allocationPolicies"
- ]
-}
-```
diff --git a/site/docs/documentation/Input/ExportModel.md b/site/docs/documentation/Input/ExportModel.md
new file mode 100644
index 00000000..12e7eba2
--- /dev/null
+++ b/site/docs/documentation/Input/ExportModel.md
@@ -0,0 +1,50 @@
+During simulation, OpenDC exports data to files (see [Output](/docs/documentation/Output.md)).
+The user can define what data is exported, and how, using the `exportModels` parameter in the experiment file.
+
+## ExportModel
+
+
+
+| Variable            | Type                                        | Required? | Default   | Description                                                                                                                                                     |
+|---------------------|---------------------------------------------|-----------|-----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| exportInterval      | Int64                                       | no        | 300       | The duration between two exports in seconds.                                                                                                                    |
+| printFrequency      | Int64                                       | no        | 24        | How often OpenDC prints an update during simulation.                                                                                                            |
+| computeExportConfig | [ComputeExportConfig](#computeexportconfig) | no        | Default   | The features that should be exported during the simulation.                                                                                                     |
+| filesToExport       | List[string]                                | no        | all files | List of the files that should be exported during simulation. The elements should be picked from the set ("host", "task", "powerSource", "battery", "service"). |
+
+
+
+### ComputeExportConfig
+The ComputeExportConfig defines which features should be exported during the simulation.
+Several features will always be exported, regardless of the configuration.
+When not provided, all features are exported.
+
+
+| Variable                 | Type         | Required? | Always exported                                                          | Default      | Description                                                             |
+|--------------------------|--------------|-----------|--------------------------------------------------------------------------|--------------|-------------------------------------------------------------------------|
+| hostExportColumns | List[String] | no | name <br/> cluster_name <br/> timestamp <br/> timestamp_absolute <br/> | All features | The features that should be exported to the host output file. |
+| taskExportColumns | List[String] | no | task_id <br/> task_name <br/> timestamp <br/> timestamp_absolute <br/> | All features | The features that should be exported to the task output file. |
+| powerSourceExportColumns | List[String] | no | name <br/> cluster_name <br/> timestamp <br/> timestamp_absolute <br/> | All features | The features that should be exported to the power source output file. |
+| batteryExportColumns | List[String] | no | name <br/> cluster_name <br/> timestamp <br/> timestamp_absolute <br/> | All features | The features that should be exported to the battery output file. |
+| serviceExportColumns | List[String] | no | timestamp <br/> timestamp_absolute <br/> | All features | The features that should be exported to the service output file. |
+
+### Example
+
+```json
+{
+ "exportInterval": 3600,
+ "printFrequency": 168,
+ "filesToExport": ["host", "task", "service"],
+ "computeExportConfig": {
+ "hostExportColumns": ["power_draw", "energy_usage", "cpu_usage", "cpu_utilization"],
+ "taskExportColumns": ["submission_time", "schedule_time", "finish_time", "task_state"],
+ "serviceExportColumns": ["tasks_total", "tasks_pending", "tasks_active", "tasks_completed", "tasks_terminated", "hosts_up"]
+ }
+}
+```
+In this example:
+- The simulation will export data every hour (3600 seconds).
+- The simulation will print a progress update every 168 exports.
+- Only the host, task, and service files will be exported.
+- Only a selection of features is exported for each file.
+
diff --git a/site/docs/documentation/Input/FailureModel.md b/site/docs/documentation/Input/FailureModel.md
index ecaf7c03..714d2157 100644
--- a/site/docs/documentation/Input/FailureModel.md
+++ b/site/docs/documentation/Input/FailureModel.md
@@ -1,3 +1,9 @@
+### FailureModel
+A failure model defines when and how failures occur during the simulation.
+
+
+
OpenDC provides three types of failure models: [Trace-based](#trace-based-failure-models), [Sample-based](#sample-based-failure-models),
and [Prefab](#prefab-failure-models).
@@ -159,7 +165,7 @@ Example:
The final type of failure models is the prefab models. These are models that are predefined in OpenDC and are based on
research. Currently, OpenDC has 9 prefab models based on [The Failure Trace Archive: Enabling the comparison of failure measurements and models of distributed systems](https://www-sciencedirect-com.vu-nl.idm.oclc.org/science/article/pii/S0743731513000634)
The figure below shows the values used to define the failure models.
-![img.png](img.png)
+![failureModels.png](../../../static/img/failureModels.png)
Each failure model is defined four times, one for each of the four distributions.
The final list of available prefabs is thus:
diff --git a/site/docs/documentation/Input/Topology.md b/site/docs/documentation/Input/Topology.md
deleted file mode 100644
index 0d2479bd..00000000
--- a/site/docs/documentation/Input/Topology.md
+++ /dev/null
@@ -1,220 +0,0 @@
-The topology of a datacenter is defined using a JSON file. A topology consist of one or more clusters.
-Each cluster consist of at least one host on which jobs can be executed. Each host consist of one or more CPUs,
-a memory unit and a power model.
-
-:::info Code
-The code related to reading and processing topology files can be found [here](https://github.com/atlarge-research/opendc/tree/master/opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology)
-:::
-
-
-## Schema
-
-The schema for the topology file is provided in [schema](TopologySchema).
-In the following section, we describe the different components of the schema.
-
-### Cluster
-
-| variable | type | required? | default | description |
-|----------|---------------------|-----------|---------|-----------------------------------------------------------------------------------|
-| name | string | no | Cluster | The name of the cluster. This is only important for debugging and post-processing |
-| count | integer | no | 1 | The amount of clusters of this type are in the data center |
-| hosts | List[[Host](#host)] | yes | N/A | A list of the hosts in a cluster. |
-
-### Host
-
-| variable | type | required? | default | description |
-|-------------|-----------------------------|-----------|---------|--------------------------------------------------------------------------------|
-| name | string | no | Host | The name of the host. This is only important for debugging and post-processing |
-| count | integer | no | 1 | The amount of hosts of this type are in the cluster |
-| cpuModel | [CPU](#cpu) | yes | N/A | The CPUs in the host |
-| memory | [Memory](#memory) | yes | N/A | The memory used by the host |
-| power model | [Power Model](#power-model) | yes | N/A | The power model used to determine the power draw of the host |
-
-### CPU
-
-| variable | type | Unit | required? | default | description |
-|-----------|---------|-------|-----------|---------|--------------------------------------------------|
-| name | string | N/A | no | unknown | The name of the CPU. |
-| vendor | string | N/A | no | unknown | The vendor of the CPU |
-| arch | string | N/A | no | unknown | the micro-architecture of the CPU |
-| count | integer | N/A | no | 1 | The amount of cpus of this type used by the host |
-| coreCount | integer | count | yes | N/A | The number of cores in the CPU |
-| coreSpeed | Double | Mhz | yes | N/A | The speed of each core in Mhz |
-
-### Memory
-
-| variable | type | Unit | required? | default | description |
-|-------------|---------|------|-----------|---------|--------------------------------------------------------------------------|
-| name | string | N/A | no | unknown | The name of the CPU. |
-| vendor | string | N/A | no | unknown | The vendor of the CPU |
-| arch | string | N/A | no | unknown | the micro-architecture of the CPU |
-| count | integer | N/A | no | 1 | The amount of cpus of this type used by the host |
-| memorySize | integer | Byte | yes | N/A | The number of cores in the CPU |
-| memorySpeed | Double | ? | no | -1 | The speed of each core in Mhz. PLACEHOLDER: this currently does nothing. |
-
-### Power Model
-
-| variable | type | Unit | required? | default | description |
-|-----------------|--------|------|-----------|----------|-------------------------------------------------------------------------------|
-| vendor | string | N/A | yes | N/A | The type of model used to determine power draw |
-| modelName | string | N/A | yes | N/A | The type of model used to determine power draw |
-| arch | string | N/A | yes | N/A | The type of model used to determine power draw |
-| totalPower | Int64 | Watt | no | max long | The power draw of a host when using max capacity in Watt |
-| carbonTracePath | string | N/A | no | null | Path to a carbon intensity trace. If not given, carbon intensity is always 0. |
-
-## Examples
-
-In the following section, we discuss several examples of topology files. Any topology file can be verified using the
-JSON schema defined in [schema](TopologySchema).
-
-### Simple
-
-The simplest data center that can be provided to OpenDC is shown below:
-
-```json
-{
- "clusters": [
- {
- "hosts": [
- {
- "cpu":
- {
- "coreCount": 16,
- "coreSpeed": 1000
- },
- "memory": {
- "memorySize": 100000
- }
- }
- ]
- }
- ]
-}
-```
-
-This creates a data center with a single cluster containing a single host. This host consist of a single 16 core CPU
-with a speed of 1 Ghz, and 100 MiB RAM memory.
-
-### Count
-
-Duplicating clusters, hosts, or CPUs is easy using the "count" keyword:
-
-```json
-{
- "clusters": [
- {
- "count": 2,
- "hosts": [
- {
- "count": 5,
- "cpu":
- {
- "coreCount": 16,
- "coreSpeed": 1000,
- "count": 10
- },
- "memory":
- {
- "memorySize": 100000
- }
- }
- ]
- }
- ]
-}
-```
-
-This topology creates a datacenter consisting of 2 clusters, both containing 5 hosts. Each host contains 10 16 core
-CPUs.
-Using "count" saves a lot of copying.
-
-### Complex
-
-Following is an example of a more complex topology:
-
-```json
-{
- "clusters": [
- {
- "name": "C01",
- "count": 2,
- "hosts": [
- {
- "name": "H01",
- "count": 2,
- "cpus": [
- {
- "coreCount": 16,
- "coreSpeed": 1000
- }
- ],
- "memory": {
- "memorySize": 1000000
- },
- "powerModel": {
- "modelType": "linear",
- "idlePower": 200.0,
- "maxPower": 400.0
- }
- },
- {
- "name": "H02",
- "count": 2,
- "cpus": [
- {
- "coreCount": 8,
- "coreSpeed": 3000
- }
- ],
- "memory": {
- "memorySize": 100000
- },
- "powerModel": {
- "modelType": "square",
- "idlePower": 300.0,
- "maxPower": 500.0
- }
- }
- ]
- }
- ]
-}
-```
-
-This topology defines two types of hosts with different coreCount, and coreSpeed.
-Both types of hosts are created twice.
-
-
-### With Units of Measure
-
-Aside from using number to indicate values it is also possible to define values using strings. This allows the user to define the unit of the input parameter.
-```json
-{
- "clusters": [
- {
- "count": 2,
- "hosts" :
- [
- {
- "name": "H01",
- "cpuModel":
- {
- "coreCount": 8,
- "coreSpeed": "3.2 Ghz"
- },
- "memory": {
- "memorySize": "128e3 MiB",
- "memorySpeed": "1 Mhz"
- },
- "powerModel": {
- "modelType": "linear",
- "power": "400 Watts",
- "maxPower": "1 KW",
- "idlePower": "0.4 W"
- }
- }
- ]
- }
- ]
-}
-```
diff --git a/site/docs/documentation/Input/Topology/Battery.md b/site/docs/documentation/Input/Topology/Battery.md
new file mode 100644
index 00000000..70492694
--- /dev/null
+++ b/site/docs/documentation/Input/Topology/Battery.md
@@ -0,0 +1,37 @@
+Batteries can be used to store energy for later use.
+In previous work, we have used batteries to store energy from the grid when the carbon intensity is low,
+and to use this energy when the carbon intensity is high.
+
+Batteries are defined using the following parameters:
+
+| variable | type | Unit | required? | default | description |
+|------------------|---------------------------|-------|-----------|---------|-----------------------------------------------------------------------------------|
+| name | string | N/A | no | Battery | The name of the battery. This is only important for debugging and post-processing |
+| capacity | Double | kWh | yes | N/A | The total amount of energy that the battery can hold. |
+| chargingSpeed | Double | W | yes | N/A | Charging speed of the battery. |
+| initialCharge | Double | kWh | no | 0.0 | The initial charge of the battery. If not given, the battery starts empty. |
+| batteryPolicy | [Policy](#battery-policy) | N/A | yes | N/A | The policy which decides when to charge and discharge. |
+| embodiedCarbon | Double | gram | no | 0.0 | The embodied carbon emitted while creating this battery. |
+| expectedLifetime | Double                    | Years | yes       | N/A     | The expected lifetime of the battery.                                               |
+
+## Battery Policy
+To determine when to charge and discharge the battery, a policy is required.
+Currently, all policies for batteries are based on the carbon intensity of the grid.
+
+The best-performing policy is called "runningMeanPlus" and is based on the running mean of the carbon intensity.
+It can be defined with the following JSON:
+
+```json
+{
+ "type": "runningMeanPlus",
+ "startingThreshold": 123.2,
+ "windowSize": 168
+}
+```
+
+Here, `startingThreshold` is the initial carbon threshold used, and
+`windowSize` is the size of the window used to calculate the running mean.
+
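+Putting it together, a complete battery definition could look like the following sketch (the name and values are illustrative, combining the parameter table above with the policy just described):
+
+```json
+{
+  "name": "B01",
+  "capacity": 10.0,
+  "chargingSpeed": 1000.0,
+  "initialCharge": 0.0,
+  "embodiedCarbon": 50000.0,
+  "expectedLifetime": 10.0,
+  "batteryPolicy": {
+    "type": "runningMeanPlus",
+    "startingThreshold": 123.2,
+    "windowSize": 168
+  }
+}
+```
+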
+:::info Alert
+This page will be extended with more text and policies in the future.
+:::
diff --git a/site/docs/documentation/Input/Topology/Host.md b/site/docs/documentation/Input/Topology/Host.md
new file mode 100644
index 00000000..7b5b8394
--- /dev/null
+++ b/site/docs/documentation/Input/Topology/Host.md
@@ -0,0 +1,55 @@
+A host is a machine that can execute tasks. A host consists of the following components:
+
+| variable | type | required? | default | description |
+|-------------|:-------------------------------------------------------------|:----------|---------|--------------------------------------------------------------------------------|
+| name | string | no | Host | The name of the host. This is only important for debugging and post-processing |
+| count       | integer                                                        | no        | 1       | The number of hosts of this type in the cluster                                  |
+| cpuModel | [CPU](#cpu) | yes | N/A | The CPUs in the host |
+| memory | [Memory](#memory) | yes | N/A | The memory used by the host |
+| powerModel  | [Power Model](/docs/documentation/Input/Topology/PowerModel)  | no        | Default | The power model used to determine the power draw of the host                    |
+
+## CPU
+
+| variable | type | Unit | required? | default | description |
+|-----------|---------|-------|-----------|---------|--------------------------------------------------|
+| modelName | string | N/A | no | unknown | The name of the CPU. |
+| vendor | string | N/A | no | unknown | The vendor of the CPU |
+| arch      | string  | N/A   | no        | unknown | The micro-architecture of the CPU                |
+| count | integer | N/A | no | 1 | The number of CPUs of this type used by the host |
+| coreCount | integer | count | yes | N/A | The number of cores in the CPU |
+| coreSpeed | Double | Mhz | yes | N/A | The speed of each core in Mhz |
+
+## Memory
+
+| variable | type | Unit | required? | default | description |
+|-------------|---------|------|-----------|---------|--------------------------------------------------------------------------|
+| modelName   | string  | N/A  | no        | unknown | The name of the memory unit.                                               |
+| vendor      | string  | N/A  | no        | unknown | The vendor of the memory unit.                                             |
+| arch        | string  | N/A  | no        | unknown | The micro-architecture of the memory unit.                                 |
+| memorySize  | integer | Byte | yes       | N/A     | The size of the memory unit.                                               |
+| memorySpeed | Double  | Mhz  | no        | -1      | The speed of the memory in Mhz. PLACEHOLDER: this currently does nothing.  |
+
+## Example
+
+```json
+{
+ "name": "H01",
+ "cpu": {
+ "coreCount": 16,
+ "coreSpeed": 2100
+ },
+ "memory": {
+ "memorySize": 100000
+ },
+ "powerModel": {
+ "modelType": "sqrt",
+ "idlePower": 32.0,
+ "maxPower": 180.0
+ },
+ "count": 100
+}
+```
+
+This example creates 100 hosts, each with a 16-core 2.1 GHz CPU and 100 GB of memory.
+The power model used is a square root model with an idle power of 32 W and a max power of 180 W.
+For more information on the power model, see [Power Model](/docs/documentation/Input/Topology/PowerModel).
diff --git a/site/docs/documentation/Input/Topology/PowerModel.md b/site/docs/documentation/Input/Topology/PowerModel.md
new file mode 100644
index 00000000..06f4a4da
--- /dev/null
+++ b/site/docs/documentation/Input/Topology/PowerModel.md
@@ -0,0 +1,31 @@
+OpenDC uses power models to determine the power draw based on the utilization of a host.
+All models interpolate between the idle and max power draw of a host, based on its utilization.
+OpenDC currently supports the following power models:
+1. **Constant**: The power draw is constant and does not depend on the utilization of the host.
+2. **Sqrt**: The power draw interpolates between idle and max using a square root function.
+3. **Linear**: The power draw interpolates between idle and max using a linear function.
+4. **Square**: The power draw interpolates between idle and max using a square function.
+5. **Cubic**: The power draw interpolates between idle and max using a cubic function.
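+
+For instance, under the linear model the power draw at utilization `u` (between 0 and 1) is `P(u) = idlePower + u * (maxPower - idlePower)`; the sqrt, square, and cubic models replace `u` with `sqrt(u)`, `u^2`, and `u^3`, respectively.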
+
+The power model is defined using the following parameters:
+
+| variable | type | Unit | required? | default | description |
+|-----------|--------|------|-----------|---------|--------------------------------------------------------------------|
+| modelType | string | N/A | yes | N/A | The type of model used to determine power draw |
+| power     | double | Watt | no        | 400     | The power draw of a host when using the constant power draw model.  |
+| idlePower | double | Watt | yes       | N/A     | The power draw of a host when idle.                                 |
+| maxPower  | double | Watt | yes       | N/A     | The power draw of a host when using max capacity.                   |
+
+
+## Example
+
+```json
+{
+ "modelType": "sqrt",
+ "idlePower": 32.0,
+ "maxPower": 180.0
+}
+```
+
+This creates a power model that uses a square root function to determine the power draw of a host.
+The model uses an idle and max power of 32 W and 180 W respectively.
diff --git a/site/docs/documentation/Input/Topology/PowerSource.md b/site/docs/documentation/Input/Topology/PowerSource.md
new file mode 100644
index 00000000..993083dd
--- /dev/null
+++ b/site/docs/documentation/Input/Topology/PowerSource.md
@@ -0,0 +1,20 @@
+Each cluster has a power source that provides power to the hosts in the cluster.
+A user can connect a power source to a carbon trace to determine the carbon emissions during a workload.
+
+The power source consists of the following components:
+
+| variable | type | Unit | required? | default | description |
+|-----------------|--------------|------|-----------|----------------|-----------------------------------------------------------------------------------|
+| name            | string       | N/A  | no        | PowerSource    | The name of the power source. This is only important for debugging and post-processing |
+| maxPower        | integer      | Watt | no        | Long.Max_Value | The total power that the power source can provide in Watt.                             |
+| carbonTracePath | path/to/file | N/A  | no        | null           | Path to a carbon intensity trace. If not given, the carbon intensity is always 0.      |
+
+## Example
+
+```json
+{
+ "carbonTracePath": "carbon_traces/AT_2021-2024.parquet"
+}
+```
+
+This example creates a power source with effectively unlimited capacity (the default maxPower) that uses the carbon trace from the file `carbon_traces/AT_2021-2024.parquet`.
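+
+A power source with limited capacity can be defined by also setting `maxPower` (the name and values below are illustrative):
+
+```json
+{
+  "name": "PS01",
+  "maxPower": 100000,
+  "carbonTracePath": "carbon_traces/AT_2021-2024.parquet"
+}
+```
+
+This power source can provide at most 100 kW to the hosts in its cluster.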
diff --git a/site/docs/documentation/Input/Topology/Topology.md b/site/docs/documentation/Input/Topology/Topology.md
new file mode 100644
index 00000000..afc94e08
--- /dev/null
+++ b/site/docs/documentation/Input/Topology/Topology.md
@@ -0,0 +1,183 @@
+The topology of a datacenter defines all available hardware. Topologies are defined using a JSON file.
+A topology consists of one or more clusters. Each cluster consists of at least one host on which jobs can be executed.
+Each host consists of one or more CPUs, a memory unit, and a power model.
+
+:::info Code
+The code related to reading and processing topology files can be found [here](https://github.com/atlarge-research/opendc/tree/master/opendc-compute/opendc-compute-topology/src/main/kotlin/org/opendc/compute/topology)
+:::
+
+In the following section, we describe the different components of a topology file.
+
+### Cluster
+
+| variable | type | required? | default | description |
+|-------------|---------------------------------------------------------------|-----------|---------|-----------------------------------------------------------------------------------|
+| name | string | no | Cluster | The name of the cluster. This is only important for debugging and post-processing |
+| count       | integer                                                         | no        | 1       | The number of clusters of this type in the data center                              |
+| hosts | List[[Host](/docs/documentation/Input/Topology/Host)] | yes | N/A | A list of the hosts in a cluster. |
+| powerSource | [PowerSource](/docs/documentation/Input/Topology/PowerSource) | no | N/A | The power source used by all hosts connected to this cluster. |
+| battery | [Battery](/docs/documentation/Input/Topology/Battery) | no | null | The battery used by a cluster to store energy. When null, no batteries are used. |
+
+Hosts, power sources, and batteries are complex objects themselves; see their respective pages for more information.
+
+## Examples
+
+In the following section, we discuss several examples of topology files.
+
+### Simple
+
+The simplest data center that can be provided to OpenDC is shown below:
+
+```json
+{
+ "clusters": [
+ {
+ "hosts": [
+ {
+ "cpu":
+ {
+ "coreCount": 16,
+ "coreSpeed": 1000
+ },
+ "memory": {
+ "memorySize": 100000
+ }
+ }
+ ],
+ "powerSource": {
+ "carbonTracePath": "carbon_traces/AT_2021-2024.parquet"
+ }
+ }
+ ]
+}
+```
+
+This creates a data center with a single cluster containing a single host. This host consists of a single 16-core CPU
+with a speed of 1 GHz, and 100 MiB of RAM.
+
+### Count
+
+Duplicating clusters, hosts, or CPUs is easy using the "count" keyword:
+
+```json
+{
+ "clusters": [
+ {
+ "count": 2,
+ "hosts": [
+ {
+ "count": 5,
+ "cpu":
+ {
+ "coreCount": 16,
+ "coreSpeed": 1000,
+ "count": 10
+ },
+ "memory":
+ {
+ "memorySize": 100000
+ }
+ }
+ ],
+ "powerSource": {
+ "carbonTracePath": "carbon_traces/AT_2021-2024.parquet"
+ }
+ }
+ ]
+}
+```
+
+This topology creates a datacenter consisting of 2 clusters, both containing 5 hosts. Each host contains ten 16-core
+CPUs.
+Using "count" saves a lot of copying.
+
+### Complex
+
+Following is an example of a more complex topology:
+
+```json
+{
+ "clusters": [
+ {
+ "name": "C01",
+ "count": 2,
+ "hosts": [
+ {
+ "name": "H01",
+ "count": 2,
+ "cpus": [
+ {
+ "coreCount": 16,
+ "coreSpeed": 1000
+ }
+ ],
+ "memory": {
+ "memorySize": 1000000
+ },
+ "powerModel": {
+ "modelType": "linear",
+ "idlePower": 200.0,
+ "maxPower": 400.0
+ }
+ },
+ {
+ "name": "H02",
+ "count": 2,
+ "cpus": [
+ {
+ "coreCount": 8,
+ "coreSpeed": 3000
+ }
+ ],
+ "memory": {
+ "memorySize": 100000
+ },
+ "powerModel": {
+ "modelType": "square",
+ "idlePower": 300.0,
+ "maxPower": 500.0
+ }
+ }
+ ]
+ }
+ ]
+}
+```
+
+This topology defines two types of hosts with different coreCount and coreSpeed values.
+Both types of hosts are created twice.
+
+
+### With Units of Measure
+
+Aside from using plain numbers, it is also possible to define values using strings. This allows the user to specify the unit of an input parameter.
+```json
+{
+ "clusters": [
+ {
+ "count": 2,
+ "hosts" :
+ [
+ {
+ "name": "H01",
+ "cpuModel":
+ {
+ "coreCount": 8,
+ "coreSpeed": "3.2 Ghz"
+ },
+ "memory": {
+ "memorySize": "128e3 MiB",
+ "memorySpeed": "1 Mhz"
+ },
+ "powerModel": {
+ "modelType": "linear",
+ "power": "400 Watts",
+ "maxPower": "1 KW",
+ "idlePower": "0.4 W"
+ }
+ }
+ ]
+ }
+ ]
+}
+```
diff --git a/site/docs/documentation/Input/TopologySchema.md b/site/docs/documentation/Input/TopologySchema.md
deleted file mode 100644
index d0199568..00000000
--- a/site/docs/documentation/Input/TopologySchema.md
+++ /dev/null
@@ -1,160 +0,0 @@
-Below is the schema for the Topology JSON file. This schema can be used to validate a topology file.
-A topology file can be validated using a JSON schema validator, such as https://www.jsonschemavalidator.net/.
-
-```json
-{
- "$schema": "OpenDC/Topology",
- "$defs": {
- "cpuModel": {
- "description": "definition of a cpuModel",
- "type": "object",
- "properties": {
- "vendor": {
- "type": "string",
- "default": "unknown"
- },
- "modelName": {
- "type": "string",
- "default": "unknown"
- },
- "arch": {
- "type": "string",
- "default": "unknown"
- },
- "coreCount": {
- "type": "integer"
- },
- "coreSpeed": {
- "description": "The core speed of the cpuModel in Mhz",
- "type": "number"
- },
- "count": {
- "description": "The amount CPUs of this type present in the cluster",
- "type": "integer"
- }
- },
- "required": [
- "coreCount",
- "coreSpeed"
- ]
- },
- "memory": {
- "type": "object",
- "properties": {
- "vendor": {
- "type": "string",
- "default": "unknown"
- },
- "modelName": {
- "type": "string",
- "default": "unknown"
- },
- "arch": {
- "type": "string",
- "default": "unknown"
- },
- "memorySize": {
- "description": "The amount of the memory in B",
- "type": "integer"
- },
- "memorySpeed": {
- "description": "The speed of the memory in Mhz. Note: currently, this does nothing",
- "type": "number",
- "default": -1
- }
- },
- "required": [
- "memorySize"
- ]
- },
- "powerModel": {
- "type": "object",
- "properties": {
- "modelType": {
- "description": "The type of model used to determine power draw",
- "type": "string"
- },
- "power": {
- "description": "The constant power draw when using the 'constant' power model type in Watt",
- "type": "number",
- "default": 400
- },
- "maxPower": {
- "description": "The power draw of a host when idle in Watt",
- "type": "number"
- },
- "idlePower": {
- "description": "The power draw of a host when using max capacity in Watt",
- "type": "number"
- }
- },
- "required": [
- "modelType",
- "maxPower",
- "idlePower"
- ]
- },
- "host": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "default": "Host"
- },
- "count": {
- "description": "The amount hosts of this type present in the cluster",
- "type": "integer",
- "default": 1
- },
- "cpuModel": {
- "$ref": "#/$defs/cpuModel"
- },
- "memory": {
- "$ref": "#/$defs/memory"
- }
- },
- "required": [
- "cpuModel",
- "memory"
- ]
- },
- "cluster": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "default": "Cluster"
- },
- "count": {
- "description": "The amount clusters of this type present in the Data center",
- "type": "integer",
- "default": 1
- },
- "hosts": {
- "type": "array",
- "items": {
- "$ref": "#/$defs/host"
- },
- "minItems": 1
- }
- },
- "required": [
- "hosts"
- ]
- }
- },
- "properties": {
- "clusters": {
- "description": "Clusters present in the data center",
- "type": "array",
- "items": {
- "$ref": "#/$defs/cluster"
- },
- "minItems": 1
- }
- },
- "required": [
- "clusters"
- ]
-}
-```
diff --git a/site/docs/documentation/Input/Workload.md b/site/docs/documentation/Input/Workload.md
index b0a45942..73f39e60 100644
--- a/site/docs/documentation/Input/Workload.md
+++ b/site/docs/documentation/Input/Workload.md
@@ -1,24 +1,31 @@
-OpenDC works with two types of traces that describe the tasks that need to be run. Both traces have to be provided as
-parquet files.
+Workloads define the tasks in the simulation, when they were submitted, and their computational requirements.
+Workloads are defined using two files:
-#### Task
-The meta trace provides an overview of the tasks:
+- **[Tasks](#tasks)**: The Tasks file contains the metadata of the tasks
+- **[Fragments](#fragments)**: The Fragments file contains the computational demand of each task over time
-| Metric | Datatype | Unit | Summary |
-|-----------------|----------|----------|------------------------------------------------|
-| id | string | | The id of the server |
-| submission_time | int64 | datetime | The submission time of the server |
-| duration | int64 | datetime | The finish time of the submission |
-| cpu_count | int32 | count | The number of CPUs required to run this task |
-| cpu_capacity | float64 | MHz | The amount of CPU required to run this task |
-| mem_capacity | int64 | MB | The amount of memory required to run this task |
+Both files are provided using the parquet format.
-#### Fragment
-The Fragment file provides information about the computational demand of each task over time:
+#### Tasks
+The Tasks file provides an overview of the tasks:
-| Metric | Datatype | Unit | Summary |
-|-----------|------------|---------------|---------------------------------------------|
-| id | string | | The id of the task |
-| duration | int64 | milli seconds | The duration since the last sample |
-| cpu_count | int32 | count | The number of cpus required |
-| cpu_usage | float64 | MHz | The amount of computational power required. |
+| Metric | Required? | Datatype | Unit | Summary |
+|-----------------|-----------|----------|------------------------------|--------------------------------------------------------|
+| id              | Yes       | string   |                              | The id of the task                                      |
+| submission_time | Yes       | int64    | datetime                     | The submission time of the task                         |
+| nature | No | string | [deferrable, non-deferrable] | Defines if a task can be delayed |
+| deadline        | No        | string   | datetime                     | The latest time to which the scheduling of a task can be delayed. |
+| duration        | Yes       | int64    | milli seconds                | The duration of the task                                |
+| cpu_count | Yes | int32 | count | The number of CPUs required to run this task |
+| cpu_capacity | Yes | float64 | MHz | The amount of CPU required to run this task |
+| mem_capacity | Yes | int64 | MB | The amount of memory required to run this task |
+
+#### Fragments
+The Fragments file provides information about the computational demand of each task over time:
+
+| Metric | Required? | Datatype | Unit | Summary |
+|-----------|-----------|----------|---------------|---------------------------------------------|
+| id | Yes | string | | The id of the task |
+| duration | Yes | int64 | milli seconds | The duration since the last sample |
+| cpu_count | Yes | int32 | count | The number of cpus required |
+| cpu_usage | Yes | float64 | MHz | The amount of computational power required. |
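+
+For illustration, a single task that first runs at high load and then idles could be described by one Tasks row and two Fragments rows (all values below are hypothetical):
+
+Tasks:
+
+| id | submission_time | duration | cpu_count | cpu_capacity | mem_capacity |
+|----|-----------------|----------|-----------|--------------|--------------|
+| 1  | 1640995200000   | 600000   | 4         | 4000.0       | 8192         |
+
+Fragments:
+
+| id | duration | cpu_count | cpu_usage |
+|----|----------|-----------|-----------|
+| 1  | 300000   | 4         | 3500.0    |
+| 1  | 300000   | 4         | 100.0     |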
diff --git a/site/docs/documentation/Input/M3SA.md b/site/docs/documentation/M3SA/M3SA.md
index 6c97d207..6c97d207 100644
--- a/site/docs/documentation/Input/M3SA.md
+++ b/site/docs/documentation/M3SA/M3SA.md
diff --git a/site/docs/documentation/Input/M3SASchema.md b/site/docs/documentation/M3SA/M3SASchema.md
index 5a3503ca..5a3503ca 100644
--- a/site/docs/documentation/Input/M3SASchema.md
+++ b/site/docs/documentation/M3SA/M3SASchema.md
diff --git a/site/docs/documentation/Output.md b/site/docs/documentation/Output.md
index 339ac615..584b0702 100644
--- a/site/docs/documentation/Output.md
+++ b/site/docs/documentation/Output.md
@@ -1,91 +1,114 @@
-Running OpenDC results in three output files. The first file ([Task](#task)) contains metrics related to the jobs being executed.
-The second file ([Host](#host)) contains all metrics related to the hosts on which jobs can be executed. The third file ([Power](#power))
-contains all metrics related to the power sources that power the hosts. Finally, the third file ([Service](#service))
-contains metrics describing the overall performance. An experiment in OpenDC has
+Running OpenDC results in five output files:
+1. [Task](#task) contains metrics related to the jobs being executed.
+2. [Host](#host) contains all metrics related to the hosts on which jobs can be executed.
+3. [Power Source](#power-source) contains all metrics related to the power sources that power the hosts.
+4. [Battery](#battery) contains all metrics related to the batteries that power the hosts.
+5. [Service](#service) contains metrics describing the overall performance.
+
+Users can define which files and features are included in the output via the experiment file (see [ExportModel](/docs/documentation/Input/ExportModel.md)).
### Task
The task output file contains all metrics related to the tasks being executed.
-| Metric | Datatype | Unit | Summary |
-|--------------------|----------|-----------|-------------------------------------------------------------------------------|
-| timestamp | int64 | ms | Timestamp of the sample since the start of the workload |
-| absolute timestamp | int64 | ms | The absolute timestamp based on the given workload |
-| server_id | binary | string | The id of the server determined during runtime |
-| server_name | binary | string | The name of the server provided by the Trace |
-| host_id | binary | string | The id of the host on which the server is hosted or `null` if it has no host. |
-| mem_capacity | int64 | Mb | |
-| cpu_count | int32 | count | |
-| cpu_limit | double | MHz | The capacity of the CPUs of Host on which the server is running. |
-| cpu_time_active | int64 | ms | The duration that a CPU was active in the server. |
-| cpu_time_idle | int64 | ms | The duration that a CPU was idle in the server. |
-| cpu_time_steal | int64 | ms | The duration that a vCPU wanted to run, but no capacity was available. |
-| cpu_time_lost | int64 | ms | The duration of CPU time that was lost due to interference. |
-| uptime | int64 | ms | The uptime of the host since last sample. |
-| downtime | int64 | ms | The downtime of the host since last sample. |
-| provision_time | int64 | ms | The time for which the server was enqueued for the scheduler. |
-| boot_time | int64 | ms | The time a task got booted. |
-| boot_time_absolute | int64 | ms | The absolute time a task got booted. |
-| creation_time | int64 | ms | The time at which the task was created by the ComputeService |
-| finish_time | int64 | ms | The time at which the task was finished (either completed or terminated) |
-| task_state | String | TaskState | The status of the Task |
+| Metric | Datatype | Unit | Summary |
+|--------------------|----------|-----------|-----------------------------------------------------------------------------|
+| timestamp | int64 | ms | Timestamp of the sample since the start of the workload. |
+| timestamp_absolute | int64 | ms | The absolute timestamp based on the given workload. |
+| task_id | binary | string | The id of the task determined during runtime. |
+| task_name | binary | string | The name of the task provided by the Trace. |
+| host_name          | binary   | string    | The name of the host on which the task is hosted or `null` if it has no host. |
+| mem_capacity | int64 | Mb | The memory required by the task. |
+| cpu_count | int32 | count | The number of CPUs required by the task. |
+| cpu_limit          | double   | MHz       | The capacity of the CPUs of the host on which the task is running.           |
+| cpu_usage | double | MHz | The cpu capacity provided by the CPU to the task. |
+| cpu_demand | double | MHz | The cpu capacity demanded of the CPU by the task. |
+| cpu_time_active | int64 | ms | The duration that a CPU was active in the task. |
+| cpu_time_idle | int64 | ms | The duration that a CPU was idle in the task. |
+| cpu_time_steal | int64 | ms | The duration that a vCPU wanted to run, but no capacity was available. |
+| cpu_time_lost | int64 | ms | The duration of CPU time that was lost due to interference. |
+| uptime | int64 | ms | The uptime of the host since last sample. |
+| downtime | int64 | ms | The downtime of the host since last sample. |
+| num_failures       | int64    | count     | How many times a task was interrupted due to machine failure.                |
+| num_pauses         | int64    | count     | How many times a task was interrupted due to the TaskStopper.                |
+| submission_time    | int64    | ms        | The time at which the task was enqueued for the scheduler.                   |
+| schedule_time      | int64    | ms        | The time at which the task got booted.                                       |
+| finish_time | int64 | ms | The time at which the task was finished (either completed or terminated). |
+| task_state | String | TaskState | The current state of the Task. |
### Host
-The host output file, contains all metrics of related to the host run.
+The host output file contains all metrics related to the hosts that are running.
-| Metric | DataType | Unit | Summary |
-|--------------------|----------|------------|-------------------------------------------------------------------------------------------------|
-| timestamp | int64 | ms | Timestamp of the sample |
-| absolute timestamp | int64 | ms | The absolute timestamp based on the given workload |
-| host_id | binary | string | The id of the host given by OpenDC |
-| cpu_count | int32 | count | The number of available cpuModel cores |
-| mem_capacity | int64 | Mb | The amount of available memory |
-| guests_terminated | int32 | count | The number of guests that are in a terminated state. |
-| guests_running | int32 | count | The number of guests that are in a running state. |
-| guests_error | int32 | count | The number of guests that are in an error state. |
-| guests_invalid | int32 | count | The number of guests that are in an unknown state. |
-| cpu_limit | double | MHz | The capacity of the CPUs in the host. |
-| cpu_usage | double | MHz | The usage of all CPUs in the host. |
-| cpu_demand | double | MHz | The demand of all vCPUs of the guests |
-| cpu_utilization | double | ratio | The CPU utilization of the host. This is calculated by dividing the cpu_usage, by the cpu_limit |
-| cpu_time_active | int64 | ms | The duration that a CPU was active in the host. |
-| cpu_time_idle | int64 | ms | The duration that a CPU was idle in the host. |
-| cpu_time_steal | int64 | ms | The duration that a vCPU wanted to run, but no capacity was available. |
-| cpu_time_lost | int64 | ms | The duration of CPU time that was lost due to interference. |
-| power_draw | double | Watt | The current power draw of the host. |
-| energy_usage | double | Joule (Ws) | The total energy consumption of the host since last sample. |
-| uptime | int64 | ms | The uptime of the host since last sample. |
-| downtime | int64 | ms | The downtime of the host since last sample. |
-| boot_time | int64 | ms | The time a host got booted. |
-| boot_time_absolute | int64 | ms | The absolute time a host got booted. |
+| Metric | DataType | Unit | Summary |
+|--------------------|----------|------------|-----------------------------------------------------------------------------------------------------|
+| timestamp | int64 | ms | Timestamp of the sample. |
+| timestamp_absolute | int64 | ms | The absolute timestamp based on the given workload. |
+| host_name | binary | string | The name of the host. |
+| cluster_name | binary | string | The name of the cluster that this host is part of. |
+| cpu_count | int32 | count | The number of cores in this host. |
+| mem_capacity | int64 | Mb | The amount of available memory. |
+| tasks_terminated | int32 | count | The number of tasks that are in a terminated state. |
+| tasks_running | int32 | count | The number of tasks that are in a running state. |
+| tasks_error | int32 | count | The number of tasks that are in an error state. |
+| tasks_invalid | int32 | count | The number of tasks that are in an unknown state. |
+| cpu_capacity | double | MHz | The total capacity of the CPUs in the host. |
+| cpu_usage | double | MHz | The total CPU capacity provided to all tasks on this host. |
+| cpu_demand | double | MHz | The total CPU capacity demanded by all tasks on this host. |
+| cpu_utilization    | double   | ratio      | The CPU utilization of the host. This is calculated by dividing the cpu_usage by the cpu_capacity.   |
+| cpu_time_active | int64 | ms | The duration that a CPU was active in the host. |
+| cpu_time_idle | int64 | ms | The duration that a CPU was idle in the host. |
+| cpu_time_steal | int64 | ms | The duration that a vCPU wanted to run, but no capacity was available. |
+| cpu_time_lost | int64 | ms | The duration of CPU time that was lost due to interference. |
+| power_draw | double | Watt | The current power draw of the host. |
+| energy_usage | double | Joule (Ws) | The total energy consumption of the host since last sample. |
+| embodied_carbon | double | gram | The total embodied carbon emitted since the last sample. |
+| uptime | int64 | ms | The uptime of the host since last sample. |
+| downtime | int64 | ms | The downtime of the host since last sample. |
+| boot_time | int64 | ms | The time a host got booted. |
+| boot_time_absolute | int64 | ms | The absolute time a host got booted. |
### Power Source
-The host output file, contains all metrics of related to the host run.
+The power source output file contains all metrics related to the power sources.
+
+| Metric | DataType | Unit | Summary |
+|--------------------|----------|------------|-------------------------------------------------------------------|
+| timestamp | int64 | ms | Timestamp of the sample. |
+| timestamp_absolute | int64 | ms | The absolute timestamp based on the given workload. |
+| source_name | binary | string | The name of the power source. |
+| cluster_name | binary | string | The name of the cluster that this power source is part of. |
+| power_draw         | double   | Watt       | The current power draw of the power source.                         |
+| energy_usage       | double   | Joule (Ws) | The total energy consumption of the power source since last sample. |
+| carbon_intensity   | double   | gCO2/kWh   | The amount of carbon that is emitted when using a unit of energy.  |
+| carbon_emission | double | gram | The amount of carbon emitted since the previous sample. |
-| Metric | DataType | Unit | Summary |
-|--------------------|----------|------------|------------------------------------------------------------------------------------------|
-| timestamp | int64 | ms | Timestamp of the sample |
-| absolute timestamp | int64 | ms | The absolute timestamp based on the given workload |
-| hosts_connected | int | Count | The number of hosts connected to the power Source (WARNING: does not work at the moment) |
-| power_draw | double | Watt | The current power draw of the host. |
-| energy_usage | double | Joule (Ws) | The total energy consumption of the host since last sample. |
-| carbon_intensity | double | gCO2/kW | The amount of carbon that is emitted when using a unit of energy |
-| carbon_emission | double | gram | The amount of carbon emitted since the previous sample |
+### Battery
+The battery output file contains all metrics related to the batteries.
+| Metric | DataType | Unit | Summary |
+|--------------------|----------|--------------|-------------------------------------------------------------------|
+| timestamp | int64 | ms | Timestamp of the sample. |
+| timestamp_absolute | int64 | ms | The absolute timestamp based on the given workload. |
+| battery_name | binary | string | The name of the battery. |
+| cluster_name | binary | string | The name of the cluster that this battery is part of. |
+| power_draw         | double   | Watt         | The current power draw of the battery.                              |
+| energy_usage       | double   | Joule (Ws)   | The total energy consumption of the battery since last sample.      |
+| carbon_intensity   | double   | gCO2/kWh     | The amount of carbon that is emitted when using a unit of energy.   |
+| embodied_carbon | double | gram | The total embodied carbon emitted since the last sample. |
+| charge | double | Joule | The current charge of the battery. |
+| capacity | double | Joule | The total capacity of the battery. |
+| battery_state | String | BatteryState | The current state of the battery. |
### Service
The service output file contains metrics providing an overview of the overall performance.
-| Metric | DataType | Unit | Summary |
-|--------------------|----------|-------|------------------------------------------------------------------------|
-| timestamp | int64 | ms | Timestamp of the sample |
-| absolute timestamp | int64 | ms | The absolute timestamp based on the given workload |
-| hosts_up | int32 | count | The number of hosts that are up at this instant. |
-| hosts_down | int32 | count | The number of hosts that are down at this instant. |
-| tasks_total | int32 | count | The number of servers that are currently active. |
-| tasks_pending | int32 | count | The number of servers that are pending to be scheduled. |
-| tasks_active | int32 | count | The number of servers that are currently active. |
-| tasks_terminated | int32 | count | The number of servers that are currently active. |
-| tasks_completed | int32 | count | The number of servers that are currently active. |
-| attempts_success | int32 | count | The scheduling attempts that were successful. |
-| attempts_failure | int32 | count | The scheduling attempts that were unsuccessful due to client error. |
+| Metric | DataType | Unit | Summary |
+|--------------------|----------|-------|-------------------------------------------------------|
+| timestamp | int64 | ms | Timestamp of the sample |
+| timestamp_absolute | int64 | ms | The absolute timestamp based on the given workload |
+| hosts_up | int32 | count | The number of hosts that are up at this instant. |
+| hosts_down | int32 | count | The number of hosts that are down at this instant. |
+| tasks_total | int32 | count | The number of tasks seen by the service. |
+| tasks_pending | int32 | count | The number of tasks that are pending to be scheduled. |
+| tasks_active | int32 | count | The number of tasks that are currently active. |
+| tasks_terminated | int32 | count | The number of tasks that were terminated. |
+| tasks_completed    | int32    | count | The number of tasks that finished successfully.        |
diff --git a/site/docs/getting-started/4-start-using-intellij.md b/site/docs/getting-started/1-start-using-intellij.md
index 6aec91f1..6aec91f1 100644
--- a/site/docs/getting-started/4-start-using-intellij.md
+++ b/site/docs/getting-started/1-start-using-intellij.md
diff --git a/site/docs/getting-started/1-first-experiment.md b/site/docs/getting-started/2-first-experiment.md
index 9c84c435..79fd6424 100644
--- a/site/docs/getting-started/1-first-experiment.md
+++ b/site/docs/getting-started/2-first-experiment.md
@@ -6,17 +6,16 @@ description: Designing a simple experiment
Now that you have downloaded OpenDC, we will start creating a simple experiment.
In this experiment we will compare the performance of a small, and a big data center on the same workload.
-<details>
-<summary>Expand this</summary>
-This is content
-</details>
+[//]: # (:::tip Answer)
-:::tip Answer
-<details>
-<summary>Expand for the Answer</summary>
-</details>
-:::
+[//]: # (<details>)
+
+[//]: # (<summary>Expand for the Answer</summary>)
+
+[//]: # (</details>)
+
+[//]: # (:::)
:::info Learning goal
During this tutorial, we will learn how to create and execute a simple experiment in OpenDC.
@@ -149,7 +148,7 @@ For this experiment, we will use the bitbrains-small workload. This is a small w
spanning over a bit more than a month time. You can download the workload [here](documents/workloads/bitbrains-small.zip "download")
:::info
-For more in depth information about Workloads, see [Workload](../documentation/Input/Workload)
+For more in-depth information about Workloads, see [Workload](../documentation/Input/Workload.md)
:::
## Executing an experiment
diff --git a/site/docs/getting-started/3-whats-next.md b/site/docs/getting-started/3-whats-next.md
index 03737629..b7598022 100644
--- a/site/docs/getting-started/3-whats-next.md
+++ b/site/docs/getting-started/3-whats-next.md
@@ -9,4 +9,4 @@ Congratulations! You have just learned how to design and experiment with a (virt
- Follow one of the [tutorials](/docs/category/tutorials) using OpenDC.
- Read about [existing work using OpenDC](/community/research).
- Get involved in the [OpenDC Community](/community/support).
-- If you are interested in contributing to OpenDC you can find a How-To here [4-start-using-intellij](4-start-using-intellij.md), please also read https://github.com/atlarge-research/opendc/blob/master/CONTRIBUTING.md.
+- If you are interested in contributing to OpenDC, you can find a How-To here: [start-using-intellij](1-start-using-intellij.md). Please also read https://github.com/atlarge-research/opendc/blob/master/CONTRIBUTING.md.
diff --git a/site/docs/documentation/Input/img.png b/site/static/img/failureModels.png
index 5ad3a85b..5ad3a85b 100644
--- a/site/docs/documentation/Input/img.png
+++ b/site/static/img/failureModels.png
Binary files differ