authorDante Niewenhuis <d.niewenhuis@hotmail.com>2024-12-06 09:21:19 +0100
committerGitHub <noreply@github.com>2024-12-06 09:21:19 +0100
commitb4f694d9083e28f67e1746a37f4761cda6699263 (patch)
tree540a3b54f18c26068010b77c63b0c0f1a47c0c5c
parenta49a3878758438fe8d04bf4c4d3e3ffc5873aace (diff)
Added 9 new tests specifically testing the Multiplexer. These tests assume the Multiplexer uses MaxMinFairness, given that this is currently the default and only fairness policy available in OpenDC. (#280)
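The expected values asserted by the new tests follow from max-min fair sharing of a host's CPU capacity. As a reading aid, a minimal Kotlin sketch of that allocation is given below; it assumes the standard water-filling formulation, and the function name and structure are illustrative only, not OpenDC's actual MaxMinFairness implementation.

    // Illustrative water-filling allocation: serve consumers in order of increasing
    // demand, capping each one at an equal share of the capacity that is still left.
    fun maxMinShares(demands: List<Double>, capacity: Double): List<Double> {
        val shares = DoubleArray(demands.size)
        var remainingCapacity = capacity
        var remainingConsumers = demands.size
        for (idx in demands.indices.sortedBy { demands[it] }) {
            val granted = minOf(demands[idx], remainingCapacity / remainingConsumers)
            shares[idx] = granted
            remainingCapacity -= granted
            remainingConsumers--
        }
        return shares.toList()
    }

Demands that fit are granted in full; only when the total demand exceeds the capacity is the remaining supply split evenly over the consumers that still want more.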
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt (renamed from opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ScenarioIntegrationTest.kt) | 20
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/MultiplexerTest.kt | 544
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ScenarioRunnerTest.kt | 79
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/failureTraces/11_failures.parquet (renamed from opendc-experiments/opendc-experiments-base/src/test/resources/failureTraces/11_failures.parquet) | bin 2786 -> 2786 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/failureTraces/single_failure.parquet (renamed from opendc-experiments/opendc-experiments-base/src/test/resources/failureTraces/single_failure.parquet) | bin 2786 -> 2786 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/topologies/multi.json (renamed from opendc-experiments/opendc-experiments-base/src/test/resources/topologies/multi.json) | 0
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/topologies/single.json (renamed from opendc-experiments/opendc-experiments-base/src/test/resources/topologies/single.json) | 0
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/bitbrains-small/fragments.parquet (renamed from opendc-experiments/opendc-experiments-base/src/test/resources/traces/bitbrains-small/fragments.parquet) | bin 717069 -> 717069 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/bitbrains-small/interference-model.json (renamed from opendc-experiments/opendc-experiments-base/src/test/resources/traces/bitbrains-small/interference-model.json) | 0
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/bitbrains-small/tasks.parquet (renamed from opendc-experiments/opendc-experiments-base/src/test/resources/traces/bitbrains-small/tasks.parquet) | bin 5525 -> 5525 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/single_task/fragments.parquet (renamed from opendc-experiments/opendc-experiments-base/src/test/resources/traces/single_task/fragments.parquet) | bin 3012 -> 3012 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/single_task/tasks.parquet (renamed from opendc-experiments/opendc-experiments-base/src/test/resources/traces/single_task/tasks.parquet) | bin 4471 -> 4471 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/topologies/single_1_2000.json | 22
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/topologies/single_2_2000.json | 22
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_1/fragments.parquet | bin 0 -> 2684 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_1/tasks.parquet | bin 0 -> 3919 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_2/fragments.parquet | bin 0 -> 2684 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_2/tasks.parquet | bin 0 -> 3919 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_3/fragments.parquet | bin 0 -> 2684 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_3/tasks.parquet | bin 0 -> 3919 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_4/fragments.parquet | bin 0 -> 2684 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_4/tasks.parquet | bin 0 -> 3919 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_5/fragments.parquet | bin 0 -> 2689 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_5/tasks.parquet | bin 0 -> 3924 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_6/fragments.parquet | bin 0 -> 2689 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_6/tasks.parquet | bin 0 -> 3924 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_7/fragments.parquet | bin 0 -> 2689 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_7/tasks.parquet | bin 0 -> 3940 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_8/fragments.parquet | bin 0 -> 2697 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_8/tasks.parquet | bin 0 -> 3940 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_9/fragments.parquet | bin 0 -> 2709 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_9/tasks.parquet | bin 0 -> 3924 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/model.json | 15
-rw-r--r--  opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/Multiplexer.java | 19
34 files changed, 610 insertions, 111 deletions
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ScenarioIntegrationTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt
index 10478174..8e9a3ad7 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ScenarioIntegrationTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt
@@ -54,7 +54,7 @@ import java.util.Random
/**
* An integration test suite for the Scenario experiments.
*/
-class ScenarioIntegrationTest {
+class ExperimentTest {
/**
* The monitor used to keep track of the metrics.
*/
@@ -70,6 +70,8 @@ class ScenarioIntegrationTest {
*/
private lateinit var workloadLoader: ComputeWorkloadLoader
+ private val basePath = "src/test/resources/Experiment"
+
/**
* Set up the experimental environment.
*/
@@ -81,7 +83,7 @@ class ScenarioIntegrationTest {
filters = listOf(ComputeFilter(), VCpuFilter(16.0), RamFilter(1.0)),
weighers = listOf(CoreRamWeigher(multiplier = 1.0)),
)
- workloadLoader = ComputeWorkloadLoader(File("src/test/resources/traces"), 0L, 0L, 0.0)
+ workloadLoader = ComputeWorkloadLoader(File("$basePath/traces"), 0L, 0L, 0.0)
}
/**
@@ -137,7 +139,7 @@ class ScenarioIntegrationTest {
val monitor = monitor
val failureModelSpec =
TraceBasedFailureModelSpec(
- "src/test/resources/failureTraces/single_failure.parquet",
+ "$basePath/failureTraces/single_failure.parquet",
repeat = false,
)
@@ -183,7 +185,7 @@ class ScenarioIntegrationTest {
val workload = createTestWorkload("single_task", 1.0, seed)
val topology = createTopology("single.json")
val monitor = monitor
- val failureModelSpec = TraceBasedFailureModelSpec("src/test/resources/failureTraces/11_failures.parquet")
+ val failureModelSpec = TraceBasedFailureModelSpec("$basePath/failureTraces/11_failures.parquet")
Provisioner(dispatcher, seed).use { provisioner ->
provisioner.runSteps(
@@ -223,11 +225,11 @@ class ScenarioIntegrationTest {
fun testSingleTaskCheckpoint() =
runSimulation {
val seed = 1L
- workloadLoader = ComputeWorkloadLoader(File("src/test/resources/traces"), 1000000L, 1000L, 1.0)
+ workloadLoader = ComputeWorkloadLoader(File("$basePath/traces"), 1000000L, 1000L, 1.0)
val workload = createTestWorkload("single_task", 1.0, seed)
val topology = createTopology("single.json")
val monitor = monitor
- val failureModelSpec = TraceBasedFailureModelSpec("src/test/resources/failureTraces/11_failures.parquet")
+ val failureModelSpec = TraceBasedFailureModelSpec("$basePath/failureTraces/11_failures.parquet")
Provisioner(dispatcher, seed).use { provisioner ->
provisioner.runSteps(
@@ -341,8 +343,8 @@ class ScenarioIntegrationTest {
{ assertEquals(0, monitor.tasksActive, "All VMs should finish after a run") },
{ assertEquals(0, monitor.attemptsFailure, "No VM should be unscheduled") },
{ assertEquals(0, monitor.tasksPending, "No VM should not be in the queue") },
- { assertEquals(43101787496, monitor.idleTime) { "Incorrect idle time" } },
- { assertEquals(3489412504, monitor.activeTime) { "Incorrect active time" } },
+ { assertEquals(43101787447, monitor.idleTime) { "Incorrect idle time" } },
+ { assertEquals(3489412553, monitor.activeTime) { "Incorrect active time" } },
{ assertEquals(0, monitor.stealTime) { "Incorrect steal time" } },
{ assertEquals(0, monitor.lostTime) { "Incorrect lost time" } },
{ assertEquals(1.0016123392181786E10, monitor.energyUsage, 1E4) { "Incorrect energy usage" } },
@@ -365,7 +367,7 @@ class ScenarioIntegrationTest {
* Obtain the topology factory for the test.
*/
private fun createTopology(name: String): List<ClusterSpec> {
- val stream = checkNotNull(object {}.javaClass.getResourceAsStream("/topologies/$name"))
+ val stream = checkNotNull(object {}.javaClass.getResourceAsStream("/Experiment/topologies/$name"))
return stream.use { clusterTopology(stream) }
}
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/MultiplexerTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/MultiplexerTest.kt
new file mode 100644
index 00000000..1c0afd7f
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/MultiplexerTest.kt
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2020 AtLarge Research
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package org.opendc.experiments.base
+
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.BeforeEach
+import org.junit.jupiter.api.Test
+import org.junit.jupiter.api.assertAll
+import org.opendc.compute.simulator.provisioner.Provisioner
+import org.opendc.compute.simulator.provisioner.registerComputeMonitor
+import org.opendc.compute.simulator.provisioner.setupComputeService
+import org.opendc.compute.simulator.provisioner.setupHosts
+import org.opendc.compute.simulator.scheduler.FilterScheduler
+import org.opendc.compute.simulator.scheduler.filters.ComputeFilter
+import org.opendc.compute.simulator.scheduler.filters.RamFilter
+import org.opendc.compute.simulator.scheduler.filters.VCpuFilter
+import org.opendc.compute.simulator.scheduler.weights.CoreRamWeigher
+import org.opendc.compute.simulator.service.ComputeService
+import org.opendc.compute.simulator.telemetry.ComputeMonitor
+import org.opendc.compute.simulator.telemetry.table.HostTableReader
+import org.opendc.compute.simulator.telemetry.table.TaskTableReader
+import org.opendc.compute.topology.clusterTopology
+import org.opendc.compute.topology.specs.ClusterSpec
+import org.opendc.compute.workload.ComputeWorkloadLoader
+import org.opendc.compute.workload.Task
+import org.opendc.compute.workload.sampleByLoad
+import org.opendc.compute.workload.trace
+import org.opendc.experiments.base.runner.replay
+import org.opendc.simulator.kotlin.runSimulation
+import java.io.File
+import java.time.Duration
+import java.util.ArrayList
+import java.util.Random
+
+/**
+ * Test suite containing tests that specifically target the Multiplexer.
+ */
+class MultiplexerTest {
+ /**
+ * The monitor used to keep track of the metrics.
+ */
+ private lateinit var monitor: TestComputeMonitor
+
+ /**
+ * The [FilterScheduler] to use for all experiments.
+ */
+ private lateinit var computeScheduler: FilterScheduler
+
+ /**
+ * The [ComputeWorkloadLoader] responsible for loading the traces.
+ */
+ private lateinit var workloadLoader: ComputeWorkloadLoader
+
+ private val basePath = "src/test/resources/Multiplexer"
+
+ /**
+ * Set up the experimental environment.
+ */
+ @BeforeEach
+ fun setUp() {
+ monitor = TestComputeMonitor()
+ computeScheduler =
+ FilterScheduler(
+ filters = listOf(ComputeFilter(), VCpuFilter(16.0), RamFilter(1.0)),
+ weighers = listOf(CoreRamWeigher(multiplier = 1.0)),
+ )
+ workloadLoader = ComputeWorkloadLoader(File("$basePath/traces"), 0L, 0L, 0.0)
+ }
+
+ /**
+ * Multiplexer test 1: A single fitting task
+ * In this test, a single task is scheduled that should fit on the Multiplexer.
+ * We check whether both the host and the task report the correct cpu usage and demand during the two fragments.
+ */
+ @Test
+ fun testMultiplexer1() =
+ runSimulation {
+ val seed = 1L
+ val workload = createTestWorkload("multiplexer_test_1", 1.0, seed)
+ val topology = createTopology("single_1_2000.json")
+ val monitor = monitor
+
+ Provisioner(dispatcher, seed).use { provisioner ->
+ provisioner.runSteps(
+ setupComputeService(serviceDomain = "compute.opendc.org", { computeScheduler }),
+ registerComputeMonitor(serviceDomain = "compute.opendc.org", monitor, exportInterval = Duration.ofMinutes(1)),
+ setupHosts(serviceDomain = "compute.opendc.org", topology),
+ )
+
+ val service = provisioner.registry.resolve("compute.opendc.org", ComputeService::class.java)!!
+ service.replay(timeSource, workload, seed = seed)
+ }
+
+ assertAll(
+ { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.hostCpuDemands[1]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(2000.0, monitor.hostCpuDemands[10]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(1000.0, monitor.hostCpuSupplied[1]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(2000.0, monitor.hostCpuSupplied[10]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(0.0, monitor.hostCpuDemands.last()) { "When the task is finished, the host should have 0.0 demand" } },
+ { assertEquals(0.0, monitor.hostCpuSupplied.last()) { "When the task is finished, the host should have 0.0 usage" } },
+ )
+ }
+
+ /**
+ * Multiplexer test 2: A single overloaded task
+ * In this test, a single task is scheduled that does not fit on the Multiplexer.
+ * We expect the usage to be lower than the demand.
+ * We check whether both the host and the task report the correct cpu usage and demand during the two fragments.
+ */
+ @Test
+ fun testMultiplexer2() =
+ runSimulation {
+ val seed = 1L
+ val workload = createTestWorkload("multiplexer_test_2", 1.0, seed)
+ val topology = createTopology("single_1_2000.json")
+ val monitor = monitor
+
+ Provisioner(dispatcher, seed).use { provisioner ->
+ provisioner.runSteps(
+ setupComputeService(serviceDomain = "compute.opendc.org", { computeScheduler }),
+ registerComputeMonitor(serviceDomain = "compute.opendc.org", monitor, exportInterval = Duration.ofMinutes(1)),
+ setupHosts(serviceDomain = "compute.opendc.org", topology),
+ )
+
+ val service = provisioner.registry.resolve("compute.opendc.org", ComputeService::class.java)!!
+ service.replay(timeSource, workload, seed = seed)
+ }
+
+ assertAll(
+ { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(4000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.hostCpuDemands[1]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuDemands[10]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(2000.0, monitor.hostCpuSupplied[1]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(2000.0, monitor.hostCpuSupplied[10]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(0.0, monitor.hostCpuDemands.last()) { "When the task is finished, the host should have 0.0 demand" } },
+ { assertEquals(0.0, monitor.hostCpuSupplied.last()) { "When the task is finished, the host should have 0.0 usage" } },
+ )
+ }
+
+ /**
+ * Multiplexer test 3: A single task transitioning from fitting to overloaded
+ * In this test, a single task is scheduled where the first fragment fits, but the second does not.
+ * We expect the usage of the second fragment to be lower than its demand.
+ * We check whether both the host and the task report the correct cpu usage and demand during the two fragments.
+ */
+ @Test
+ fun testMultiplexer3() =
+ runSimulation {
+ val seed = 1L
+ val workload = createTestWorkload("multiplexer_test_3", 1.0, seed)
+ val topology = createTopology("single_1_2000.json")
+ val monitor = monitor
+
+ Provisioner(dispatcher, seed).use { provisioner ->
+ provisioner.runSteps(
+ setupComputeService(serviceDomain = "compute.opendc.org", { computeScheduler }),
+ registerComputeMonitor(serviceDomain = "compute.opendc.org", monitor, exportInterval = Duration.ofMinutes(1)),
+ setupHosts(serviceDomain = "compute.opendc.org", topology),
+ )
+
+ val service = provisioner.registry.resolve("compute.opendc.org", ComputeService::class.java)!!
+ service.replay(timeSource, workload, seed = seed)
+ }
+
+ assertAll(
+ { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(4000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.hostCpuDemands[1]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuDemands[10]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(1000.0, monitor.hostCpuSupplied[1]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(2000.0, monitor.hostCpuSupplied[10]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(0.0, monitor.hostCpuDemands.last()) { "When the task is finished, the host should have 0.0 demand" } },
+ { assertEquals(0.0, monitor.hostCpuSupplied.last()) { "When the task is finished, the host should have 0.0 usage" } },
+ )
+ }
+
+ /**
+ * Multiplexer test 4: A single task transitioning from overloaded to fitting
+ * In this test, a single task is scheduled where the first fragment does not fit, but the second does.
+ * We expect the usage of the first fragment to be lower than its demand.
+ * We check whether both the host and the task report the correct cpu usage and demand during the two fragments.
+ */
+ @Test
+ fun testMultiplexer4() =
+ runSimulation {
+ val seed = 1L
+ val workload = createTestWorkload("multiplexer_test_4", 1.0, seed)
+ val topology = createTopology("single_1_2000.json")
+ val monitor = monitor
+
+ Provisioner(dispatcher, seed).use { provisioner ->
+ provisioner.runSteps(
+ setupComputeService(serviceDomain = "compute.opendc.org", { computeScheduler }),
+ registerComputeMonitor(serviceDomain = "compute.opendc.org", monitor, exportInterval = Duration.ofMinutes(1)),
+ setupHosts(serviceDomain = "compute.opendc.org", topology),
+ )
+
+ val service = provisioner.registry.resolve("compute.opendc.org", ComputeService::class.java)!!
+ service.replay(timeSource, workload, seed = seed)
+ }
+
+ assertAll(
+ { assertEquals(4000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuDemands[1]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(1000.0, monitor.hostCpuDemands[10]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(2000.0, monitor.hostCpuSupplied[1]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(1000.0, monitor.hostCpuSupplied[10]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(0.0, monitor.hostCpuDemands.last()) { "When the task is finished, the host should have 0.0 demand" } },
+ { assertEquals(0.0, monitor.hostCpuSupplied.last()) { "When the task is finished, the host should have 0.0 usage" } },
+ )
+ }
+
+ /**
+ * Multiplexer test 5: Two tasks, same time, both fit
+ * In this test, two tasks are scheduled, and they fit together on the host. The tasks start and finish at the same time.
+ * This test shows how the multiplexer handles two tasks that fit together, so no redistribution is required.
+ * We check whether both the host and the tasks report the correct cpu usage and demand during the two fragments.
+ */
+ @Test
+ fun testMultiplexer5() =
+ runSimulation {
+ val seed = 1L
+ val workload = createTestWorkload("multiplexer_test_5", 1.0, seed)
+ val topology = createTopology("single_2_2000.json")
+ val monitor = monitor
+
+ Provisioner(dispatcher, seed).use { provisioner ->
+ provisioner.runSteps(
+ setupComputeService(serviceDomain = "compute.opendc.org", { computeScheduler }),
+ registerComputeMonitor(serviceDomain = "compute.opendc.org", monitor, exportInterval = Duration.ofMinutes(1)),
+ setupHosts(serviceDomain = "compute.opendc.org", topology),
+ )
+
+ val service = provisioner.registry.resolve("compute.opendc.org", ComputeService::class.java)!!
+ service.replay(timeSource, workload, seed = seed)
+ }
+
+ assertAll(
+ { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands["1"]?.get(10)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied["1"]?.get(10)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuDemands[1]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuDemands[10]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuSupplied[1]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuSupplied[10]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(0.0, monitor.hostCpuDemands.last()) { "When the tasks are finished, the host should have 0.0 demand" } },
+ { assertEquals(0.0, monitor.hostCpuSupplied.last()) { "When the tasks are finished, the host should have 0.0 usage" } },
+ )
+ }
+
+ /**
+ * Multiplexer test 6: Two tasks, same time, cannot both fit
+ * In this test, two tasks are scheduled, and they cannot both fit. The tasks start and finish at the same time.
+ * This test shows how the multiplexer handles two tasks that do not fit together, so redistribution is required.
+ * We check whether both the host and the tasks report the correct cpu usage and demand during the two fragments.
+ */
+ @Test
+ fun testMultiplexer6() =
+ runSimulation {
+ val seed = 1L
+ val workload = createTestWorkload("multiplexer_test_6", 1.0, seed)
+ val topology = createTopology("single_2_2000.json")
+ val monitor = monitor
+
+ Provisioner(dispatcher, seed).use { provisioner ->
+ provisioner.runSteps(
+ setupComputeService(serviceDomain = "compute.opendc.org", { computeScheduler }),
+ registerComputeMonitor(serviceDomain = "compute.opendc.org", monitor, exportInterval = Duration.ofMinutes(1)),
+ setupHosts(serviceDomain = "compute.opendc.org", topology),
+ )
+
+ val service = provisioner.registry.resolve("compute.opendc.org", ComputeService::class.java)!!
+ service.replay(timeSource, workload, seed = seed)
+ }
+
+ println(monitor.taskCpuDemands)
+ println(monitor.hostCpuDemands)
+
+ assertAll(
+ { assertEquals(6000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(5000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(5000.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(6000.0, monitor.taskCpuDemands["1"]?.get(10)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["1"]?.get(10)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(11000.0, monitor.hostCpuDemands[1]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(11000.0, monitor.hostCpuDemands[10]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuSupplied[1]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuSupplied[10]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(0.0, monitor.hostCpuDemands.last()) { "When the tasks are finished, the host should have 0.0 demand" } },
+ { assertEquals(0.0, monitor.hostCpuSupplied.last()) { "When the tasks are finished, the host should have 0.0 usage" } },
+ )
+ }
+
+ /**
+ * Multiplexer test 7: Two tasks, both fit, second task is delayed
+ * In this test, two tasks are scheduled, with the second task delayed.
+ * We check whether both the host and the tasks report the correct cpu usage and demand during the two fragments.
+ */
+ @Test
+ fun testMultiplexer7() =
+ runSimulation {
+ val seed = 1L
+ val workload = createTestWorkload("multiplexer_test_7", 1.0, seed)
+ val topology = createTopology("single_2_2000.json")
+ val monitor = monitor
+
+ Provisioner(dispatcher, seed).use { provisioner ->
+ provisioner.runSteps(
+ setupComputeService(serviceDomain = "compute.opendc.org", { computeScheduler }),
+ registerComputeMonitor(serviceDomain = "compute.opendc.org", monitor, exportInterval = Duration.ofMinutes(1)),
+ setupHosts(serviceDomain = "compute.opendc.org", topology),
+ )
+
+ val service = provisioner.registry.resolve("compute.opendc.org", ComputeService::class.java)!!
+ service.replay(timeSource, workload, seed = seed)
+ }
+
+ println(monitor.taskCpuDemands)
+ println(monitor.hostCpuDemands)
+
+ assertAll(
+ { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(5)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands["0"]?.get(14)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(5)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(14)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands["1"]?.get(6)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["1"]?.get(6)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.hostCpuDemands[1]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(3000.0, monitor.hostCpuDemands[5]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuDemands[9]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(2000.0, monitor.hostCpuDemands[14]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(1000.0, monitor.hostCpuSupplied[1]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(3000.0, monitor.hostCpuSupplied[5]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuSupplied[9]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(2000.0, monitor.hostCpuSupplied[14]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(0.0, monitor.hostCpuDemands.last()) { "When the tasks are finished, the host should have 0.0 demand" } },
+ { assertEquals(0.0, monitor.hostCpuSupplied.last()) { "When the tasks are finished, the host should have 0.0 usage" } },
+ )
+ }
+
+ /**
+ * Multiplexer test 8: Two tasks, each fits on its own but not together, second task is delayed
+ * In this test, two tasks are scheduled, with the second task delayed.
+ * When the second task arrives, the host becomes overloaded.
+ * This test shows how the multiplexer redistributes the supply when a new task arrives
+ * (the resulting shares are worked out in the sketch after this file's diff).
+ * We check whether both the host and the tasks report the correct cpu usage and demand during the two fragments.
+ */
+ @Test
+ fun testMultiplexer8() =
+ runSimulation {
+ val seed = 1L
+ val workload = createTestWorkload("multiplexer_test_8", 1.0, seed)
+ val topology = createTopology("single_2_2000.json")
+ val monitor = monitor
+
+ Provisioner(dispatcher, seed).use { provisioner ->
+ provisioner.runSteps(
+ setupComputeService(serviceDomain = "compute.opendc.org", { computeScheduler }),
+ registerComputeMonitor(serviceDomain = "compute.opendc.org", monitor, exportInterval = Duration.ofMinutes(1)),
+ setupHosts(serviceDomain = "compute.opendc.org", topology),
+ )
+
+ val service = provisioner.registry.resolve("compute.opendc.org", ComputeService::class.java)!!
+ service.replay(timeSource, workload, seed = seed)
+ }
+
+ println(monitor.taskCpuDemands)
+ println(monitor.hostCpuDemands)
+
+ assertAll(
+ { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(5)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(14)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2500.0, monitor.taskCpuSupplied["0"]?.get(5)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2500.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied["0"]?.get(14)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuDemands["1"]?.get(6)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuSupplied["1"]?.get(6)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.hostCpuDemands[1]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(4500.0, monitor.hostCpuDemands[5]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(3000.0, monitor.hostCpuDemands[14]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(3000.0, monitor.hostCpuSupplied[1]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuSupplied[5]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(3000.0, monitor.hostCpuSupplied[14]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(0.0, monitor.hostCpuDemands.last()) { "When the tasks are finished, the host should have 0.0 demand" } },
+ { assertEquals(0.0, monitor.hostCpuSupplied.last()) { "When the tasks are finished, the host should have 0.0 usage" } },
+ )
+ }
+
+ /**
+ * Multiplexer test 9: Two tasks, one changes demand, causing overload
+ * In this test, two tasks are scheduled, and they can both fit initially.
+ * However, task 0 increases its demand, which overloads the multiplexer.
+ * This test shows how the multiplexer handles the transition from fitting to overloaded while multiple tasks are running.
+ * We check whether both the host and the tasks report the correct cpu usage and demand during the two fragments.
+ */
+ @Test
+ fun testMultiplexer9() =
+ runSimulation {
+ val seed = 1L
+ val workload = createTestWorkload("multiplexer_test_9", 1.0, seed)
+ val topology = createTopology("single_2_2000.json")
+ val monitor = monitor
+
+ Provisioner(dispatcher, seed).use { provisioner ->
+ provisioner.runSteps(
+ setupComputeService(serviceDomain = "compute.opendc.org", { computeScheduler }),
+ registerComputeMonitor(serviceDomain = "compute.opendc.org", monitor, exportInterval = Duration.ofMinutes(1)),
+ setupHosts(serviceDomain = "compute.opendc.org", topology),
+ )
+
+ val service = provisioner.registry.resolve("compute.opendc.org", ComputeService::class.java)!!
+ service.replay(timeSource, workload, seed = seed)
+ }
+
+ println(monitor.taskCpuDemands)
+ println(monitor.hostCpuDemands)
+
+ assertAll(
+ { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuDemands["0"]?.get(5)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2500.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(14)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuSupplied["0"]?.get(5)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(14)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands["1"]?.get(5)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands["1"]?.get(9)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands["1"]?.get(14)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(2500.0, monitor.taskCpuSupplied["1"]?.get(5)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied["1"]?.get(9)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied["1"]?.get(14)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuDemands[1]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(4500.0, monitor.hostCpuDemands[5]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(5500.0, monitor.hostCpuDemands[9]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuDemands[14]) { "The cpu demanded by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuSupplied[1]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuSupplied[5]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuSupplied[9]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(4000.0, monitor.hostCpuSupplied[14]) { "The cpu used by the host is incorrect" } },
+ { assertEquals(0.0, monitor.hostCpuDemands.last()) { "When the tasks are finished, the host should have 0.0 demand" } },
+ { assertEquals(0.0, monitor.hostCpuSupplied.last()) { "When the tasks are finished, the host should have 0.0 usage" } },
+ )
+ }
+
+ /**
+ * Obtain the trace reader for the test.
+ */
+ private fun createTestWorkload(
+ traceName: String,
+ fraction: Double,
+ seed: Long,
+ ): List<Task> {
+ val source = trace(traceName).sampleByLoad(fraction)
+ return source.resolve(workloadLoader, Random(seed))
+ }
+
+ /**
+ * Obtain the topology factory for the test.
+ */
+ private fun createTopology(name: String): List<ClusterSpec> {
+ val stream = checkNotNull(object {}.javaClass.getResourceAsStream("/Multiplexer/topologies/$name"))
+ return stream.use { clusterTopology(stream) }
+ }
+
+ class TestComputeMonitor : ComputeMonitor {
+ var hostCpuDemands = ArrayList<Double>()
+ var hostCpuSupplied = ArrayList<Double>()
+
+ override fun record(reader: HostTableReader) {
+ hostCpuDemands.add(reader.cpuDemand)
+ hostCpuSupplied.add(reader.cpuUsage)
+ }
+
+ var taskCpuDemands = mutableMapOf<String, ArrayList<Double>>()
+ var taskCpuSupplied = mutableMapOf<String, ArrayList<Double>>()
+
+ override fun record(reader: TaskTableReader) {
+ val taskName: String = reader.taskInfo.name
+
+ if (taskName in taskCpuDemands) {
+ taskCpuDemands[taskName]?.add(reader.cpuDemand)
+ taskCpuSupplied[taskName]?.add(reader.cpuUsage)
+ } else {
+ taskCpuDemands[taskName] = arrayListOf(reader.cpuDemand)
+ taskCpuSupplied[taskName] = arrayListOf(reader.cpuUsage)
+ }
+ }
+ }
+}
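The supplied values asserted in the overloaded scenarios above can be reproduced with the maxMinShares sketch shown after the commit message (again purely illustrative, assuming max-min fairness over the two-core, 2000-per-core host of single_2_2000.json, i.e. a total capacity of 4000):

    fun main() {
        // Test 6, both fragments: demands 6000 + 5000 exceed the 4000 capacity -> 2000 each.
        println(maxMinShares(listOf(6000.0, 5000.0), 4000.0)) // [2000.0, 2000.0]
        // Test 8 while both tasks are running: demands 3000 + 1500 -> 2500 + 1500.
        println(maxMinShares(listOf(3000.0, 1500.0), 4000.0)) // [2500.0, 1500.0]
        // Test 9 at peak demand: demands 2500 + 3000 -> 2000 each.
        println(maxMinShares(listOf(2500.0, 3000.0), 4000.0)) // [2000.0, 2000.0]
    }

In each overloaded case the per-task supplies add up to the full capacity of 4000, matching the hostCpuSupplied assertions in the tests.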
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ScenarioRunnerTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ScenarioRunnerTest.kt
deleted file mode 100644
index 0b32b8f6..00000000
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ScenarioRunnerTest.kt
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.experiments.base
-
-import java.io.File
-
-/**
- * Test suite for [ScenarioRunner].
- */
-class ScenarioRunnerTest {
- /**
- * The path to the environments.
- */
- private val envPath = File("src/test/resources/env")
-
- /**
- * The path to the traces.
- */
- private val tracePath = File("src/test/resources/trace")
-
- /**
- * Smoke test with output.
- * fixme: Fix failures and enable
- *
- fun testSmoke() {
- val outputPath = Files.createTempDirectory("output").toFile()
-
- try {
- val runner = ScenarioRunner(envPath, tracePath, outputPath)
- val scenario = Scenario(
- Topology("topology"),
- Workload("bitbrains-small", trace("bitbrains-small")),
- OperationalPhenomena(failureFrequency = 24.0 * 7, hasInterference = true),
- "active-tasks"
- )
-
- assertDoesNotThrow { runner.runScenario(scenario, seed = 0L) }
- } finally {
- outputPath.delete()
- }
- }
-
- /**
- * Smoke test without output.
- * fixme: Fix failures and enable
- */
- fun testSmokeNoOutput() {
- val runner = ScenarioRunner(envPath, tracePath, null)
- val scenario = Scenario(
- Topology("topology"),
- Workload("bitbrains-small", trace("bitbrains-small")),
- OperationalPhenomena(failureFrequency = 24.0 * 7, hasInterference = true),
- "active-tasks"
- )
-
- assertDoesNotThrow { runner.runScenario(scenario, seed = 0L) }
- }
- */
-}
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/failureTraces/11_failures.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/failureTraces/11_failures.parquet
index dbd93acb..dbd93acb 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/failureTraces/11_failures.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/failureTraces/11_failures.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/failureTraces/single_failure.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/failureTraces/single_failure.parquet
index d1f8b853..d1f8b853 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/failureTraces/single_failure.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/failureTraces/single_failure.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/topologies/multi.json b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/topologies/multi.json
index c3a060cc..c3a060cc 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/topologies/multi.json
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/topologies/multi.json
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/topologies/single.json b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/topologies/single.json
index de66bfc2..de66bfc2 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/topologies/single.json
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/topologies/single.json
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/traces/bitbrains-small/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/bitbrains-small/fragments.parquet
index 240f58e3..240f58e3 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/traces/bitbrains-small/fragments.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/bitbrains-small/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/traces/bitbrains-small/interference-model.json b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/bitbrains-small/interference-model.json
index 51fc6366..51fc6366 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/traces/bitbrains-small/interference-model.json
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/bitbrains-small/interference-model.json
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/traces/bitbrains-small/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/bitbrains-small/tasks.parquet
index 8e9dcea7..8e9dcea7 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/traces/bitbrains-small/tasks.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/bitbrains-small/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/traces/single_task/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/single_task/fragments.parquet
index 94a2d69e..94a2d69e 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/traces/single_task/fragments.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/single_task/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/traces/single_task/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/single_task/tasks.parquet
index 2a7da2eb..2a7da2eb 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/traces/single_task/tasks.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Experiment/traces/single_task/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/topologies/single_1_2000.json b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/topologies/single_1_2000.json
new file mode 100644
index 00000000..6790a10f
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/topologies/single_1_2000.json
@@ -0,0 +1,22 @@
+{
+ "clusters":
+ [
+ {
+ "name": "C01",
+ "hosts" :
+ [
+ {
+ "name": "H01",
+ "cpu":
+ {
+ "coreCount": 1,
+ "coreSpeed": 2000
+ },
+ "memory": {
+ "memorySize": 140457600000
+ }
+ }
+ ]
+ }
+ ]
+}
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/topologies/single_2_2000.json b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/topologies/single_2_2000.json
new file mode 100644
index 00000000..4bab620a
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/topologies/single_2_2000.json
@@ -0,0 +1,22 @@
+{
+ "clusters":
+ [
+ {
+ "name": "C01",
+ "hosts" :
+ [
+ {
+ "name": "H01",
+ "cpu":
+ {
+ "coreCount": 2,
+ "coreSpeed": 2000
+ },
+ "memory": {
+ "memorySize": 140457600000
+ }
+ }
+ ]
+ }
+ ]
+}
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_1/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_1/fragments.parquet
new file mode 100644
index 00000000..5d222b5c
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_1/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_1/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_1/tasks.parquet
new file mode 100644
index 00000000..fa881645
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_1/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_2/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_2/fragments.parquet
new file mode 100644
index 00000000..9fcf78f2
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_2/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_2/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_2/tasks.parquet
new file mode 100644
index 00000000..fa881645
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_2/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_3/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_3/fragments.parquet
new file mode 100644
index 00000000..238bad8f
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_3/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_3/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_3/tasks.parquet
new file mode 100644
index 00000000..fa881645
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_3/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_4/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_4/fragments.parquet
new file mode 100644
index 00000000..3e4bcc2a
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_4/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_4/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_4/tasks.parquet
new file mode 100644
index 00000000..fa881645
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_4/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_5/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_5/fragments.parquet
new file mode 100644
index 00000000..e0a76334
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_5/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_5/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_5/tasks.parquet
new file mode 100644
index 00000000..0982b0f7
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_5/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_6/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_6/fragments.parquet
new file mode 100644
index 00000000..84d982da
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_6/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_6/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_6/tasks.parquet
new file mode 100644
index 00000000..0982b0f7
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_6/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_7/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_7/fragments.parquet
new file mode 100644
index 00000000..0cc276ef
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_7/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_7/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_7/tasks.parquet
new file mode 100644
index 00000000..efd72165
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_7/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_8/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_8/fragments.parquet
new file mode 100644
index 00000000..eaa964e3
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_8/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_8/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_8/tasks.parquet
new file mode 100644
index 00000000..efd72165
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_8/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_9/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_9/fragments.parquet
new file mode 100644
index 00000000..d5e7e0ae
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_9/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_9/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_9/tasks.parquet
new file mode 100644
index 00000000..0982b0f7
--- /dev/null
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/Multiplexer/traces/multiplexer_test_9/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/model.json b/opendc-experiments/opendc-experiments-base/src/test/resources/model.json
deleted file mode 100644
index 91e2657f..00000000
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/model.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "version": "1.0",
- "defaultSchema": "trace",
- "schemas": [
- {
- "name": "trace",
- "type": "custom",
- "factory": "org.opendc.trace.calcite.TraceSchemaFactory",
- "operand": {
- "path": "trace",
- "format": "opendc-vm"
- }
- }
- ]
-}
diff --git a/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/Multiplexer.java b/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/Multiplexer.java
index ece90c20..48177412 100644
--- a/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/Multiplexer.java
+++ b/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/Multiplexer.java
@@ -40,7 +40,7 @@ public class Multiplexer extends FlowNode implements FlowSupplier, FlowConsumer
private double totalDemand; // The total demand of all the consumers
private double totalSupply; // The total supply from the supplier
- private boolean overProvisioned = false;
+ private boolean overLoaded = false;
private int currentConsumerIdx = -1;
private double capacity; // What is the max capacity
@@ -68,11 +68,10 @@ public class Multiplexer extends FlowNode implements FlowSupplier, FlowConsumer
private void distributeSupply() {
// if supply >= demand -> push supplies to all tasks
- // TODO: possible optimization -> Only has to be done for the specific consumer that changed demand
- if (this.totalSupply >= this.totalDemand) {
+ if (this.totalSupply > this.totalDemand) {
// If this came from a state of over provisioning, provide all consumers with their demand
- if (this.overProvisioned) {
+ if (this.overLoaded) {
for (int idx = 0; idx < this.consumerEdges.size(); idx++) {
this.pushSupply(this.consumerEdges.get(idx), this.demands.get(idx));
}
@@ -84,12 +83,12 @@ public class Multiplexer extends FlowNode implements FlowSupplier, FlowConsumer
this.currentConsumerIdx = -1;
}
- this.overProvisioned = false;
+ this.overLoaded = false;
}
// if supply < demand -> distribute the supply over all consumers
else {
- this.overProvisioned = true;
+ this.overLoaded = true;
double[] supplies = redistributeSupply(this.demands, this.totalSupply);
for (int idx = 0; idx < this.consumerEdges.size(); idx++) {
@@ -178,6 +177,10 @@ public class Multiplexer extends FlowNode implements FlowSupplier, FlowConsumer
this.currentConsumerIdx = -1;
+ if (this.overLoaded) {
+ this.distributeSupply();
+ }
+
this.pushDemand(this.supplierEdge, this.totalDemand);
}
@@ -205,7 +208,7 @@ public class Multiplexer extends FlowNode implements FlowSupplier, FlowConsumer
demands.set(idx, newDemand);
this.totalDemand += (newDemand - prevDemand);
- if (overProvisioned) {
+ if (overLoaded) {
distributeSupply();
}
@@ -216,7 +219,7 @@ public class Multiplexer extends FlowNode implements FlowSupplier, FlowConsumer
@Override
public void handleSupply(FlowEdge supplierEdge, double newSupply) {
- this.totalSupply = newSupply; // Currently this is from a single supply, might turn into multiple suppliers
+ this.totalSupply = newSupply;
this.distributeSupply();
}