author     Fabian Mastenbroek <mail.fabianm@gmail.com>  2021-10-25 20:57:51 +0200
committer  GitHub <noreply@github.com>  2021-10-25 20:57:51 +0200
commit     a8e2d460a3b6803845687585ae0b34e67a9445a3 (patch)
tree       6249023f8f0d56392400c7ebb72238ee848f740a /opendc-compute/opendc-compute-workload
parent     b4bf7268cbb6d22d3966f469a6b7721b04d91907 (diff)
parent     86c65e875b7dde8872dc81a37aa9dca72eee7782 (diff)
merge: Improve the OpenDC compute model (#37)
This pull request contains various improvements to the OpenDC compute simulation model.

- Support filtering hosts based on CPU capacity
- Do not allocate lambda in fast-path
- Redesign VM interference algorithm
- Report provisioning time of virtual machines
- Prevent allocations during collection cycle
- Use correct flow input capacity for counters
- Support running workloads without coroutines

**Breaking API Changes**

- `VirtualMachine` now requires a `cpuCapacity` parameter.
- `VmInterferenceModel` must be constructed using `VmInterferenceModel.Builder` and can no longer be passed a list of groups.
- Scheduling latency is no longer collected. Instead, derive it from the boot time and provisioning time.
- Telemetry data is recorded using the `*TableReader` interfaces instead of the `*Data` classes. These readers are re-used per row and must not be shared with other threads, since the underlying data may change.
- `SimMachine` no longer implements `AutoCloseable`. Machines can be removed from a `SimHypervisor` using the `removeMachine` method.
- `SimMachine.run` has moved to an extension method called `runWorkload`. Users can also choose to run a workload asynchronously via `SimMachine.startWorkload`.
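A minimal migration sketch for two of the breaking changes, assuming only the builder calls visible in the new VmInterferenceModelReader further down this diff (builder(), addGroup(members, targetLoad, score), build()); the group values are illustrative, and the machine/workload lines are left as comments because their exact signatures are not shown here:

    // Before: VmInterferenceModel was constructed from a list of groups.
    // After: groups are registered through the builder.
    val builder = VmInterferenceModel.builder()
    builder.addGroup(setOf("vm_a", "vm_c"), /* targetLoad = */ 0.0, /* score = */ 0.883)
    builder.addGroup(setOf("vm_x", "vm_y"), /* targetLoad = */ 0.5, /* score = */ 0.95)
    val model = builder.build()

    // SimMachine.run has moved to an extension method:
    //   machine.runWorkload(workload)    // suspending
    //   machine.startWorkload(workload)  // asynchronous variant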
Diffstat (limited to 'opendc-compute/opendc-compute-workload')
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadLoader.kt  13
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadRunner.kt  3
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/VirtualMachine.kt  4
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMetricExporter.kt  18
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetDataWriter.kt  23
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetHostDataWriter.kt  8
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetServerDataWriter.kt  15
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetServiceDataWriter.kt  8
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/util/PerformanceInterferenceReader.kt  68
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/util/VmInterferenceModelReader.kt  128
-rw-r--r--  opendc-compute/opendc-compute-workload/src/test/kotlin/org/opendc/compute/workload/util/VmInterferenceModelReaderTest.kt (renamed from opendc-compute/opendc-compute-workload/src/test/kotlin/org/opendc/compute/workload/util/PerformanceInterferenceReaderTest.kt)  18
11 files changed, 178 insertions, 128 deletions
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadLoader.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadLoader.kt
index 1a6624f7..f23becda 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadLoader.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadLoader.kt
@@ -92,7 +92,8 @@ public class ComputeWorkloadLoader(private val baseDir: File) {
val idCol = reader.resolve(RESOURCE_ID)
val startTimeCol = reader.resolve(RESOURCE_START_TIME)
val stopTimeCol = reader.resolve(RESOURCE_STOP_TIME)
- val coresCol = reader.resolve(RESOURCE_CPU_COUNT)
+ val cpuCountCol = reader.resolve(RESOURCE_CPU_COUNT)
+ val cpuCapacityCol = reader.resolve(RESOURCE_CPU_CAPACITY)
val memCol = reader.resolve(RESOURCE_MEM_CAPACITY)
var counter = 0
@@ -108,8 +109,9 @@ public class ComputeWorkloadLoader(private val baseDir: File) {
val submissionTime = reader.get(startTimeCol) as Instant
val endTime = reader.get(stopTimeCol) as Instant
- val maxCores = reader.getInt(coresCol)
- val requiredMemory = reader.getDouble(memCol) / 1000.0 // Convert from KB to MB
+ val cpuCount = reader.getInt(cpuCountCol)
+ val cpuCapacity = reader.getDouble(cpuCapacityCol)
+ val memCapacity = reader.getDouble(memCol) / 1000.0 // Convert from KB to MB
val uid = UUID.nameUUIDFromBytes("$id-${counter++}".toByteArray())
val builder = fragments.getValue(id)
@@ -119,8 +121,9 @@ public class ComputeWorkloadLoader(private val baseDir: File) {
VirtualMachine(
uid,
id,
- maxCores,
- requiredMemory.roundToLong(),
+ cpuCount,
+ cpuCapacity,
+ memCapacity.roundToLong(),
totalLoad,
submissionTime,
endTime,
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadRunner.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadRunner.kt
index 283f82fe..90ee56cb 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadRunner.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadRunner.kt
@@ -128,7 +128,8 @@ public class ComputeWorkloadRunner(
client.newFlavor(
entry.name,
entry.cpuCount,
- entry.memCapacity
+ entry.memCapacity,
+ meta = if (entry.cpuCapacity > 0.0) mapOf("cpu-capacity" to entry.cpuCapacity) else emptyMap()
),
meta = mapOf("workload" to workload)
)
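The "cpu-capacity" entry written into the flavor metadata above backs the CPU-capacity host filtering listed in the changelog. A hypothetical sketch of a filter reading the key back; only the "cpu-capacity" key and its Double value come from this diff, while the HostView type and its availableCpuCapacity field are assumptions for illustration:

    // Hypothetical placement filter: admit a host only if it offers enough CPU capacity.
    fun fitsCpuCapacity(host: HostView, flavor: Flavor): Boolean {
        // An absent key means the VM declared no capacity requirement.
        val required = flavor.meta["cpu-capacity"] as? Double ?: 0.0 // MHz
        return host.availableCpuCapacity >= required
    }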
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/VirtualMachine.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/VirtualMachine.kt
index 5dd239f6..88e80719 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/VirtualMachine.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/VirtualMachine.kt
@@ -31,8 +31,9 @@ import java.util.*
*
* @param uid The unique identifier of the virtual machine.
* @param name The name of the virtual machine.
+ * @param cpuCapacity The required CPU capacity for the VM in MHz.
* @param cpuCount The number of vCPUs in the VM.
- * @param memCapacity The provisioned memory for the VM.
+ * @param memCapacity The provisioned memory for the VM in MB.
* @param startTime The start time of the VM.
* @param stopTime The stop time of the VM.
* @param trace The trace that belongs to this VM.
@@ -41,6 +42,7 @@ public data class VirtualMachine(
val uid: UUID,
val name: String,
val cpuCount: Int,
+ val cpuCapacity: Double,
val memCapacity: Long,
val totalLoad: Double,
val startTime: Instant,
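Constructing a trace entry with the new required parameter then looks like the sketch below; the field list follows the data class and its KDoc above, while the concrete values and the trace placeholder are illustrative:

    val vm = VirtualMachine(
        uid = UUID.randomUUID(),
        name = "vm-0001",
        cpuCount = 4,
        cpuCapacity = 4 * 2600.0,  // new, breaking: required CPU capacity in MHz
        memCapacity = 8192L,       // MB
        totalLoad = 0.0,
        startTime = Instant.parse("2021-10-25T00:00:00Z"),
        stopTime = Instant.parse("2021-10-26T00:00:00Z"),
        trace = trace              // placeholder: the trace fragments belonging to this VM
    )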
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMetricExporter.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMetricExporter.kt
index ad182d67..a46885f4 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMetricExporter.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMetricExporter.kt
@@ -25,9 +25,9 @@ package org.opendc.compute.workload.export.parquet
import io.opentelemetry.sdk.common.CompletableResultCode
import org.opendc.telemetry.compute.ComputeMetricExporter
import org.opendc.telemetry.compute.ComputeMonitor
-import org.opendc.telemetry.compute.table.HostData
-import org.opendc.telemetry.compute.table.ServerData
-import org.opendc.telemetry.compute.table.ServiceData
+import org.opendc.telemetry.compute.table.HostTableReader
+import org.opendc.telemetry.compute.table.ServerTableReader
+import org.opendc.telemetry.compute.table.ServiceTableReader
import java.io.File
/**
@@ -49,16 +49,16 @@ public class ParquetComputeMetricExporter(base: File, partition: String, bufferS
bufferSize
)
- override fun record(data: ServerData) {
- serverWriter.write(data)
+ override fun record(reader: ServerTableReader) {
+ serverWriter.write(reader)
}
- override fun record(data: HostData) {
- hostWriter.write(data)
+ override fun record(reader: HostTableReader) {
+ hostWriter.write(reader)
}
- override fun record(data: ServiceData) {
- serviceWriter.write(data)
+ override fun record(reader: ServiceTableReader) {
+ serviceWriter.write(reader)
}
override fun shutdown(): CompletableResultCode {
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetDataWriter.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetDataWriter.kt
index 4172d729..84387bbc 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetDataWriter.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetDataWriter.kt
@@ -50,9 +50,9 @@ public abstract class ParquetDataWriter<in T>(
private val logger = KotlinLogging.logger {}
/**
- * The queue of commands to process.
+ * The queue of records to process.
*/
- private val queue: BlockingQueue<T> = ArrayBlockingQueue(bufferSize)
+ private val queue: BlockingQueue<GenericData.Record> = ArrayBlockingQueue(bufferSize)
/**
* An exception to be propagated to the actual writer.
@@ -72,20 +72,20 @@ public abstract class ParquetDataWriter<in T>(
}
val queue = queue
- val buf = mutableListOf<T>()
+ val buf = mutableListOf<GenericData.Record>()
var shouldStop = false
try {
while (!shouldStop) {
try {
- process(writer, queue.take())
+ writer.write(queue.take())
} catch (e: InterruptedException) {
shouldStop = true
}
if (queue.drainTo(buf) > 0) {
for (data in buf) {
- process(writer, data)
+ writer.write(data)
}
buf.clear()
}
@@ -119,7 +119,9 @@ public abstract class ParquetDataWriter<in T>(
throw IllegalStateException("Writer thread failed", exception)
}
- queue.put(data)
+ val builder = GenericRecordBuilder(schema)
+ convert(builder, data)
+ queue.put(builder.build())
}
/**
@@ -133,13 +135,4 @@ public abstract class ParquetDataWriter<in T>(
init {
writerThread.start()
}
-
- /**
- * Process the specified [data] to be written to the Parquet file.
- */
- private fun process(writer: ParquetWriter<GenericData.Record>, data: T) {
- val builder = GenericRecordBuilder(schema)
- convert(builder, data)
- writer.write(builder.build())
- }
}
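The refactoring above moves record conversion onto the caller's thread: the queue now carries immutable GenericData.Record instances rather than the raw data objects. That matters because the *TableReader rows are re-used per row and must not be shared across threads (see the breaking changes in the commit message). Condensed, and assuming the enclosing method is the write method shown partially in the hunk, the new path is:

    public fun write(data: T) {
        // Snapshot the (re-used) reader into an Avro record on the caller's
        // thread, before the row can mutate, then hand it to the writer thread.
        val builder = GenericRecordBuilder(schema)
        convert(builder, data)
        queue.put(builder.build())
    }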
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetHostDataWriter.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetHostDataWriter.kt
index 98a0739e..2b7cac8f 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetHostDataWriter.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetHostDataWriter.kt
@@ -28,17 +28,17 @@ import org.apache.avro.generic.GenericData
import org.apache.avro.generic.GenericRecordBuilder
import org.apache.parquet.avro.AvroParquetWriter
import org.apache.parquet.hadoop.ParquetWriter
-import org.opendc.telemetry.compute.table.HostData
+import org.opendc.telemetry.compute.table.HostTableReader
import org.opendc.trace.util.parquet.TIMESTAMP_SCHEMA
import org.opendc.trace.util.parquet.UUID_SCHEMA
import org.opendc.trace.util.parquet.optional
import java.io.File
/**
- * A Parquet event writer for [HostData]s.
+ * A Parquet event writer for [HostTableReader]s.
*/
public class ParquetHostDataWriter(path: File, bufferSize: Int) :
- ParquetDataWriter<HostData>(path, SCHEMA, bufferSize) {
+ ParquetDataWriter<HostTableReader>(path, SCHEMA, bufferSize) {
override fun buildWriter(builder: AvroParquetWriter.Builder<GenericData.Record>): ParquetWriter<GenericData.Record> {
return builder
@@ -46,7 +46,7 @@ public class ParquetHostDataWriter(path: File, bufferSize: Int) :
.build()
}
- override fun convert(builder: GenericRecordBuilder, data: HostData) {
+ override fun convert(builder: GenericRecordBuilder, data: HostTableReader) {
builder["timestamp"] = data.timestamp.toEpochMilli()
builder["host_id"] = data.host.id
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetServerDataWriter.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetServerDataWriter.kt
index 0d11ec23..144b6624 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetServerDataWriter.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetServerDataWriter.kt
@@ -28,17 +28,17 @@ import org.apache.avro.generic.GenericData
import org.apache.avro.generic.GenericRecordBuilder
import org.apache.parquet.avro.AvroParquetWriter
import org.apache.parquet.hadoop.ParquetWriter
-import org.opendc.telemetry.compute.table.ServerData
+import org.opendc.telemetry.compute.table.ServerTableReader
import org.opendc.trace.util.parquet.TIMESTAMP_SCHEMA
import org.opendc.trace.util.parquet.UUID_SCHEMA
import org.opendc.trace.util.parquet.optional
import java.io.File
/**
- * A Parquet event writer for [ServerData]s.
+ * A Parquet event writer for [ServerTableReader]s.
*/
public class ParquetServerDataWriter(path: File, bufferSize: Int) :
- ParquetDataWriter<ServerData>(path, SCHEMA, bufferSize) {
+ ParquetDataWriter<ServerTableReader>(path, SCHEMA, bufferSize) {
override fun buildWriter(builder: AvroParquetWriter.Builder<GenericData.Record>): ParquetWriter<GenericData.Record> {
return builder
@@ -47,7 +47,7 @@ public class ParquetServerDataWriter(path: File, bufferSize: Int) :
.build()
}
- override fun convert(builder: GenericRecordBuilder, data: ServerData) {
+ override fun convert(builder: GenericRecordBuilder, data: ServerTableReader) {
builder["timestamp"] = data.timestamp.toEpochMilli()
builder["server_id"] = data.server.id
@@ -55,9 +55,8 @@ public class ParquetServerDataWriter(path: File, bufferSize: Int) :
builder["uptime"] = data.uptime
builder["downtime"] = data.downtime
- val bootTime = data.bootTime
- builder["boot_time"] = bootTime?.toEpochMilli()
- builder["scheduling_latency"] = data.schedulingLatency
+ builder["boot_time"] = data.bootTime?.toEpochMilli()
+ builder["provision_time"] = data.provisionTime?.toEpochMilli()
builder["cpu_count"] = data.server.cpuCount
builder["cpu_limit"] = data.cpuLimit
@@ -81,8 +80,8 @@ public class ParquetServerDataWriter(path: File, bufferSize: Int) :
.name("host_id").type(UUID_SCHEMA.optional()).noDefault()
.requiredLong("uptime")
.requiredLong("downtime")
+ .name("provision_time").type(TIMESTAMP_SCHEMA.optional()).noDefault()
.name("boot_time").type(TIMESTAMP_SCHEMA.optional()).noDefault()
- .requiredLong("scheduling_latency")
.requiredInt("cpu_count")
.requiredDouble("cpu_limit")
.requiredLong("cpu_time_active")
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetServiceDataWriter.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetServiceDataWriter.kt
index 47824b29..ec8a2b65 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetServiceDataWriter.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetServiceDataWriter.kt
@@ -25,17 +25,17 @@ package org.opendc.compute.workload.export.parquet
import org.apache.avro.Schema
import org.apache.avro.SchemaBuilder
import org.apache.avro.generic.GenericRecordBuilder
-import org.opendc.telemetry.compute.table.ServiceData
+import org.opendc.telemetry.compute.table.ServiceTableReader
import org.opendc.trace.util.parquet.TIMESTAMP_SCHEMA
import java.io.File
/**
- * A Parquet event writer for [ServiceData]s.
+ * A Parquet event writer for [ServiceTableReader]s.
*/
public class ParquetServiceDataWriter(path: File, bufferSize: Int) :
- ParquetDataWriter<ServiceData>(path, SCHEMA, bufferSize) {
+ ParquetDataWriter<ServiceTableReader>(path, SCHEMA, bufferSize) {
- override fun convert(builder: GenericRecordBuilder, data: ServiceData) {
+ override fun convert(builder: GenericRecordBuilder, data: ServiceTableReader) {
builder["timestamp"] = data.timestamp.toEpochMilli()
builder["hosts_up"] = data.hostsUp
builder["hosts_down"] = data.hostsDown
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/util/PerformanceInterferenceReader.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/util/PerformanceInterferenceReader.kt
deleted file mode 100644
index 67f9626c..00000000
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/util/PerformanceInterferenceReader.kt
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.compute.workload.util
-
-import com.fasterxml.jackson.annotation.JsonProperty
-import com.fasterxml.jackson.databind.ObjectMapper
-import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
-import com.fasterxml.jackson.module.kotlin.readValue
-import org.opendc.simulator.compute.kernel.interference.VmInterferenceGroup
-import java.io.File
-import java.io.InputStream
-
-/**
- * A parser for the JSON performance interference setup files used for the TPDS article on Capelin.
- */
-public class PerformanceInterferenceReader {
- /**
- * The [ObjectMapper] to use.
- */
- private val mapper = jacksonObjectMapper()
-
- init {
- mapper.addMixIn(VmInterferenceGroup::class.java, GroupMixin::class.java)
- }
-
- /**
- * Read the performance interface model from [file].
- */
- public fun read(file: File): List<VmInterferenceGroup> {
- return mapper.readValue(file)
- }
-
- /**
- * Read the performance interface model from the input.
- */
- public fun read(input: InputStream): List<VmInterferenceGroup> {
- return mapper.readValue(input)
- }
-
- private data class GroupMixin(
- @JsonProperty("minServerLoad")
- val targetLoad: Double,
- @JsonProperty("performanceScore")
- val score: Double,
- @JsonProperty("vms")
- val members: Set<String>,
- )
-}
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/util/VmInterferenceModelReader.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/util/VmInterferenceModelReader.kt
new file mode 100644
index 00000000..e0fa8904
--- /dev/null
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/util/VmInterferenceModelReader.kt
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2021 AtLarge Research
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package org.opendc.compute.workload.util
+
+import com.fasterxml.jackson.annotation.JsonProperty
+import com.fasterxml.jackson.core.JsonParseException
+import com.fasterxml.jackson.core.JsonParser
+import com.fasterxml.jackson.core.JsonToken
+import com.fasterxml.jackson.databind.ObjectMapper
+import com.fasterxml.jackson.module.kotlin.jacksonObjectMapper
+import org.opendc.simulator.compute.kernel.interference.VmInterferenceModel
+import java.io.File
+import java.io.InputStream
+
+/**
+ * A parser for the JSON performance interference setup files used for the TPDS article on Capelin.
+ */
+public class VmInterferenceModelReader {
+ /**
+ * The [ObjectMapper] to use.
+ */
+ private val mapper = jacksonObjectMapper()
+
+ /**
+ * Read the performance interference model from [file].
+ */
+ public fun read(file: File): VmInterferenceModel {
+ val builder = VmInterferenceModel.builder()
+ val parser = mapper.createParser(file)
+ parseGroups(parser, builder)
+ return builder.build()
+ }
+
+ /**
+ * Read the performance interference model from the input.
+ */
+ public fun read(input: InputStream): VmInterferenceModel {
+ val builder = VmInterferenceModel.builder()
+ val parser = mapper.createParser(input)
+ parseGroups(parser, builder)
+ return builder.build()
+ }
+
+ /**
+ * Parse all groups in an interference JSON file.
+ */
+ private fun parseGroups(parser: JsonParser, builder: VmInterferenceModel.Builder) {
+ parser.nextToken()
+
+ if (!parser.isExpectedStartArrayToken) {
+ throw JsonParseException(parser, "Expected array at start, but got ${parser.currentToken()}")
+ }
+
+ while (parser.nextToken() != JsonToken.END_ARRAY) {
+ parseGroup(parser, builder)
+ }
+ }
+
+ /**
+ * Parse a group in an interference JSON file.
+ */
+ private fun parseGroup(parser: JsonParser, builder: VmInterferenceModel.Builder) {
+ var targetLoad = Double.POSITIVE_INFINITY
+ var score = 1.0
+ val members = mutableSetOf<String>()
+
+ if (!parser.isExpectedStartObjectToken) {
+ throw JsonParseException(parser, "Expected object, but got ${parser.currentToken()}")
+ }
+
+ while (parser.nextValue() != JsonToken.END_OBJECT) {
+ when (parser.currentName) {
+ "vms" -> parseGroupMembers(parser, members)
+ "minServerLoad" -> targetLoad = parser.doubleValue
+ "performanceScore" -> score = parser.doubleValue
+ }
+ }
+
+ builder.addGroup(members, targetLoad, score)
+ }
+
+ /**
+ * Parse the members of a group.
+ */
+ private fun parseGroupMembers(parser: JsonParser, members: MutableSet<String>) {
+ if (!parser.isExpectedStartArrayToken) {
+ throw JsonParseException(parser, "Expected array for group members, but got ${parser.currentToken()}")
+ }
+
+ while (parser.nextValue() != JsonToken.END_ARRAY) {
+ if (parser.currentToken() != JsonToken.VALUE_STRING) {
+ throw JsonParseException(parser, "Expected string value for group member")
+ }
+
+ val member = parser.text.removePrefix("vm__workload__").removeSuffix(".txt")
+ members.add(member)
+ }
+ }
+
+ private data class Group(
+ @JsonProperty("minServerLoad")
+ val targetLoad: Double,
+ @JsonProperty("performanceScore")
+ val score: Double,
+ @JsonProperty("vms")
+ val members: Set<String>,
+ )
+}
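A usage sketch for the new reader, with an input document reconstructed from the token handling above: an array of group objects carrying "vms", "minServerLoad" and "performanceScore" keys, where legacy member names such as "vm__workload__vm_a.txt" are reduced to "vm_a". The values are illustrative:

    val json = """
        [
          { "vms": ["vm_a", "vm_c"], "minServerLoad": 0.0, "performanceScore": 0.883 },
          { "vms": ["vm_x", "vm_y"], "minServerLoad": 0.5, "performanceScore": 0.95 }
        ]
    """.trimIndent()
    val model = VmInterferenceModelReader().read(json.byteInputStream())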
diff --git a/opendc-compute/opendc-compute-workload/src/test/kotlin/org/opendc/compute/workload/util/PerformanceInterferenceReaderTest.kt b/opendc-compute/opendc-compute-workload/src/test/kotlin/org/opendc/compute/workload/util/VmInterferenceModelReaderTest.kt
index c79f0584..1c3e7149 100644
--- a/opendc-compute/opendc-compute-workload/src/test/kotlin/org/opendc/compute/workload/util/PerformanceInterferenceReaderTest.kt
+++ b/opendc-compute/opendc-compute-workload/src/test/kotlin/org/opendc/compute/workload/util/VmInterferenceModelReaderTest.kt
@@ -22,24 +22,16 @@
package org.opendc.compute.workload.util
-import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertAll
+import org.junit.jupiter.api.assertDoesNotThrow
/**
- * Test suite for the [PerformanceInterferenceReader] class.
+ * Test suite for the [VmInterferenceModelReader] class.
*/
-class PerformanceInterferenceReaderTest {
+class VmInterferenceModelReaderTest {
@Test
fun testSmoke() {
- val input = checkNotNull(PerformanceInterferenceReader::class.java.getResourceAsStream("/perf-interference.json"))
- val result = PerformanceInterferenceReader().read(input)
-
- assertAll(
- { assertEquals(2, result.size) },
- { assertEquals(setOf("vm_a", "vm_c", "vm_x", "vm_y"), result[0].members) },
- { assertEquals(0.0, result[0].targetLoad, 0.001) },
- { assertEquals(0.8830158730158756, result[0].score, 0.001) }
- )
+ val input = checkNotNull(VmInterferenceModelReader::class.java.getResourceAsStream("/perf-interference.json"))
+ assertDoesNotThrow { VmInterferenceModelReader().read(input) }
}
}