-rw-r--r--  buildSrc/src/main/kotlin/testing-conventions.gradle.kts | 26
-rw-r--r--  opendc-compute/opendc-compute-api/src/main/kotlin/org/opendc/compute/api/Flavor.kt | 6
-rw-r--r--  opendc-compute/opendc-compute-api/src/main/kotlin/org/opendc/compute/api/Resource.kt | 9
-rw-r--r--  opendc-compute/opendc-compute-carbon/src/main/kotlin/org/opendc/compute/carbon/CarbonTraceLoader.kt | 17
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ComputeService.java | 136
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceFlavor.java | 51
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceImage.java | 95
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceTask.java | 24
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/internal/Guest.kt | 2
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/provisioner/HostsProvisioningStep.kt | 5
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/filters/DifferentHostFilter.kt | 5
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/filters/SameHostFilter.kt | 5
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltTaskExportColumns.kt | 5
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/task/TaskInfo.kt | 2
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/task/TaskTableReaderImpl.kt | 2
-rw-r--r--  opendc-compute/opendc-compute-simulator/src/test/kotlin/org/opendc/compute/simulator/scheduler/FilterSchedulerTest.kt | 9
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadLoader.kt | 119
-rw-r--r--  opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/Task.kt | 10
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner/ScenarioReplayer.kt | 3
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner/ScenarioRunner.kt | 16
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/BatteryTest.kt | 32
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/CarbonTest.kt | 26
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/DistributionPoliciesTest.kt | 238
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt | 83
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FailuresAndCheckpointingTest.kt | 54
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FlowDistributorTest.kt | 447
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FragmentScalingTest.kt | 166
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/GpuTest.kt | 45
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/SchedulerTest.kt | 22
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/TestingUtils.kt | 26
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/VirtualizationOverheadTests.kt | 90
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/fragments.parquet | Bin 717069 -> 716007 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/interference-model.json | 21
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/tasks.parquet | Bin 5525 -> 4871 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/single_task/fragments.parquet | Bin 3012 -> 2184 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/single_task/tasks.parquet | Bin 4471 -> 3925 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/small_gpu/fragments.parquet | Bin 20422 -> 19167 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/small_gpu/tasks.parquet | Bin 5368 -> 5171 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-m3sa/src/test/kotlin/org/opendc/experiments/m3sa/M3SARunnerTest.kt | 4
-rw-r--r--  opendc-experiments/opendc-experiments-m3sa/src/test/resources/workloadTraces/experiment1/fragments.parquet | Bin 1647901 -> 1647441 bytes
-rw-r--r--  opendc-experiments/opendc-experiments-m3sa/src/test/resources/workloadTraces/experiment1/tasks.parquet | Bin 115791 -> 112083 bytes
-rw-r--r--  opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/SimTraceWorkload.java | 10
-rw-r--r--  opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/TraceFragment.java | 29
-rw-r--r--  opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/TraceWorkload.java | 60
-rw-r--r--  opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/engine/graph/FlowDistributor.java | 6
-rw-r--r--  opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/engine/graph/distributionPolicies/MaxMinFairnessFlowDistributor.java | 4
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/CarbonColumns.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/CarbonIntensityColumns.kt) | 10
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/FragmentColumns.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/InterferenceGroupColumns.kt) | 19
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/ResourceColumns.kt | 115
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/ResourceStateColumns.kt | 103
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/Tables.kt | 19
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/TaskColumns.kt | 56
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureResourceStateTableReader.kt | 219
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureResourceTableReader.kt | 246
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureTraceFormat.kt | 147
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsExResourceStateTableReader.kt | 292
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsExTraceFormat.kt | 135
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsResourceStateTableReader.kt | 365
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsResourceTableReader.kt | 175
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsTraceFormat.kt | 159
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/CarbonTableReader.kt | 14
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/CarbonTraceFormat.kt | 20
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonFragment.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonIntensityFragment.kt) | 2
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonReadSupport.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonIntensityReadSupport.kt) | 41
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonRecordMaterializer.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonIntensityRecordMaterializer.kt) | 8
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonSchemas.kt | 43
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureReadSupport.kt | 30
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureRecordMaterializer.kt | 2
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureSchemas.kt (renamed from opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/TraceTools.kt) | 41
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/gwf/GwfTaskTableReader.kt | 286
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/gwf/GwfTraceFormat.kt | 104
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmInterferenceJsonTableReader.kt | 225
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmInterferenceJsonTableWriter.kt | 192
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmTraceFormat.kt | 202
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceReadSupport.kt | 214
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceStateReadSupport.kt | 161
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/swf/SwfTaskTableReader.kt | 236
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/swf/SwfTraceFormat.kt | 100
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wfformat/WfFormatTaskTableReader.kt | 314
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wfformat/WfFormatTraceFormat.kt | 95
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/FragmentTableReader.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceStateTableReader.kt) | 57
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/FragmentTableWriter.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceStateTableWriter.kt) | 72
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/TaskTableReader.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceTableReader.kt) | 90
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/TaskTableWriter.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceTableWriter.kt) | 95
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/WorkloadTraceFormat.kt | 165
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/Fragment.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceState.kt) | 10
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentReadSupport.kt | 79
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentRecordMaterializer.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceStateRecordMaterializer.kt) | 23
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentSchemas.kt | 80
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentWriteSupport.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceStateWriteSupport.kt) | 24
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/Task.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/Resource.kt) | 11
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskReadSupport.kt | 101
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskRecordMaterializer.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceRecordMaterializer.kt) | 36
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskSchemas.kt | 166
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskWriteSupport.kt (renamed from opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceWriteSupport.kt) | 57
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/WtfTaskTableReader.kt | 187
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/WtfTraceFormat.kt | 102
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/Task.kt | 42
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/TaskReadSupport.kt | 148
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/TaskRecordMaterializer.kt | 188
-rw-r--r--  opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/spi/TraceFormat.kt | 16
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/azure/AzureTraceFormatTest.kt | 132
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/bitbrains/BitbrainsExTraceFormatTest.kt | 97
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/bitbrains/BitbrainsTraceFormatTest.kt | 129
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/gwf/GwfTraceFormatTest.kt | 123
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/opendc/OdcVmTraceFormatTest.kt | 341
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/swf/SwfTraceFormatTest.kt | 101
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/wfformat/WfFormatTaskTableReaderTest.kt | 361
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/wfformat/WfFormatTraceFormatTest.kt | 129
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/TableReaderTestKit.kt | 190
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/TableWriterTestKit.kt | 130
-rw-r--r--  opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/WtfTraceFormatTest.kt | 141
-rw-r--r--  opendc-trace/opendc-trace-calcite/build.gradle.kts | 36
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/InsertableTable.kt | 39
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceReaderEnumerator.kt | 118
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceSchema.kt | 47
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceSchemaFactory.kt | 54
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTable.kt | 214
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTableModify.kt | 157
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTableModifyRule.kt | 71
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/test/kotlin/org/opendc/trace/calcite/CalciteTest.kt | 268
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/test/kotlin/org/opendc/trace/calcite/TraceSchemaFactoryTest.kt | 80
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/test/resources/model.json | 15
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/test/resources/trace/fragments.parquet | Bin 65174 -> 0 bytes
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/test/resources/trace/interference-model.json | 20
-rw-r--r--  opendc-trace/opendc-trace-calcite/src/test/resources/trace/tasks.parquet | Bin 1679 -> 0 bytes
-rw-r--r--  opendc-trace/opendc-trace-tools/build.gradle.kts | 44
-rw-r--r--  opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/ConvertCommand.kt | 522
-rw-r--r--  opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/QueryCommand.kt | 164
-rw-r--r--  opendc-trace/opendc-trace-tools/src/main/resources/log4j2.xml | 38
130 files changed, 1824 insertions(+), 9986 deletions(-)
diff --git a/buildSrc/src/main/kotlin/testing-conventions.gradle.kts b/buildSrc/src/main/kotlin/testing-conventions.gradle.kts
index b374d0ff..627beb14 100644
--- a/buildSrc/src/main/kotlin/testing-conventions.gradle.kts
+++ b/buildSrc/src/main/kotlin/testing-conventions.gradle.kts
@@ -25,6 +25,8 @@ plugins {
}
tasks.test {
+// val javaVersion = project.defaultVersionCatalog.getVersion("java")
+// languageVersion.set(JavaLanguageVersion.of(javaVersion))
useJUnitPlatform()
reports {
@@ -41,27 +43,3 @@ dependencies {
testImplementation(versionCatalog["mockk"])
testRuntimeOnly(versionCatalog["junit.jupiter.engine"])
}
-
-tasks.register<Test>("testsOn18") {
- javaLauncher.set(javaToolchains.launcherFor {
- languageVersion.set(JavaLanguageVersion.of(18))
- })
-
- useJUnitPlatform()
-
- minHeapSize = "512m"
- maxHeapSize = "1024m"
- jvmArgs = listOf("-XX:MaxMetaspaceSize=512m")
-}
-
-tasks.register<Test>("testsOn19") {
- javaLauncher.set(javaToolchains.launcherFor {
- languageVersion.set(JavaLanguageVersion.of(19))
- })
-
- useJUnitPlatform()
-
- minHeapSize = "512m"
- maxHeapSize = "1024m"
- jvmArgs = listOf("-XX:MaxMetaspaceSize=512m")
-}
diff --git a/opendc-compute/opendc-compute-api/src/main/kotlin/org/opendc/compute/api/Flavor.kt b/opendc-compute/opendc-compute-api/src/main/kotlin/org/opendc/compute/api/Flavor.kt
index a15191c6..581c29ba 100644
--- a/opendc-compute/opendc-compute-api/src/main/kotlin/org/opendc/compute/api/Flavor.kt
+++ b/opendc-compute/opendc-compute-api/src/main/kotlin/org/opendc/compute/api/Flavor.kt
@@ -45,15 +45,15 @@ public interface Flavor : Resource {
/**
* Set of Tasks that need to be finished before this can start
*/
- public val dependencies: Set<String>
+ public val dependencies: Set<Int>
/**
* Set of Tasks that need to be finished before this can start
*/
- public val parents: Set<String>
+ public val parents: Set<Int>
/**
* Set of Tasks that need to be finished before this can start
*/
- public val children: Set<String>
+ public val children: Set<Int>
}
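
The hunk above moves Flavor's dependency bookkeeping from task names (Set<String>) to integer task IDs (Set<Int>). A minimal sketch of gating on the new sets; canStart is a hypothetical helper for illustration, not part of the diff:

    // Hypothetical helper: a task may start once every dependency ID has completed.
    fun canStart(completedIds: Set<Int>, flavor: Flavor): Boolean =
        completedIds.containsAll(flavor.dependencies)
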
diff --git a/opendc-compute/opendc-compute-api/src/main/kotlin/org/opendc/compute/api/Resource.kt b/opendc-compute/opendc-compute-api/src/main/kotlin/org/opendc/compute/api/Resource.kt
index 2c3822a7..ab9d42f3 100644
--- a/opendc-compute/opendc-compute-api/src/main/kotlin/org/opendc/compute/api/Resource.kt
+++ b/opendc-compute/opendc-compute-api/src/main/kotlin/org/opendc/compute/api/Resource.kt
@@ -22,21 +22,14 @@
package org.opendc.compute.api
-import java.util.UUID
-
/**
* A generic resource provided by the OpenDC Compute service.
*/
public interface Resource {
/**
- * The unique identifier of the resource.
- */
- public val uid: UUID
-
- /**
* The name of the resource.
*/
- public val name: String
+ public val taskId: Int
/**
* The non-identifying metadata attached to the resource.
diff --git a/opendc-compute/opendc-compute-carbon/src/main/kotlin/org/opendc/compute/carbon/CarbonTraceLoader.kt b/opendc-compute/opendc-compute-carbon/src/main/kotlin/org/opendc/compute/carbon/CarbonTraceLoader.kt
index 104abdca..197134c4 100644
--- a/opendc-compute/opendc-compute-carbon/src/main/kotlin/org/opendc/compute/carbon/CarbonTraceLoader.kt
+++ b/opendc-compute/opendc-compute-carbon/src/main/kotlin/org/opendc/compute/carbon/CarbonTraceLoader.kt
@@ -24,9 +24,9 @@ package org.opendc.compute.carbon
import org.opendc.simulator.compute.power.CarbonFragment
import org.opendc.trace.Trace
-import org.opendc.trace.conv.CARBON_INTENSITY_TIMESTAMP
-import org.opendc.trace.conv.CARBON_INTENSITY_VALUE
-import org.opendc.trace.conv.TABLE_CARBON_INTENSITIES
+import org.opendc.trace.conv.CARBON_INTENSITY
+import org.opendc.trace.conv.CARBON_TIMESTAMP
+import org.opendc.trace.conv.TABLE_CARBON
import java.io.File
import java.lang.ref.SoftReference
import java.time.Instant
@@ -35,7 +35,6 @@ import java.util.concurrent.ConcurrentHashMap
/**
* A helper class for loading compute workload traces into memory.
*
- * @param baseDir The directory containing the traces.
*/
public class CarbonTraceLoader {
/**
@@ -49,10 +48,10 @@ public class CarbonTraceLoader {
* Read the metadata into a workload.
*/
private fun parseCarbon(trace: Trace): List<CarbonFragment> {
- val reader = checkNotNull(trace.getTable(TABLE_CARBON_INTENSITIES)).newReader()
+ val reader = checkNotNull(trace.getTable(TABLE_CARBON)).newReader()
- val startTimeCol = reader.resolve(CARBON_INTENSITY_TIMESTAMP)
- val carbonIntensityCol = reader.resolve(CARBON_INTENSITY_VALUE)
+ val startTimeCol = reader.resolve(CARBON_TIMESTAMP)
+ val carbonIntensityCol = reader.resolve(CARBON_INTENSITY)
try {
while (reader.nextRow()) {
@@ -75,7 +74,7 @@ public class CarbonTraceLoader {
}
/**
- * Load the trace with the specified [name] and [format].
+ * Load the Carbon Trace at the given path.
*/
public fun get(pathToFile: File): List<CarbonFragment> {
val trace = Trace.open(pathToFile, "carbon")
@@ -97,7 +96,7 @@ public class CarbonTraceLoader {
/**
* The total load of the trace.
*/
- public val fragments: MutableList<CarbonFragment> = mutableListOf()
+ val fragments: MutableList<CarbonFragment> = mutableListOf()
/**
* Add a fragment to the trace.
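
Per the updated doc comment, CarbonTraceLoader.get takes a File and returns the parsed fragments. A usage sketch, assuming a carbon trace at an illustrative path:

    import org.opendc.compute.carbon.CarbonTraceLoader
    import java.io.File

    // Load a carbon-intensity trace; the path here is illustrative.
    val fragments = CarbonTraceLoader().get(File("traces/carbon/NL-2022.parquet"))
    println("Loaded ${fragments.size} carbon fragments")
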
diff --git a/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ComputeService.java b/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ComputeService.java
index 8b6bef2c..fde83ead 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ComputeService.java
+++ b/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ComputeService.java
@@ -34,14 +34,11 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import java.util.SplittableRandom;
-import java.util.UUID;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.opendc.common.Dispatcher;
import org.opendc.common.util.Pacer;
import org.opendc.compute.api.Flavor;
-import org.opendc.compute.api.Image;
import org.opendc.compute.api.TaskState;
import org.opendc.compute.simulator.host.HostListener;
import org.opendc.compute.simulator.host.HostModel;
@@ -82,11 +79,6 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
*/
private final Pacer pacer;
- /**
- * The {@link SplittableRandom} used to generate the unique identifiers for the service resources.
- */
- private final SplittableRandom random = new SplittableRandom(0);
-
private final int maxNumFailures;
/**
@@ -126,31 +118,21 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
*/
private final Map<ServiceTask, SimHost> activeTasks = new HashMap<>();
- /**
- * The active tasks in the system.
- */
- private final List<String> completedTasks = new ArrayList<>();
+ private final List<Integer> completedTasks = new ArrayList<>();
- private final List<String> terminatedTasks = new ArrayList<>();
+ private final List<Integer> terminatedTasks = new ArrayList<>();
/**
* The registered flavors for this compute service.
*/
- private final Map<UUID, ServiceFlavor> flavorById = new HashMap<>();
+ private final Map<Integer, ServiceFlavor> flavorById = new HashMap<>();
private final List<ServiceFlavor> flavors = new ArrayList<>();
/**
- * The registered images for this compute service.
- */
- private final Map<UUID, ServiceImage> imageById = new HashMap<>();
-
- private final List<ServiceImage> images = new ArrayList<>();
-
- /**
* The registered tasks for this compute service.
*/
- private final Map<UUID, ServiceTask> taskById = new HashMap<>();
+ private final Map<Integer, ServiceTask> taskById = new HashMap<>();
private final List<ServiceTask> tasksToRemove = new ArrayList<>();
@@ -192,7 +174,7 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
|| newState == TaskState.PAUSED
|| newState == TaskState.TERMINATED
|| newState == TaskState.FAILED) {
- LOGGER.info("task {} {} {} finished", task.getUid(), task.getName(), task.getFlavor());
+ LOGGER.info("task {} {} {} finished", task.getId(), task.getName(), task.getFlavor());
if (activeTasks.remove(task) != null) {
tasksActive--;
@@ -272,7 +254,7 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
/**
* Return the {@link ServiceTask}s hosted by this service.
*/
- public Map<UUID, ServiceTask> getTasks() {
+ public Map<Integer, ServiceTask> getTasks() {
return taskById;
}
@@ -419,11 +401,10 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
}
SchedulingRequest schedule(ServiceTask task, boolean atFront) {
- LOGGER.debug("Enqueueing task {} to be assigned to host", task.getUid());
+ LOGGER.debug("Enqueueing task {} to be assigned to host", task.getId());
if (task.getNumFailures() >= maxNumFailures) {
- LOGGER.warn("task {} has been terminated because it failed {} times", (Object) task, (Object)
- task.getNumFailures());
+ LOGGER.warn("task {} has been terminated because it failed {} times", task, task.getNumFailures());
tasksTerminated++;
task.setState(TaskState.TERMINATED);
@@ -436,8 +417,8 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
SchedulingRequest request = new SchedulingRequest(task, now);
ServiceFlavor flavor = task.getFlavor();
- for (String taskName : this.terminatedTasks) {
- if (flavor.isInDependencies(taskName)) {
+ for (int taskId : this.terminatedTasks) {
+ if (flavor.isInDependencies(taskId)) {
// Terminate task
task.setState(TaskState.TERMINATED);
}
@@ -447,10 +428,10 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
flavor.updatePendingDependencies(this.completedTasks);
// If there are still pending dependencies, we cannot schedule the task yet
- Set<String> pendingDependencies = flavor.getDependencies();
+ Set<Integer> pendingDependencies = flavor.getDependencies();
if (!pendingDependencies.isEmpty()) {
// If the task has pending dependencies, we cannot schedule it yet
- LOGGER.debug("Task {} has pending dependencies: {}", task.getUid(), pendingDependencies);
+ LOGGER.debug("Task {} has pending dependencies: {}", task.getId(), pendingDependencies);
blockedTasks.add(request);
return null;
}
@@ -466,18 +447,18 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
}
void addCompletedTask(ServiceTask task) {
- String taskName = task.getName();
+ int taskId = task.getId();
- if (!this.completedTasks.contains(taskName)) {
- this.completedTasks.add(taskName);
+ if (!this.completedTasks.contains(taskId)) {
+ this.completedTasks.add(taskId);
}
List<SchedulingRequest> requestsToRemove = new ArrayList<>();
for (SchedulingRequest request : blockedTasks) {
- request.getTask().getFlavor().updatePendingDependencies(taskName);
+ request.getTask().getFlavor().updatePendingDependencies(taskId);
- Set<String> pendingDependencies = request.getTask().getFlavor().getDependencies();
+ Set<Integer> pendingDependencies = request.getTask().getFlavor().getDependencies();
if (pendingDependencies.isEmpty()) {
requestsToRemove.add(request);
@@ -492,16 +473,16 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
}
void addTerminatedTask(ServiceTask task) {
- String taskName = task.getName();
+ int taskId = task.getId();
List<SchedulingRequest> requestsToRemove = new ArrayList<>();
- if (!this.terminatedTasks.contains(taskName)) {
- this.terminatedTasks.add(taskName);
+ if (!this.terminatedTasks.contains(taskId)) {
+ this.terminatedTasks.add(taskId);
}
for (SchedulingRequest request : blockedTasks) {
- if (request.getTask().getFlavor().isInDependencies(taskName)) {
+ if (request.getTask().getFlavor().isInDependencies(taskId)) {
requestsToRemove.add(request);
request.getTask().setState(TaskState.TERMINATED);
}
@@ -513,18 +494,13 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
}
void delete(ServiceFlavor flavor) {
- flavorById.remove(flavor.getUid());
+ flavorById.remove(flavor.getTaskId());
flavors.remove(flavor);
}
- void delete(ServiceImage image) {
- imageById.remove(image.getUid());
- images.remove(image);
- }
-
void delete(ServiceTask task) {
completedTasks.remove(task);
- taskById.remove(task.getUid());
+ taskById.remove(task.getId());
}
public void updateCarbonIntensity(double newCarbonIntensity) {
@@ -675,68 +651,25 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
return new ArrayList<>(service.flavors);
}
- public Flavor findFlavor(@NotNull UUID id) {
- checkOpen();
-
- return service.flavorById.get(id);
- }
-
@NotNull
public ServiceFlavor newFlavor(
- @NotNull String name,
+ int taskId,
int cpuCount,
long memorySize,
int gpuCoreCount,
- @NotNull Set<String> parents,
- @NotNull Set<String> children,
+ @NotNull Set<Integer> parents,
+ @NotNull Set<Integer> children,
@NotNull Map<String, ?> meta) {
checkOpen();
final ComputeService service = this.service;
- UUID uid = new UUID(service.clock.millis(), service.random.nextLong());
- ServiceFlavor flavor =
- new ServiceFlavor(service, uid, name, cpuCount, memorySize, gpuCoreCount, parents, children, meta);
-
- // service.flavorById.put(uid, flavor);
- // service.flavors.add(flavor);
- return flavor;
- }
-
- @NotNull
- public List<Image> queryImages() {
- checkOpen();
-
- return new ArrayList<>(service.images);
- }
-
- public Image findImage(@NotNull UUID id) {
- checkOpen();
-
- return service.imageById.get(id);
- }
-
- public Image newImage(@NotNull String name) {
- return newImage(name, Collections.emptyMap(), Collections.emptyMap());
- }
-
- @NotNull
- public Image newImage(@NotNull String name, @NotNull Map<String, String> labels, @NotNull Map<String, ?> meta) {
- checkOpen();
-
- final ComputeService service = this.service;
- UUID uid = new UUID(service.clock.millis(), service.random.nextLong());
-
- ServiceImage image = new ServiceImage(service, uid, name, labels, meta);
-
- service.imageById.put(uid, image);
- service.images.add(image);
-
- return image;
+ return new ServiceFlavor(service, taskId, cpuCount, memorySize, gpuCoreCount, parents, children, meta);
}
@NotNull
public ServiceTask newTask(
+ int id,
@NotNull String name,
@NotNull TaskNature nature,
@NotNull Duration duration,
@@ -747,15 +680,10 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
checkOpen();
final ComputeService service = this.service;
- UUID uid = new UUID(service.clock.millis(), service.random.nextLong());
-
- // final ServiceFlavor internalFlavor =
- // Objects.requireNonNull(service.flavorById.get(flavor.getUid()), "Unknown flavor");
- // ServiceTask task = new ServiceTask(service, uid, name, internalFlavor, workload, meta);
- ServiceTask task = new ServiceTask(service, uid, name, nature, duration, deadline, flavor, workload, meta);
+ ServiceTask task = new ServiceTask(service, id, name, nature, duration, deadline, flavor, workload, meta);
- service.taskById.put(uid, task);
+ service.taskById.put(id, task);
service.tasksTotal++;
@@ -765,7 +693,7 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
}
@Nullable
- public ServiceTask findTask(@NotNull UUID id) {
+ public ServiceTask findTask(int id) {
checkOpen();
return service.taskById.get(id);
}
@@ -781,7 +709,7 @@ public final class ComputeService implements AutoCloseable, CarbonReceiver {
@Nullable
public void rescheduleTask(@NotNull ServiceTask task, @NotNull Workload workload) {
- ServiceTask internalTask = findTask(task.getUid());
+ ServiceTask internalTask = findTask(task.getId());
// SimHost from = service.lookupHost(internalTask);
// from.delete(internalTask);
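
The scheduling changes above gate blocked tasks on integer dependency IDs: schedule() parks a request in blockedTasks while its flavor still has pending dependencies, addCompletedTask() drains the completed ID from every blocked flavor and releases requests whose dependency set becomes empty, and addTerminatedTask() cascades termination through dependents. A reduced Kotlin sketch of that mechanism over plain collections, with illustrative names rather than the OpenDC API:

    // Sketch of the dependency gating the diff implements (names are illustrative).
    class DependencyGate {
        // Blocked task ID -> the dependency IDs it is still waiting on.
        private val pending = mutableMapOf<Int, MutableSet<Int>>()

        fun block(taskId: Int, dependencies: Set<Int>) {
            pending[taskId] = dependencies.toMutableSet()
        }

        // On completion, drop the ID from every waiting set and
        // return the tasks that just became schedulable.
        fun complete(completedId: Int): List<Int> {
            val released = mutableListOf<Int>()
            val iter = pending.iterator()
            while (iter.hasNext()) {
                val (blocked, deps) = iter.next()
                deps.remove(completedId)
                if (deps.isEmpty()) {
                    released += blocked
                    iter.remove()
                }
            }
            return released
        }
    }
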
diff --git a/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceFlavor.java b/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceFlavor.java
index bb68d336..6201f21f 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceFlavor.java
+++ b/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceFlavor.java
@@ -28,7 +28,6 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
-import java.util.UUID;
import org.jetbrains.annotations.NotNull;
import org.opendc.compute.api.Flavor;
@@ -37,29 +36,26 @@ import org.opendc.compute.api.Flavor;
*/
public final class ServiceFlavor implements Flavor {
private final ComputeService service;
- private final UUID uid;
- private final String name;
+ private final int taskId;
private final int cpuCoreCount;
private final long memorySize;
private final int gpuCoreCount;
- private final Set<String> parents;
- private final Set<String> children;
- private final Set<String> dependencies;
+ private final Set<Integer> parents;
+ private final Set<Integer> children;
+ private final Set<Integer> dependencies;
private final Map<String, ?> meta;
ServiceFlavor(
ComputeService service,
- UUID uid,
- String name,
+ int taskId,
int cpuCoreCount,
long memorySize,
int gpuCoreCount,
- Set<String> parents,
- Set<String> children,
+ Set<Integer> parents,
+ Set<Integer> children,
Map<String, ?> meta) {
this.service = service;
- this.uid = uid;
- this.name = name;
+ this.taskId = taskId;
this.cpuCoreCount = cpuCoreCount;
this.memorySize = memorySize;
this.gpuCoreCount = gpuCoreCount;
@@ -84,16 +80,9 @@ public final class ServiceFlavor implements Flavor {
return gpuCoreCount;
}
- @NotNull
- @Override
- public UUID getUid() {
- return uid;
- }
-
- @NotNull
@Override
- public String getName() {
- return name;
+ public int getTaskId() {
+ return taskId;
}
@NotNull
@@ -117,45 +106,45 @@ public final class ServiceFlavor implements Flavor {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ServiceFlavor flavor = (ServiceFlavor) o;
- return service.equals(flavor.service) && uid.equals(flavor.uid);
+ return service.equals(flavor.service) && taskId == flavor.taskId;
}
@Override
public int hashCode() {
- return Objects.hash(service, uid);
+ return Objects.hash(service, taskId);
}
@Override
public String toString() {
- return "Flavor[uid=" + uid + ",name=" + name + "]";
+ return "Flavor[name=" + taskId + "]";
}
@Override
- public @NotNull Set<String> getDependencies() {
+ public @NotNull Set<Integer> getDependencies() {
return dependencies;
}
- public void updatePendingDependencies(List<String> completedTasks) {
- for (String task : completedTasks) {
+ public void updatePendingDependencies(List<Integer> completedTasks) {
+ for (int task : completedTasks) {
this.updatePendingDependencies(task);
}
}
- public void updatePendingDependencies(String completedTask) {
+ public void updatePendingDependencies(int completedTask) {
this.dependencies.remove(completedTask);
}
- public boolean isInDependencies(String task) {
+ public boolean isInDependencies(int task) {
return this.dependencies.contains(task);
}
@Override
- public @NotNull Set<@NotNull String> getParents() {
+ public @NotNull Set<Integer> getParents() {
return parents;
}
@Override
- public @NotNull Set<@NotNull String> getChildren() {
+ public @NotNull Set<Integer> getChildren() {
return children;
}
}
diff --git a/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceImage.java b/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceImage.java
deleted file mode 100644
index dffa4356..00000000
--- a/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceImage.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.compute.simulator.service;
-
-import java.util.Collections;
-import java.util.Map;
-import java.util.Objects;
-import java.util.UUID;
-import org.jetbrains.annotations.NotNull;
-import org.opendc.compute.api.Image;
-
-/**
- * Implementation of {@link Image} provided by {@link ComputeService}.
- */
-public final class ServiceImage implements Image {
- private final ComputeService service;
- private final UUID uid;
- private final String name;
- private final Map<String, String> labels;
- private final Map<String, ?> meta;
-
- ServiceImage(ComputeService service, UUID uid, String name, Map<String, String> labels, Map<String, ?> meta) {
- this.service = service;
- this.uid = uid;
- this.name = name;
- this.labels = labels;
- this.meta = meta;
- }
-
- @NotNull
- @Override
- public UUID getUid() {
- return uid;
- }
-
- @NotNull
- @Override
- public String getName() {
- return name;
- }
-
- @NotNull
- @Override
- public Map<String, Object> getMeta() {
- return Collections.unmodifiableMap(meta);
- }
-
- @Override
- public void reload() {
- // No-op: this object is the source-of-truth
- }
-
- @Override
- public void delete() {
- service.delete(this);
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- ServiceImage image = (ServiceImage) o;
- return service.equals(image.service) && uid.equals(image.uid);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(service, uid);
- }
-
- @Override
- public String toString() {
- return "Image[uid=" + uid + ",name=" + name + "]";
- }
-}
diff --git a/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceTask.java b/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceTask.java
index 281f75ca..57bbb7c3 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceTask.java
+++ b/opendc-compute/opendc-compute-simulator/src/main/java/org/opendc/compute/simulator/service/ServiceTask.java
@@ -29,7 +29,6 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
-import java.util.UUID;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.opendc.compute.api.TaskState;
@@ -47,7 +46,7 @@ public class ServiceTask {
private static final Logger LOGGER = LoggerFactory.getLogger(ServiceTask.class);
private final ComputeService service;
- private final UUID uid;
+ private final int id;
private final String name;
private final TaskNature nature;
@@ -73,7 +72,7 @@ public class ServiceTask {
ServiceTask(
ComputeService service,
- UUID uid,
+ int id,
String name,
TaskNature nature,
Duration duration,
@@ -82,7 +81,7 @@ public class ServiceTask {
Workload workload,
Map<String, ?> meta) {
this.service = service;
- this.uid = uid;
+ this.id = id;
this.name = name;
this.nature = nature;
this.duration = duration;
@@ -94,9 +93,8 @@ public class ServiceTask {
this.submittedAt = this.service.getClock().instant();
}
- @NotNull
- public UUID getUid() {
- return uid;
+ public int getId() {
+ return id;
}
@NotNull
@@ -191,18 +189,18 @@ public class ServiceTask {
LOGGER.warn("User tried to start deleted task");
throw new IllegalStateException("Task is deleted");
case CREATED:
- LOGGER.info("User requested to start task {}", uid);
+ LOGGER.info("User requested to start task {}", id);
setState(TaskState.PROVISIONING);
assert request == null : "Scheduling request already active";
request = service.schedule(this);
break;
case PAUSED:
- LOGGER.info("User requested to start task after pause {}", uid);
+ LOGGER.info("User requested to start task after pause {}", id);
setState(TaskState.PROVISIONING);
request = service.schedule(this, true);
break;
case FAILED:
- LOGGER.info("User requested to start task after failure {}", uid);
+ LOGGER.info("User requested to start task after failure {}", id);
setState(TaskState.PROVISIONING);
request = service.schedule(this, true);
break;
@@ -235,15 +233,15 @@ public class ServiceTask {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ServiceTask task = (ServiceTask) o;
- return service.equals(task.service) && uid.equals(task.uid);
+ return service.equals(task.service) && id == task.id;
}
public int hashCode() {
- return Objects.hash(service, uid);
+ return Objects.hash(service, id);
}
public String toString() {
- return "Task[uid=" + uid + ",name=" + name + ",state=" + state + "]";
+ return "Task[uid=" + id + ",name=" + name + ",state=" + state + "]";
}
void setState(TaskState newState) {
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/internal/Guest.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/internal/Guest.kt
index 40de94bb..ed0c5226 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/internal/Guest.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/internal/Guest.kt
@@ -73,7 +73,7 @@ public class Guest(
public fun start() {
when (state) {
TaskState.CREATED, TaskState.FAILED, TaskState.PAUSED -> {
- LOGGER.info { "User requested to start task ${task.uid}" }
+ LOGGER.info { "User requested to start task ${task.id}" }
doStart()
}
TaskState.RUNNING -> return
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/provisioner/HostsProvisioningStep.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/provisioner/HostsProvisioningStep.kt
index ced38480..7a0d5d65 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/provisioner/HostsProvisioningStep.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/provisioner/HostsProvisioningStep.kt
@@ -22,6 +22,7 @@
package org.opendc.compute.simulator.provisioner
+import org.opendc.common.ResourceType
import org.opendc.compute.carbon.getCarbonFragments
import org.opendc.compute.simulator.host.SimHost
import org.opendc.compute.simulator.service.ComputeService
@@ -119,11 +120,11 @@ public class HostsProvisioningStep internal constructor(
carbonModel?.addReceiver(batteryPolicy)
- FlowEdge(hostDistributor, batteryAggregator)
+ FlowEdge(hostDistributor, batteryAggregator, ResourceType.POWER)
service.addBattery(battery)
} else {
- FlowEdge(hostDistributor, simPowerSource)
+ FlowEdge(hostDistributor, simPowerSource, ResourceType.POWER)
}
// Create hosts, they are connected to the powerMux when SimMachine is created
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/filters/DifferentHostFilter.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/filters/DifferentHostFilter.kt
index 279a2717..bc98a575 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/filters/DifferentHostFilter.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/filters/DifferentHostFilter.kt
@@ -24,7 +24,6 @@ package org.opendc.compute.simulator.scheduler.filters
import org.opendc.compute.simulator.service.HostView
import org.opendc.compute.simulator.service.ServiceTask
-import java.util.UUID
/**
* A [HostFilter] that ensures an instance is scheduled on a different host from a set of instances.
@@ -35,7 +34,7 @@ public class DifferentHostFilter : HostFilter {
task: ServiceTask,
): Boolean {
@Suppress("UNCHECKED_CAST")
- val affinityUUIDs = task.meta["scheduler_hint:different_host"] as? Set<UUID> ?: return true
- return host.host.getInstances().none { it.uid in affinityUUIDs }
+ val affinityIDs = task.meta["scheduler_hint:different_host"] as? Set<Int> ?: return true
+ return host.host.getInstances().none { it.id in affinityIDs }
}
}
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/filters/SameHostFilter.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/filters/SameHostFilter.kt
index 761b125d..73fd0d3c 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/filters/SameHostFilter.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/scheduler/filters/SameHostFilter.kt
@@ -24,7 +24,6 @@ package org.opendc.compute.simulator.scheduler.filters
import org.opendc.compute.simulator.service.HostView
import org.opendc.compute.simulator.service.ServiceTask
-import java.util.UUID
/**
* A [HostFilter] that ensures an instance is scheduled on the same host as all other instances in a set of instances.
@@ -35,7 +34,7 @@ public class SameHostFilter : HostFilter {
task: ServiceTask,
): Boolean {
@Suppress("UNCHECKED_CAST")
- val affinityUUIDs = task.meta["scheduler_hint:same_host"] as? Set<UUID> ?: return true
- return host.host.getInstances().any { it.uid in affinityUUIDs }
+ val affinityIDs = task.meta["scheduler_hint:same_host"] as? Set<Int> ?: return true
+ return host.host.getInstances().any { it.id in affinityIDs }
}
}
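
Both affinity filters now read integer task IDs from the task's scheduler-hint metadata instead of UUIDs. A sketch of supplying those hints in a task's meta map; the hint keys come from the filters above, the IDs are illustrative:

    // Illustrative scheduler hints keyed by integer task IDs.
    val meta: Map<String, Any> = mapOf(
        "scheduler_hint:same_host" to setOf(11, 12), // co-locate with tasks 11 and 12
        "scheduler_hint:different_host" to setOf(99), // avoid the host running task 99
    )
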
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltTaskExportColumns.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltTaskExportColumns.kt
index 07750114..0397b9a1 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltTaskExportColumns.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/parquet/DfltTaskExportColumns.kt
@@ -61,10 +61,9 @@ public object DfltTaskExportColumns {
public val TASK_ID: ExportColumn<TaskTableReader> =
ExportColumn(
field =
- Types.required(BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
+ Types.required(INT32)
.named("task_id"),
- ) { Binary.fromString(it.taskInfo.id) }
+ ) { it.taskInfo.id }
public val TASK_NAME: ExportColumn<TaskTableReader> =
ExportColumn(
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/task/TaskInfo.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/task/TaskInfo.kt
index c1a14613..2727847f 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/task/TaskInfo.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/task/TaskInfo.kt
@@ -26,7 +26,7 @@ package org.opendc.compute.simulator.telemetry.table.task
* Static information about a task exposed to the telemetry service.
*/
public data class TaskInfo(
- val id: String,
+ val id: Int,
val name: String,
val type: String,
val arch: String,
diff --git a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/task/TaskTableReaderImpl.kt b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/task/TaskTableReaderImpl.kt
index ce62fdb0..3183cf11 100644
--- a/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/task/TaskTableReaderImpl.kt
+++ b/opendc-compute/opendc-compute-simulator/src/main/kotlin/org/opendc/compute/simulator/telemetry/table/task/TaskTableReaderImpl.kt
@@ -89,7 +89,7 @@ public class TaskTableReaderImpl(
*/
override val taskInfo: TaskInfo =
TaskInfo(
- task.uid.toString(),
+ task.id,
task.name,
"vm",
"x86",
diff --git a/opendc-compute/opendc-compute-simulator/src/test/kotlin/org/opendc/compute/simulator/scheduler/FilterSchedulerTest.kt b/opendc-compute/opendc-compute-simulator/src/test/kotlin/org/opendc/compute/simulator/scheduler/FilterSchedulerTest.kt
index 5109f828..fe5cea70 100644
--- a/opendc-compute/opendc-compute-simulator/src/test/kotlin/org/opendc/compute/simulator/scheduler/FilterSchedulerTest.kt
+++ b/opendc-compute/opendc-compute-simulator/src/test/kotlin/org/opendc/compute/simulator/scheduler/FilterSchedulerTest.kt
@@ -44,7 +44,6 @@ import org.opendc.compute.simulator.scheduler.weights.VCpuWeigher
import org.opendc.compute.simulator.service.HostView
import org.opendc.compute.simulator.service.ServiceTask
import java.util.Random
-import java.util.UUID
/**
* Test suite for the [FilterScheduler].
@@ -362,7 +361,7 @@ internal class FilterSchedulerTest {
every { reqA.task.flavor.memorySize } returns 1024
every { reqA.isCancelled } returns false
val taskA = mockk<ServiceTask>()
- every { taskA.uid } returns UUID.randomUUID()
+ every { taskA.id } returns Random().nextInt(1, Int.MAX_VALUE)
every { reqA.task } returns taskA
val hostA = mockk<HostView>()
@@ -388,7 +387,7 @@ internal class FilterSchedulerTest {
assertEquals(hostA, scheduler.select(mutableListOf(reqB).iterator()).host)
- every { reqB.task.meta } returns mapOf("scheduler_hint:same_host" to setOf(reqA.task.uid))
+ every { reqB.task.meta } returns mapOf("scheduler_hint:same_host" to setOf(reqA.task.id))
assertEquals(hostB, scheduler.select(mutableListOf(reqB).iterator()).host)
}
@@ -406,7 +405,7 @@ internal class FilterSchedulerTest {
every { reqA.task.flavor.memorySize } returns 1024
every { reqA.isCancelled } returns false
val taskA = mockk<ServiceTask>()
- every { taskA.uid } returns UUID.randomUUID()
+ every { taskA.id } returns Random().nextInt(1, Int.MAX_VALUE)
every { reqA.task } returns taskA
val hostA = mockk<HostView>()
@@ -432,7 +431,7 @@ internal class FilterSchedulerTest {
assertEquals(hostA, scheduler.select(mutableListOf(reqB).iterator()).host)
- every { reqB.task.meta } returns mapOf("scheduler_hint:different_host" to setOf(taskA.uid))
+ every { reqB.task.meta } returns mapOf("scheduler_hint:different_host" to setOf(taskA.id))
assertEquals(hostB, scheduler.select(mutableListOf(reqB).iterator()).host)
}
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadLoader.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadLoader.kt
index 3a0ee3e0..5db2b43b 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadLoader.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeWorkloadLoader.kt
@@ -27,27 +27,27 @@ import org.opendc.simulator.compute.workload.trace.TraceWorkload
import org.opendc.simulator.compute.workload.trace.scaling.NoDelayScaling
import org.opendc.simulator.compute.workload.trace.scaling.ScalingPolicy
import org.opendc.trace.Trace
-import org.opendc.trace.conv.TABLE_RESOURCES
-import org.opendc.trace.conv.TABLE_RESOURCE_STATES
-import org.opendc.trace.conv.resourceChildren
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceDeadline
-import org.opendc.trace.conv.resourceDuration
-import org.opendc.trace.conv.resourceGpuCapacity
-import org.opendc.trace.conv.resourceGpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceNature
-import org.opendc.trace.conv.resourceParents
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateDuration
-import org.opendc.trace.conv.resourceStateGpuUsage
-import org.opendc.trace.conv.resourceSubmissionTime
+import org.opendc.trace.conv.FRAGMENT_CPU_USAGE
+import org.opendc.trace.conv.FRAGMENT_DURATION
+import org.opendc.trace.conv.FRAGMENT_GPU_USAGE
+import org.opendc.trace.conv.TABLE_FRAGMENTS
+import org.opendc.trace.conv.TABLE_TASKS
+import org.opendc.trace.conv.TASK_CHILDREN
+import org.opendc.trace.conv.TASK_CPU_CAPACITY
+import org.opendc.trace.conv.TASK_CPU_COUNT
+import org.opendc.trace.conv.TASK_DEADLINE
+import org.opendc.trace.conv.TASK_DURATION
+import org.opendc.trace.conv.TASK_GPU_CAPACITY
+import org.opendc.trace.conv.TASK_GPU_COUNT
+import org.opendc.trace.conv.TASK_ID
+import org.opendc.trace.conv.TASK_MEM_CAPACITY
+import org.opendc.trace.conv.TASK_NAME
+import org.opendc.trace.conv.TASK_NATURE
+import org.opendc.trace.conv.TASK_PARENTS
+import org.opendc.trace.conv.TASK_SUBMISSION_TIME
import java.io.File
import java.lang.ref.SoftReference
import java.time.Duration
-import java.util.UUID
import java.util.concurrent.ConcurrentHashMap
import kotlin.math.roundToLong
@@ -77,23 +77,20 @@ public class ComputeWorkloadLoader(
/**
* Read the fragments into memory.
*/
- private fun parseFragments(trace: Trace): Map<String, Builder> {
- val reader = checkNotNull(trace.getTable(TABLE_RESOURCE_STATES)).newReader()
+ private fun parseFragments(trace: Trace): Map<Int, Builder> {
+ val reader = checkNotNull(trace.getTable(TABLE_FRAGMENTS)).newReader()
- val idCol = reader.resolve(resourceID)
- val durationCol = reader.resolve(resourceStateDuration)
- val coresCol = reader.resolve(resourceCpuCount)
- val usageCol = reader.resolve(resourceStateCpuUsage)
- val gpuCoresCol = reader.resolve(resourceGpuCount)
- val resourceGpuCapacityCol = reader.resolve(resourceStateGpuUsage)
+ val idCol = reader.resolve(TASK_ID)
+ val durationCol = reader.resolve(FRAGMENT_DURATION)
+ val usageCol = reader.resolve(FRAGMENT_CPU_USAGE)
+ val resourceGpuCapacityCol = reader.resolve(FRAGMENT_GPU_USAGE)
- val fragments = mutableMapOf<String, Builder>()
+ val fragments = mutableMapOf<Int, Builder>()
return try {
while (reader.nextRow()) {
- val id = reader.getString(idCol)!!
+ val id = reader.getInt(idCol)
val durationMs = reader.getDuration(durationCol)!!
- val cores = reader.getInt(coresCol)
val cpuUsage = reader.getDouble(usageCol)
val gpuUsage =
if (reader.getDouble(
@@ -104,14 +101,13 @@ public class ComputeWorkloadLoader(
} else {
reader.getDouble(resourceGpuCapacityCol) // Default to 0 if not present
}
- val gpuCores = reader.getInt(gpuCoresCol) // Default to 0 if not present
- val gpuMemory = 0L // Default to 0 if not present
+ val gpuMemory = 0 // Default to 0 if not present
val builder =
fragments.computeIfAbsent(
id,
) { Builder(checkpointInterval, checkpointDuration, checkpointIntervalScaling, scalingPolicy, id) }
- builder.add(durationMs, cpuUsage, cores, gpuUsage, gpuCores, gpuMemory)
+ builder.add(durationMs, cpuUsage, gpuUsage, gpuMemory)
}
fragments
@@ -125,29 +121,35 @@ public class ComputeWorkloadLoader(
*/
private fun parseMeta(
trace: Trace,
- fragments: Map<String, Builder>,
+ fragments: Map<Int, Builder>,
): List<Task> {
- val reader = checkNotNull(trace.getTable(TABLE_RESOURCES)).newReader()
+ val reader = checkNotNull(trace.getTable(TABLE_TASKS)).newReader()
- val idCol = reader.resolve(resourceID)
- val submissionTimeCol = reader.resolve(resourceSubmissionTime)
- val durationCol = reader.resolve(resourceDuration)
- val cpuCountCol = reader.resolve(resourceCpuCount)
- val cpuCapacityCol = reader.resolve(resourceCpuCapacity)
- val memCol = reader.resolve(resourceMemCapacity)
- val gpuCapacityCol = reader.resolve(resourceGpuCapacity) // Assuming GPU capacity is also present
- val gpuCoreCountCol = reader.resolve(resourceGpuCount) // Assuming GPU cores are also present
- val parentsCol = reader.resolve(resourceParents)
- val childrenCol = reader.resolve(resourceChildren)
- val natureCol = reader.resolve(resourceNature)
- val deadlineCol = reader.resolve(resourceDeadline)
+ val idCol = reader.resolve(TASK_ID)
+ val nameCol = reader.resolve(TASK_NAME)
+ val submissionTimeCol = reader.resolve(TASK_SUBMISSION_TIME)
+ val durationCol = reader.resolve(TASK_DURATION)
+ val cpuCountCol = reader.resolve(TASK_CPU_COUNT)
+ val cpuCapacityCol = reader.resolve(TASK_CPU_CAPACITY)
+ val memCol = reader.resolve(TASK_MEM_CAPACITY)
+ val gpuCapacityCol = reader.resolve(TASK_GPU_CAPACITY) // Assuming GPU capacity is also present
+ val gpuCoreCountCol = reader.resolve(TASK_GPU_COUNT) // Assuming GPU cores are also present
+ val parentsCol = reader.resolve(TASK_PARENTS)
+ val childrenCol = reader.resolve(TASK_CHILDREN)
+ val natureCol = reader.resolve(TASK_NATURE)
+ val deadlineCol = reader.resolve(TASK_DEADLINE)
- var counter = 0
val entries = mutableListOf<Task>()
return try {
while (reader.nextRow()) {
- val id = reader.getString(idCol)!!
+ val id = reader.getInt(idCol)
+ val name = reader.getString(nameCol) ?: id.toString()
+
if (!fragments.containsKey(id)) {
continue
}
@@ -169,10 +171,9 @@ public class ComputeWorkloadLoader(
val gpuCoreCount = reader.getInt(gpuCoreCountCol) // Default to 0 if not present
val gpuMemory = 0L // currently not implemented
- val parents = reader.getSet(parentsCol, String::class.java) // No dependencies in the trace
- val children = reader.getSet(childrenCol, String::class.java) // No dependencies in the trace
+ val parents = reader.getSet(parentsCol, Int::class.java) // Empty if the trace defines no dependencies
+ val children = reader.getSet(childrenCol, Int::class.java) // Empty if the trace defines no dependencies
- val uid = UUID.nameUUIDFromBytes("$id-${counter++}".toByteArray())
var nature = reader.getString(natureCol)
var deadline = reader.getLong(deadlineCol)
if (deferAll) {
@@ -185,8 +186,8 @@ public class ComputeWorkloadLoader(
entries.add(
Task(
- uid,
id,
+ name,
submissionTime,
duration,
parents!!,
@@ -221,7 +222,7 @@ public class ComputeWorkloadLoader(
* Load the trace at the specified [pathToFile].
*/
override fun load(): List<Task> {
- val trace = Trace.open(pathToFile, "opendc-vm")
+ val trace = Trace.open(pathToFile, "workload")
val fragments = parseFragments(trace)
val vms = parseMeta(trace, fragments)
@@ -243,7 +244,7 @@ public class ComputeWorkloadLoader(
checkpointDuration: Long,
checkpointIntervalScaling: Double,
scalingPolicy: ScalingPolicy,
- taskName: String,
+ taskId: Int,
) {
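+ // Note: fragments are grouped per integer task id (see parseFragments), so a single Builder
+ // accumulates every fragment row that shares one TASK_ID value.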
/**
* The total load of the trace.
@@ -259,7 +260,7 @@ public class ComputeWorkloadLoader(
checkpointDuration,
checkpointIntervalScaling,
scalingPolicy,
- taskName,
+ taskId,
)
/**
@@ -267,22 +268,18 @@ public class ComputeWorkloadLoader(
*
* @param duration The duration of the fragment.
* @param cpuUsage CPU usage of this fragment.
- * @param cpuCores Number of cores used.
* @param gpuUsage GPU usage of this fragment.
- * @param gpuCores Number of GPU cores used.
* @param gpuMemoryUsage GPU memory usage of this fragment.
*/
fun add(
duration: Duration,
cpuUsage: Double,
- cpuCores: Int,
gpuUsage: Double = 0.0,
- gpuCores: Int = 0,
- gpuMemoryUsage: Long = 0,
+ gpuMemoryUsage: Int = 0,
) {
totalLoad += ((cpuUsage * duration.toMillis()) + (gpuUsage * duration.toMillis())) / 1000 // avg MHz * duration = MFLOPs
- builder.add(duration.toMillis(), cpuUsage, cpuCores, gpuUsage, gpuCores, gpuMemoryUsage)
+ builder.add(duration.toMillis(), cpuUsage, gpuUsage, gpuMemoryUsage)
}
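+
+ // Illustration (hypothetical values): a 10-minute fragment at 1000.0 MHz CPU usage and no GPU
+ // usage contributes (1000.0 * 600_000 + 0.0 * 600_000) / 1000 = 600_000 MFLOPs to totalLoad:
+ //     add(Duration.ofMillis(10 * 60 * 1000), cpuUsage = 1000.0)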
/**
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/Task.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/Task.kt
index b1ba4545..c875b8a2 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/Task.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/Task.kt
@@ -23,27 +23,25 @@
package org.opendc.compute.workload
import org.opendc.simulator.compute.workload.trace.TraceWorkload
-import java.util.UUID
/**
* A virtual machine workload.
*
- * @param uid The unique identifier of the virtual machine.
+ * @param id The unique identifier of the virtual machine.
* @param name The name of the virtual machine.
* @param cpuCapacity The required CPU capacity for the VM in MHz.
* @param cpuCount The number of vCPUs in the VM.
* @param memCapacity The provisioned memory for the VM in MB.
* @param submissionTime The start time of the VM.
* @param trace The trace that belongs to this VM.
- * @param interferenceProfile The interference profile of this virtual machine.
*/
public data class Task(
- val uid: UUID,
+ val id: Int,
val name: String,
var submissionTime: Long,
val duration: Long,
- val parents: Set<String> = emptySet(),
- val children: Set<String> = emptySet(),
+ val parents: Set<Int> = emptySet(),
+ val children: Set<Int> = emptySet(),
val cpuCount: Int,
val cpuCapacity: Double,
val totalCpuLoad: Double,
diff --git a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner/ScenarioReplayer.kt b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner/ScenarioReplayer.kt
index 6c325349..14760a63 100644
--- a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner/ScenarioReplayer.kt
+++ b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner/ScenarioReplayer.kt
@@ -141,12 +141,13 @@ public suspend fun ComputeService.replay(
launch {
val task =
client.newTask(
+ entry.id,
entry.name,
nature,
Duration.ofMillis(entry.duration),
entry.deadline,
client.newFlavor(
- entry.name,
+ entry.id,
entry.cpuCount,
entry.memCapacity,
entry.gpuCount,
diff --git a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner/ScenarioRunner.kt b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner/ScenarioRunner.kt
index e14a06cc..ce99a61d 100644
--- a/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner/ScenarioRunner.kt
+++ b/opendc-experiments/opendc-experiments-base/src/main/kotlin/org/opendc/experiments/base/runner/ScenarioRunner.kt
@@ -101,7 +101,7 @@ public fun runScenario(
scalingPolicy,
scenario.workloadSpec.deferAll,
)
- var workload = workloadLoader.sampleByLoad(scenario.workloadSpec.sampleFraction)
+ val workload = workloadLoader.sampleByLoad(scenario.workloadSpec.sampleFraction)
val startTimeLong = workload.minOf { it.submissionTime }
val startTime = Duration.ofMillis(startTimeLong)
@@ -143,7 +143,7 @@ public fun runScenario(
service.setTasksExpected(workload.size)
service.setMetricReader(provisioner.getMonitor())
- var carbonModel: CarbonModel? = null
+ var carbonModel: CarbonModel?
if (provisioner.registry.hasService(serviceDomain, CarbonModel::class.java)) {
carbonModel = provisioner.registry.resolve(serviceDomain, CarbonModel::class.java)!!
@@ -167,12 +167,12 @@ public fun runScenario(
}
}
service.replay(
timeSource,
workload,
failureModelSpec = scenario.failureModelSpec,
seed = seed,
)
}
}
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/BatteryTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/BatteryTest.kt
index a85c84f3..f2ab32d3 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/BatteryTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/BatteryTest.kt
@@ -42,10 +42,10 @@ class BatteryTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
submissionTime = "2022-01-01T00:00",
),
@@ -68,10 +68,10 @@ class BatteryTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
submissionTime = "2022-01-01T00:00",
),
@@ -95,10 +95,10 @@ class BatteryTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(20 * 60 * 1000, 1000.0, 1),
+ TraceFragment(20 * 60 * 1000, 1000.0),
),
submissionTime = "2022-01-01T00:00",
),
@@ -123,10 +123,10 @@ class BatteryTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(30 * 60 * 1000, 1000.0, 1),
+ TraceFragment(30 * 60 * 1000, 1000.0),
),
submissionTime = "2022-01-01T00:00",
),
@@ -150,10 +150,10 @@ class BatteryTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(30 * 60 * 1000, 1000.0, 1),
+ TraceFragment(30 * 60 * 1000, 1000.0),
),
submissionTime = "2022-01-01T00:00",
),
@@ -193,9 +193,9 @@ class BatteryTest {
repeat(numTasks) {
this.add(
createTestTask(
- name = "0",
+ id = it,
fragments =
- arrayListOf(TraceFragment(10 * 60 * 1000, 1000.0, 1)),
+ arrayListOf(TraceFragment(10 * 60 * 1000, 1000.0)),
submissionTime = "2022-01-01T00:00",
),
)
@@ -218,10 +218,10 @@ class BatteryTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
submissionTime = "2022-01-01T00:00",
),
@@ -251,10 +251,10 @@ class BatteryTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
checkpointInterval = 60 * 1000L,
checkpointDuration = 1000L,
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/CarbonTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/CarbonTest.kt
index a0f5978f..f2ee3b53 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/CarbonTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/CarbonTest.kt
@@ -46,10 +46,10 @@ class CarbonTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(120 * 60 * 1000, 1000.0, 1),
+ TraceFragment(120 * 60 * 1000, 1000.0),
),
submissionTime = "2022-01-01T00:00",
),
@@ -93,17 +93,17 @@ class CarbonTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(40 * 60 * 1000, 1000.0, 1),
- TraceFragment(40 * 60 * 1000, 2000.0, 1),
- TraceFragment(40 * 60 * 1000, 1000.0, 1),
- TraceFragment(40 * 60 * 1000, 2000.0, 1),
- TraceFragment(40 * 60 * 1000, 1000.0, 1),
- TraceFragment(40 * 60 * 1000, 2000.0, 1),
- TraceFragment(40 * 60 * 1000, 1000.0, 1),
- TraceFragment(40 * 60 * 1000, 2000.0, 1),
+ TraceFragment(40 * 60 * 1000, 1000.0),
+ TraceFragment(40 * 60 * 1000, 2000.0),
+ TraceFragment(40 * 60 * 1000, 1000.0),
+ TraceFragment(40 * 60 * 1000, 2000.0),
+ TraceFragment(40 * 60 * 1000, 1000.0),
+ TraceFragment(40 * 60 * 1000, 2000.0),
+ TraceFragment(40 * 60 * 1000, 1000.0),
+ TraceFragment(40 * 60 * 1000, 2000.0),
),
submissionTime = "2022-01-01T00:00",
),
@@ -162,10 +162,10 @@ class CarbonTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(60 * 60 * 1000, 1000.0, 1),
+ TraceFragment(60 * 60 * 1000, 1000.0),
),
submissionTime = "2022-01-01T00:00",
),
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/DistributionPoliciesTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/DistributionPoliciesTest.kt
index 68111bc1..256c067d 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/DistributionPoliciesTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/DistributionPoliciesTest.kt
@@ -89,11 +89,13 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 2000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
)
@@ -105,8 +107,8 @@ class DistributionPoliciesTest {
assertAll(
// single gpu
- { assertEquals(2000.0, singleMonitor.taskGpuDemands["0"]?.get(1), "Single GPU demand in task \"0\" should be 2000.0") },
- { assertEquals(4000.0, singleMonitor.taskGpuSupplied["0"]?.get(1), "Single GPU demand in task \"0\" should be 2000.0") },
+ { assertEquals(2000.0, singleMonitor.taskGpuDemands[0]?.get(1), "Single GPU demand in task \"0\" should be 2000.0") },
+ { assertEquals(4000.0, singleMonitor.taskGpuSupplied[0]?.get(1), "Single GPU supplied in task \"0\" should be 4000.0") },
{
assertEquals(
4000.0,
@@ -122,8 +124,8 @@ class DistributionPoliciesTest {
)
},
// double gpu
- { assertEquals(2000.0, doubleMonitor.taskGpuDemands["0"]?.get(1), "Double GPU demand in task \"0\" should be 2000.0") },
- { assertEquals(4000.0, doubleMonitor.taskGpuSupplied["0"]?.get(1), "Double GPU supplied in task \"0\" should be 4000.0") },
+ { assertEquals(2000.0, doubleMonitor.taskGpuDemands[0]?.get(1), "Double GPU demand in task \"0\" should be 2000.0") },
+ { assertEquals(4000.0, doubleMonitor.taskGpuSupplied[0]?.get(1), "Double GPU supplied in task \"0\" should be 4000.0") },
{
assertEquals(
2000.0,
@@ -164,18 +166,22 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 4000.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 4000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 4000.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 4000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
)
@@ -188,11 +194,11 @@ class DistributionPoliciesTest {
assertAll(
// single gpu
// task 0
- { assertEquals(4000.0, singleMonitor.taskGpuDemands["0"]?.get(1), "Single GPU demand in task \"0\" should be 4000.0") },
- { assertEquals(2000.0, singleMonitor.taskGpuSupplied["0"]?.get(1), "Single GPU supplied in task \"0\" should be 2000.0") },
+ { assertEquals(4000.0, singleMonitor.taskGpuDemands[0]?.get(1), "Single GPU demand in task \"0\" should be 4000.0") },
+ { assertEquals(2000.0, singleMonitor.taskGpuSupplied[0]?.get(1), "Single GPU supplied in task \"0\" should be 2000.0") },
// task 1
- { assertEquals(4000.0, singleMonitor.taskGpuDemands["1"]?.get(1), "Single GPU demand in task \"0\" should be 4000.0") },
- { assertEquals(2000.0, singleMonitor.taskGpuSupplied["1"]?.get(1), "Single GPU supplied in task \"0\" should be 2000.0") },
+ { assertEquals(4000.0, singleMonitor.taskGpuDemands[1]?.get(1), "Single GPU demand in task \"1\" should be 4000.0") },
+ { assertEquals(2000.0, singleMonitor.taskGpuSupplied[1]?.get(1), "Single GPU supplied in task \"1\" should be 2000.0") },
// host
{
assertEquals(
@@ -210,11 +216,11 @@ class DistributionPoliciesTest {
},
// double gpu
// task 0
- { assertEquals(4000.0, doubleMonitor.taskGpuDemands["0"]?.get(1), "Double GPU demand in task \"0\" should be 4000.0") },
- { assertEquals(2000.0, doubleMonitor.taskGpuSupplied["0"]?.get(1), "Double GPU supply in task \"0\" should be 2000.0") },
+ { assertEquals(4000.0, doubleMonitor.taskGpuDemands[0]?.get(1), "Double GPU demand in task \"0\" should be 4000.0") },
+ { assertEquals(2000.0, doubleMonitor.taskGpuSupplied[0]?.get(1), "Double GPU supply in task \"0\" should be 2000.0") },
// task 1
- { assertEquals(4000.0, doubleMonitor.taskGpuDemands["1"]?.get(1), "Double GPU demand in task \"0\" should be 4000.0") },
- { assertEquals(2000.0, doubleMonitor.taskGpuSupplied["1"]?.get(1), "Double GPU supply in task \"0\" should be 2000.0") },
+ { assertEquals(4000.0, doubleMonitor.taskGpuDemands[1]?.get(1), "Double GPU demand in task \"1\" should be 4000.0") },
+ { assertEquals(2000.0, doubleMonitor.taskGpuSupplied[1]?.get(1), "Double GPU supply in task \"1\" should be 2000.0") },
// host
{
assertEquals(
@@ -256,11 +262,13 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 4000.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 4000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
)
@@ -269,8 +277,8 @@ class DistributionPoliciesTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(4000.0, monitor.taskGpuDemands["0"]?.get(1), "Task GPU demand should be 4000.0") },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(1), "Task GPU supplied should be 1000.0") },
+ { assertEquals(4000.0, monitor.taskGpuDemands[0]?.get(1), "Task GPU demand should be 4000.0") },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(1), "Task GPU supplied should be 1000.0") },
// Host
{
assertEquals(
@@ -300,11 +308,13 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 6000.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 6000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
)
@@ -316,8 +326,8 @@ class DistributionPoliciesTest {
// Fixed share ratio of 0.5 means each GPU gets 50% of available capacity = 2000.0 each
// Total supplied should be 4000.0 (limited by total capacity)
assertAll(
- { assertEquals(6000.0, monitor.taskGpuDemands["0"]?.get(1), "Task GPU demand should be 6000.0") },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(1), "Task GPU supplied should be 1000.0 (limited by the capacity)") },
+ { assertEquals(6000.0, monitor.taskGpuDemands[0]?.get(1), "Task GPU demand should be 6000.0") },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(1), "Task GPU supplied should be 1000.0 (limited by the capacity)") },
// Host
{
assertEquals(
@@ -359,18 +369,22 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 3000.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 3000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 3000.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 3000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
)
@@ -383,11 +397,11 @@ class DistributionPoliciesTest {
// So each task gets 1000.0, distributed as 1000.0 per GPU (50% share ratio)
assertAll(
// Task 0
- { assertEquals(3000.0, monitor.taskGpuDemands["0"]?.get(1), "Task 0 GPU demand should be 3000.0") },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(1), "Task 0 GPU supplied should be 1000.0") },
+ { assertEquals(3000.0, monitor.taskGpuDemands[0]?.get(1), "Task 0 GPU demand should be 3000.0") },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(1), "Task 0 GPU supplied should be 1000.0") },
// Task 1
- { assertEquals(3000.0, monitor.taskGpuDemands["1"]?.get(1), "Task 1 GPU demand should be 3000.0") },
- { assertEquals(1000.0, monitor.taskGpuSupplied["1"]?.get(1), "Task 1 GPU supplied should be 1000.0") },
+ { assertEquals(3000.0, monitor.taskGpuDemands[1]?.get(1), "Task 1 GPU demand should be 3000.0") },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[1]?.get(1), "Task 1 GPU supplied should be 1000.0") },
// Host
{ assertEquals(1000.0, monitor.hostGpuDemands["DualGpuHost"]?.get(1)?.get(0), "GPU 0 total demand at host should be 1000.0") },
{
@@ -417,11 +431,13 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1500.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1500.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
)
@@ -433,8 +449,8 @@ class DistributionPoliciesTest {
assertAll(
// single gpu - should satisfy demand and utilize remaining capacity
- { assertEquals(1500.0, singleMonitor.taskGpuDemands["0"]?.get(1), "Single GPU demand in task \"0\" should be 1500.0") },
- { assertEquals(1500.0, singleMonitor.taskGpuSupplied["0"]?.get(1)) { "Single GPU should supply the demanded 1500.0" } },
+ { assertEquals(1500.0, singleMonitor.taskGpuDemands[0]?.get(1), "Single GPU demand in task \"0\" should be 1500.0") },
+ { assertEquals(1500.0, singleMonitor.taskGpuSupplied[0]?.get(1)) { "Single GPU should supply the demanded 1500.0" } },
// Host
{
assertEquals(
@@ -451,8 +467,8 @@ class DistributionPoliciesTest {
)
},
// double gpu - should distribute across both GPUs and utilize remaining capacity
- { assertEquals(1500.0, doubleMonitor.taskGpuDemands["0"]?.get(1), "Double GPU demand in task \"0\" should be 1500.0") },
- { assertEquals(1500.0, doubleMonitor.taskGpuSupplied["0"]?.get(1), "Double GPU should supply the demanded 1500.0") },
+ { assertEquals(1500.0, doubleMonitor.taskGpuDemands[0]?.get(1), "Double GPU demand in task \"0\" should be 1500.0") },
+ { assertEquals(1500.0, doubleMonitor.taskGpuSupplied[0]?.get(1), "Double GPU should supply the demanded 1500.0") },
// Host
{
assertEquals(
@@ -494,18 +510,22 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 3000.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 3000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 2500.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 2500.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
)
@@ -517,11 +537,11 @@ class DistributionPoliciesTest {
// Best effort should distribute proportionally based on demand while using round-robin
assertAll(
// Task 0
- { assertEquals(3000.0, monitor.taskGpuDemands["0"]?.get(1), "Task 0 GPU demand should be 3000.0") },
- { assertEquals(3000.0, monitor.taskGpuSupplied["0"]?.get(1), "Task 0 GPU supply should be 1000.0") },
+ { assertEquals(3000.0, monitor.taskGpuDemands[0]?.get(1), "Task 0 GPU demand should be 3000.0") },
+ { assertEquals(3000.0, monitor.taskGpuSupplied[0]?.get(1), "Task 0 GPU supply should be 3000.0") },
// Task 1
- { assertEquals(2500.0, monitor.taskGpuDemands["1"]?.get(1), "Task 1 GPU demand should be 2500.0") },
- { assertEquals(1000.0, monitor.taskGpuSupplied["1"]?.get(1), "Task 0 GPU supply should be 1000.0") },
+ { assertEquals(2500.0, monitor.taskGpuDemands[1]?.get(1), "Task 1 GPU demand should be 2500.0") },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[1]?.get(1), "Task 1 GPU supply should be 1000.0") },
// Host
{ assertEquals(2750.0, monitor.hostGpuDemands["DualGpuHost"]?.get(1)?.get(0), "GPU 0 demand at host should be 2750.0") },
{ assertEquals(2000.0, monitor.hostGpuSupplied["DualGpuHost"]?.get(1)?.get(0), "GPU 0 supplied at host should be 2000.0") },
@@ -539,11 +559,13 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
)
@@ -554,8 +576,8 @@ class DistributionPoliciesTest {
// 1. Satisfy the demand
// 2. Utilize remaining capacity efficiently
assertAll(
- { assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(1), "Task GPU demand should be 1000.0") },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(1), "Task GPU supplied should be 1000.0") },
+ { assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(1), "Task GPU demand should be 1000.0") },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(1), "Task GPU supplied should be 1000.0") },
// host
{ assertEquals(1000.0, monitor.hostGpuDemands["DualGpuHost"]?.get(1)?.get(0), "GPU 0 demand at host should be 1000.0") },
{ assertEquals(1000.0, monitor.hostGpuSupplied["DualGpuHost"]?.get(1)?.get(0), "GPU 0 supplied at host should be 1000.0") },
@@ -573,18 +595,22 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 3500.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 3500.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 500.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 500.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
)
@@ -595,10 +621,10 @@ class DistributionPoliciesTest {
// Best effort should prioritize the high-demand task differently than equal share
assertAll(
// Best effort should allocate more to high-demand task compared to equal share
- { assertEquals(3500.0, monitor.taskGpuDemands["0"]?.get(1), "Task 0 demand should be 3500.0") },
- { assertEquals(3500.0, monitor.taskGpuDemands["0"]?.get(1), "Task 0 supply should be 3500.0") },
- { assertEquals(500.0, monitor.taskGpuDemands["1"]?.get(1), "Task 1 demand should be 500.0") },
- { assertEquals(500.0, monitor.taskGpuSupplied["1"]?.get(1), "Task 1 supply should be 500.0") },
+ { assertEquals(3500.0, monitor.taskGpuDemands[0]?.get(1), "Task 0 demand should be 3500.0") },
+ { assertEquals(3500.0, monitor.taskGpuSupplied[0]?.get(1), "Task 0 supply should be 3500.0") },
+ { assertEquals(500.0, monitor.taskGpuDemands[1]?.get(1), "Task 1 demand should be 500.0") },
+ { assertEquals(500.0, monitor.taskGpuSupplied[1]?.get(1), "Task 1 supply should be 500.0") },
// Host
{ assertEquals(2000.0, monitor.hostGpuDemands["DualGpuHost"]?.get(1)?.get(0), "GPU 0 demand at host should be 2000.0") },
{ assertEquals(2000.0, monitor.hostGpuSupplied["DualGpuHost"]?.get(1)?.get(0), "GPU 0 supplied at host should be 2000.0") },
@@ -616,25 +642,31 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 2000.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 2000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 2000.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 2000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
createTestTask(
- name = "2",
+ id = 2,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 2000.0, 2),
+ TraceFragment(10 * 60 * 1000, 0.0, 2000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
)
@@ -645,21 +677,21 @@ class DistributionPoliciesTest {
// Best effort should distribute fairly among all tasks in a round-robin manner
assertAll(
// Task Demands at start
- { assertEquals(2000.0, monitor.taskGpuDemands["0"]?.get(1), "Task 0 demand should be 2000.0") },
- { assertEquals(2000.0, monitor.taskGpuDemands["1"]?.get(1), "Task 1 demand should be 2000.0") },
- { assertEquals(2000.0, monitor.taskGpuDemands["2"]?.get(1), "Task 2 demand should be 2000.0") },
+ { assertEquals(2000.0, monitor.taskGpuDemands[0]?.get(1), "Task 0 demand should be 2000.0") },
+ { assertEquals(2000.0, monitor.taskGpuDemands[1]?.get(1), "Task 1 demand should be 2000.0") },
+ { assertEquals(2000.0, monitor.taskGpuDemands[2]?.get(1), "Task 2 demand should be 2000.0") },
// Task supplies at start
- { assertEquals(2000.0, monitor.taskGpuSupplied["0"]?.get(1), "Task 0 supply at the start should be 2000.0") },
- { assertEquals(0.0, monitor.taskGpuSupplied["1"]?.get(1), "Task 1 supply at the start should be 2000.0") },
- { assertEquals(2000.0, monitor.taskGpuSupplied["2"]?.get(1), "Task 2 supply at the start should be 0.0") },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[0]?.get(1), "Task 0 supply at the start should be 2000.0") },
+ { assertEquals(0.0, monitor.taskGpuSupplied[1]?.get(1), "Task 1 supply at the start should be 0.0") },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[2]?.get(1), "Task 2 supply at the start should be 2000.0") },
// Task supplies second step
- { assertEquals(0.0, monitor.taskGpuSupplied["0"]?.get(2), "Task 0 supply at the second step should be 2000.0") },
- { assertEquals(2000.0, monitor.taskGpuSupplied["1"]?.get(2), "Task 1 supply at the second step should be 0.0") },
- { assertEquals(2000.0, monitor.taskGpuSupplied["2"]?.get(2), "Task 2 supply at the second step should be 2000.0") },
+ { assertEquals(0.0, monitor.taskGpuSupplied[0]?.get(2), "Task 0 supply at the second step should be 0.0") },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[1]?.get(2), "Task 1 supply at the second step should be 2000.0") },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[2]?.get(2), "Task 2 supply at the second step should be 2000.0") },
// Task supplies third step
- { assertEquals(2000.0, monitor.taskGpuSupplied["0"]?.get(3), "Task 0 supply at the third step should be 2000.0") },
- { assertEquals(2000.0, monitor.taskGpuSupplied["1"]?.get(3), "Task 1 supply at the third step should be 2000.0") },
- { assertEquals(0.0, monitor.taskGpuSupplied["2"]?.get(3), "Task 2 supply at the third step should be 0.0") },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[0]?.get(3), "Task 0 supply at the third step should be 2000.0") },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[1]?.get(3), "Task 1 supply at the third step should be 2000.0") },
+ { assertEquals(0.0, monitor.taskGpuSupplied[2]?.get(3), "Task 2 supply at the third step should be 0.0") },
// Host
// At start
{ assertEquals(3000.0, monitor.hostGpuDemands["DualGpuHost"]?.get(1)?.get(0), "GPU 0 demand at host should be 3000.0") },
@@ -689,18 +721,22 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1500.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1500.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
),
+ cpuCount = 0,
+ gpuCount = 2,
),
)
@@ -711,11 +747,11 @@ class DistributionPoliciesTest {
// Total demand (2500.0) is less than total capacity (4000.0), so all should be satisfied
assertAll(
// Task demands should remain as requested
- { assertEquals(1500.0, monitor.taskGpuDemands["0"]?.get(1), "Task 0 GPU demand should be 1500.0") },
- { assertEquals(1000.0, monitor.taskGpuDemands["1"]?.get(1), "Task 1 GPU demand should be 1000.0") },
+ { assertEquals(1500.0, monitor.taskGpuDemands[0]?.get(1), "Task 0 GPU demand should be 1500.0") },
+ { assertEquals(1000.0, monitor.taskGpuDemands[1]?.get(1), "Task 1 GPU demand should be 1000.0") },
// All tasks should be fully satisfied
- { assertEquals(1500.0, monitor.taskGpuSupplied["0"]?.get(1), "Task 0 GPU supply should be 1500.0") },
- { assertEquals(1000.0, monitor.taskGpuSupplied["1"]?.get(1), "Task 1 GPU supply should be 1000.0") },
+ { assertEquals(1500.0, monitor.taskGpuSupplied[0]?.get(1), "Task 0 GPU supply should be 1500.0") },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[1]?.get(1), "Task 1 GPU supply should be 1000.0") },
// First GPU should handle both tasks (total 2500.0, within its 2000.0 capacity limit per task)
{ assertEquals(2000.0, monitor.hostGpuDemands["DualGpuHost"]?.get(1)?.get(0), "GPU 0 demand should be 2000.0") },
{ assertEquals(2000.0, monitor.hostGpuSupplied["DualGpuHost"]?.get(1)?.get(0), "GPU 0 supply should be 2000.0") },
@@ -735,25 +771,31 @@ class DistributionPoliciesTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 2000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 2000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
createTestTask(
- name = "2",
+ id = 2,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1500.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1500.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
)
@@ -764,14 +806,14 @@ class DistributionPoliciesTest {
// only tasks that can fit on individual GPUs should be satisfied
assertAll(
// Task demands should remain as requested
- { assertEquals(2000.0, monitor.taskGpuDemands["0"]?.get(1), "Task 0 GPU demand should be 2000.0") },
- { assertEquals(2000.0, monitor.taskGpuDemands["1"]?.get(1), "Task 1 GPU demand should be 2000.0") },
- { assertEquals(1500.0, monitor.taskGpuDemands["2"]?.get(1), "Task 2 GPU demand should be 1500.0") },
+ { assertEquals(2000.0, monitor.taskGpuDemands[0]?.get(1), "Task 0 GPU demand should be 2000.0") },
+ { assertEquals(2000.0, monitor.taskGpuDemands[1]?.get(1), "Task 1 GPU demand should be 2000.0") },
+ { assertEquals(1500.0, monitor.taskGpuDemands[2]?.get(1), "Task 2 GPU demand should be 1500.0") },
// First two tasks should be satisfied (each fits on one GPU)
- { assertEquals(2000.0, monitor.taskGpuSupplied["0"]?.get(1), "Task 0 should be fully satisfied") },
- { assertEquals(2000.0, monitor.taskGpuSupplied["1"]?.get(1), "Task 1 should be fully satisfied") },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[0]?.get(1), "Task 0 should be fully satisfied") },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[1]?.get(1), "Task 1 should be fully satisfied") },
// Third task should receive no supply as no single GPU can satisfy it after first two are allocated
- { assertEquals(0.0, monitor.taskGpuSupplied["2"]?.get(1), "Task 2 should receive no supply") },
+ { assertEquals(0.0, monitor.taskGpuSupplied[2]?.get(1), "Task 2 should receive no supply") },
// Both GPUs should be fully utilized by the first two tasks
{ assertEquals(2000.0, monitor.hostGpuDemands["DualGpuHost"]?.get(1)?.get(0), "GPU 0 should have 2000.0 demand") },
{ assertEquals(2000.0, monitor.hostGpuSupplied["DualGpuHost"]?.get(1)?.get(0), "GPU 0 should supply 2000.0") },
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt
index 582fdbee..28096bb8 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/ExperimentTest.kt
@@ -52,11 +52,12 @@ class ExperimentTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
)
@@ -88,18 +89,20 @@ class ExperimentTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(5 * 60 * 1000, 2000.0, 1),
+ TraceFragment(5 * 60 * 1000, 2000.0),
),
+ cpuCount = 1,
),
)
@@ -139,18 +142,20 @@ class ExperimentTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
)
@@ -182,18 +187,20 @@ class ExperimentTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(5 * 60 * 1000, 2000.0, 1),
+ TraceFragment(5 * 60 * 1000, 2000.0),
),
+ cpuCount = 1,
submissionTime = "1970-01-01T00:20",
),
)
@@ -226,11 +233,13 @@ class ExperimentTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
)
@@ -272,11 +281,13 @@ class ExperimentTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 1000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
)
@@ -317,11 +328,13 @@ class ExperimentTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 2000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
)
@@ -361,11 +374,13 @@ class ExperimentTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 2000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 2000.0, 1000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
)
val topology = createTopology("Gpus/single_gpu_no_vendor_no_memory.json")
@@ -404,18 +419,22 @@ class ExperimentTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 1000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 1000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
)
@@ -455,18 +474,22 @@ class ExperimentTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 0.0, 0),
+ TraceFragment(10 * 60 * 1000, 1000.0, 0.0),
),
+ cpuCount = 1,
+ gpuCount = 0,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
)
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FailuresAndCheckpointingTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FailuresAndCheckpointingTest.kt
index 4278ca41..892ea1c1 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FailuresAndCheckpointingTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FailuresAndCheckpointingTest.kt
@@ -50,11 +50,12 @@ class FailuresAndCheckpointingTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
)
@@ -90,11 +91,12 @@ class FailuresAndCheckpointingTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
)
@@ -133,11 +135,12 @@ class FailuresAndCheckpointingTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
)
@@ -178,11 +181,12 @@ class FailuresAndCheckpointingTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
)
@@ -237,11 +241,12 @@ class FailuresAndCheckpointingTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
checkpointInterval = 60 * 1000L,
checkpointDuration = 1000L,
),
@@ -291,12 +296,13 @@ class FailuresAndCheckpointingTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 2000.0, 1),
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 2000.0),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
checkpointInterval = 60 * 1000L,
checkpointDuration = 1000L,
),
@@ -344,12 +350,13 @@ class FailuresAndCheckpointingTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
- TraceFragment(10 * 60 * 1000, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
+ TraceFragment(10 * 60 * 1000, 2000.0),
),
+ cpuCount = 1,
checkpointInterval = 60 * 1000L,
checkpointDuration = 1000L,
),
@@ -393,11 +400,12 @@ class FailuresAndCheckpointingTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
checkpointInterval = 60 * 1000L,
checkpointDuration = 1000L,
checkpointIntervalScaling = 1.5,
@@ -435,11 +443,12 @@ class FailuresAndCheckpointingTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
checkpointInterval = 60 * 1000L,
checkpointDuration = 1000L,
),
@@ -477,11 +486,12 @@ class FailuresAndCheckpointingTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
checkpointInterval = 60 * 1000L,
checkpointDuration = 1000L,
),
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FlowDistributorTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FlowDistributorTest.kt
index 606ba571..53ff068e 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FlowDistributorTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FlowDistributorTest.kt
@@ -43,12 +43,13 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
- TraceFragment(10 * 60 * 1000, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
+ TraceFragment(10 * 60 * 1000, 2000.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("single_1_2000.json")
@@ -56,10 +57,10 @@ class FlowDistributorTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands[0]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(10)) { "The cpu used by task 0 is incorrect" } },
{ assertEquals(1000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(2000.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(1000.0, monitor.hostCpuSupplied["H01"]?.get(1)) { "The cpu used by the host is incorrect" } },
@@ -78,12 +79,13 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 3000.0, 1),
- TraceFragment(10 * 60 * 1000, 4000.0, 1),
+ TraceFragment(10 * 60 * 1000, 3000.0),
+ TraceFragment(10 * 60 * 1000, 4000.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("single_1_2000.json")
@@ -91,10 +93,10 @@ class FlowDistributorTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(4000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(4000.0, monitor.taskCpuDemands[0]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(10)) { "The cpu used by task 0 is incorrect" } },
{ assertEquals(3000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(4000.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(2000.0, monitor.hostCpuSupplied["H01"]?.get(1)) { "The cpu used by the host is incorrect" } },
@@ -113,12 +115,13 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
- TraceFragment(10 * 60 * 1000, 4000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
+ TraceFragment(10 * 60 * 1000, 4000.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("single_1_2000.json")
@@ -126,10 +129,10 @@ class FlowDistributorTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(4000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(4000.0, monitor.taskCpuDemands[0]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(10)) { "The cpu used by task 0 is incorrect" } },
{ assertEquals(1000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(4000.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(1000.0, monitor.hostCpuSupplied["H01"]?.get(1)) { "The cpu used by the host is incorrect" } },
@@ -148,12 +151,13 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 4000.0, 1),
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 4000.0),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("single_1_2000.json")
@@ -161,10 +165,10 @@ class FlowDistributorTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(4000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(4000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(10)) { "The cpu used by task 0 is incorrect" } },
{ assertEquals(4000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(1000.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(2000.0, monitor.hostCpuSupplied["H01"]?.get(1)) { "The cpu used by the host is incorrect" } },
@@ -183,12 +187,13 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 4000.0, 1),
- TraceFragment(10 * 60 * 1000, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 4000.0),
+ TraceFragment(10 * 60 * 1000, 2000.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("single_1_2000.json")
@@ -196,10 +201,10 @@ class FlowDistributorTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(4000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(4000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands[0]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(10)) { "The cpu used by task 0 is incorrect" } },
{ assertEquals(4000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(2000.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(2000.0, monitor.hostCpuSupplied["H01"]?.get(1)) { "The cpu used by the host is incorrect" } },
@@ -218,20 +223,22 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
- TraceFragment(10 * 60 * 1000, 3000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
+ TraceFragment(10 * 60 * 1000, 3000.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 3000.0, 1),
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 3000.0),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("single_2_2000.json")
@@ -239,14 +246,14 @@ class FlowDistributorTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuDemands["1"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["1"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[0]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied[0]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[1]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[1]?.get(10)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied[1]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[1]?.get(10)) { "The cpu used by task 1 is incorrect" } },
{ assertEquals(4000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(4000.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(4000.0, monitor.hostCpuSupplied["H01"]?.get(1)) { "The cpu used by the host is incorrect" } },
@@ -265,20 +272,22 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 6000.0, 1),
- TraceFragment(10 * 60 * 1000, 5000.0, 1),
+ TraceFragment(10 * 60 * 1000, 6000.0),
+ TraceFragment(10 * 60 * 1000, 5000.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 5000.0, 1),
- TraceFragment(10 * 60 * 1000, 6000.0, 1),
+ TraceFragment(10 * 60 * 1000, 5000.0),
+ TraceFragment(10 * 60 * 1000, 6000.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("single_2_2000.json")
@@ -286,14 +295,14 @@ class FlowDistributorTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(6000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(5000.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(5000.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(6000.0, monitor.taskCpuDemands["1"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["1"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(6000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(5000.0, monitor.taskCpuDemands[0]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(5000.0, monitor.taskCpuDemands[1]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(6000.0, monitor.taskCpuDemands[1]?.get(10)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[1]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[1]?.get(10)) { "The cpu used by task 1 is incorrect" } },
{ assertEquals(11000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(11000.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(4000.0, monitor.hostCpuSupplied["H01"]?.get(1)) { "The cpu used by the host is incorrect" } },
@@ -311,21 +320,23 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
submissionTime = "2024-02-01T10:00",
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
- TraceFragment(10 * 60 * 1000, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
+ TraceFragment(10 * 60 * 1000, 2000.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
submissionTime = "2024-02-01T10:05",
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 2000.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("single_2_2000.json")
@@ -333,18 +344,18 @@ class FlowDistributorTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(5)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuDemands["0"]?.get(14)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(5)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(14)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuDemands["1"]?.get(6)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 1 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["1"]?.get(6)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(5)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands[0]?.get(14)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(5)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(14)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands[1]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands[1]?.get(6)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[1]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[1]?.get(6)) { "The cpu used by task 1 is incorrect" } },
{ assertEquals(1000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(3000.0, monitor.hostCpuDemands["H01"]?.get(5)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(4000.0, monitor.hostCpuDemands["H01"]?.get(9)) { "The cpu demanded by the host is incorrect" } },
@@ -369,20 +380,22 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
submissionTime = "2024-02-01T10:00",
fragments =
arrayListOf(
- TraceFragment(20 * 60 * 1000, 3000.0, 1),
+ TraceFragment(20 * 60 * 1000, 3000.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
submissionTime = "2024-02-01T10:05",
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1500.0, 1),
+ TraceFragment(10 * 60 * 1000, 1500.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("single_2_2000.json")
@@ -390,18 +403,18 @@ class FlowDistributorTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(5)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuDemands["0"]?.get(14)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2500.0, monitor.taskCpuSupplied["0"]?.get(5)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2500.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuSupplied["0"]?.get(14)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(1500.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(1500.0, monitor.taskCpuDemands["1"]?.get(6)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(1500.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 1 is incorrect" } },
- { assertEquals(1500.0, monitor.taskCpuSupplied["1"]?.get(6)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[0]?.get(5)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[0]?.get(14)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2500.0, monitor.taskCpuSupplied[0]?.get(5)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2500.0, monitor.taskCpuSupplied[0]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied[0]?.get(14)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuDemands[1]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuDemands[1]?.get(6)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuSupplied[1]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuSupplied[1]?.get(6)) { "The cpu used by task 1 is incorrect" } },
{ assertEquals(3000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(4500.0, monitor.hostCpuDemands["H01"]?.get(5)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(3000.0, monitor.hostCpuDemands["H01"]?.get(14)) { "The cpu demanded by the host is incorrect" } },
@@ -423,21 +436,23 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(5 * 60 * 1000, 1000.0, 1),
- TraceFragment(5 * 60 * 1000, 1500.0, 1),
- TraceFragment(5 * 60 * 1000, 2500.0, 1),
- TraceFragment(5 * 60 * 1000, 1000.0, 1),
+ TraceFragment(5 * 60 * 1000, 1000.0),
+ TraceFragment(5 * 60 * 1000, 1500.0),
+ TraceFragment(5 * 60 * 1000, 2500.0),
+ TraceFragment(5 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(20 * 60 * 1000, 3000.0, 1),
+ TraceFragment(20 * 60 * 1000, 3000.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("single_2_2000.json")
@@ -445,22 +460,22 @@ class FlowDistributorTest {
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1500.0, monitor.taskCpuDemands["0"]?.get(5)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2500.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(14)) { "The cpu demanded is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(1500.0, monitor.taskCpuSupplied["0"]?.get(5)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(14)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuDemands["1"]?.get(5)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuDemands["1"]?.get(9)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuDemands["1"]?.get(14)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 1 is incorrect" } },
- { assertEquals(2500.0, monitor.taskCpuSupplied["1"]?.get(5)) { "The cpu used by task 1 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["1"]?.get(9)) { "The cpu used by task 1 is incorrect" } },
- { assertEquals(3000.0, monitor.taskCpuSupplied["1"]?.get(14)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuDemands[0]?.get(5)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2500.0, monitor.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(14)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1500.0, monitor.taskCpuSupplied[0]?.get(5)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(14)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[1]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[1]?.get(5)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[1]?.get(9)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuDemands[1]?.get(14)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied[1]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(2500.0, monitor.taskCpuSupplied[1]?.get(5)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[1]?.get(9)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(3000.0, monitor.taskCpuSupplied[1]?.get(14)) { "The cpu used by task 1 is incorrect" } },
{ assertEquals(4000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(4500.0, monitor.hostCpuDemands["H01"]?.get(5)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(5500.0, monitor.hostCpuDemands["H01"]?.get(9)) { "The cpu demanded by the host is incorrect" } },
@@ -484,11 +499,12 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf<TraceFragment>().apply {
- repeat(1) { this.add(TraceFragment(10 * 60 * 1000, 3000.0, 1)) }
+ repeat(1) { this.add(TraceFragment(10 * 60 * 1000, 3000.0)) }
},
+ cpuCount = 1,
),
)
val topology = createTopology("single_5000_2000.json")
@@ -512,11 +528,12 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf<TraceFragment>().apply {
- repeat(1000) { this.add(TraceFragment(10 * 60 * 1000, 2000.0, 1)) }
+ repeat(1000) { this.add(TraceFragment(10 * 60 * 1000, 2000.0)) }
},
+ cpuCount = 1,
),
)
val topology = createTopology("single_1_2000.json")
@@ -542,9 +559,10 @@ class FlowDistributorTest {
repeat(1000) {
this.add(
createTestTask(
- name = "0",
+ id = 0,
fragments =
- arrayListOf(TraceFragment(10 * 60 * 1000, 2000.0, 1)),
+ arrayListOf(TraceFragment(10 * 60 * 1000, 2000.0)),
+ cpuCount = 1,
),
)
}
@@ -568,12 +586,13 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
- TraceFragment(10 * 60 * 1000, 0.0, 0, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
+ TraceFragment(10 * 60 * 1000, 0.0, 2000.0),
),
+ cpuCount = 1,
),
)
@@ -584,10 +603,10 @@ class FlowDistributorTest {
assertAll(
// CPU
// task
- { assertEquals(0.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuDemands["0"]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["0"]?.get(10)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[0]?.get(10)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[0]?.get(10)) { "The cpu used by task 0 is incorrect" } },
// host
{ assertEquals(0.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(0.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
@@ -595,10 +614,10 @@ class FlowDistributorTest {
{ assertEquals(0.0, monitor.hostCpuSupplied["H01"]?.get(10)) { "The cpu used by the host is incorrect" } },
// GPU
// task
- { assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(1)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuDemands["0"]?.get(10)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(1)) { "The gpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuSupplied["0"]?.get(10)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(1)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuDemands[0]?.get(10)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(1)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[0]?.get(10)) { "The gpu used by task 0 is incorrect" } },
// host
{ assertEquals(1000.0, monitor.hostGpuDemands["H01"]?.get(1)?.get(0)) { "The gpu demanded by the host is incorrect" } },
{ assertEquals(2000.0, monitor.hostGpuDemands["H01"]?.get(10)?.get(0)) { "The gpu demanded by the host is incorrect" } },
@@ -617,11 +636,12 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 1000.0),
),
+ cpuCount = 1,
),
)
@@ -632,10 +652,10 @@ class FlowDistributorTest {
assertAll(
// CPU
// task
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(0)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(0)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[0]?.get(9)) { "The cpu used by task 0 is incorrect" } },
// host
{ assertEquals(1000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(0.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
@@ -643,10 +663,10 @@ class FlowDistributorTest {
{ assertEquals(0.0, monitor.hostCpuSupplied["H01"]?.get(10)) { "The cpu used by the host is incorrect" } },
// GPU
// task
- { assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(0)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(9)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(0)) { "The gpu used by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(9)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(0)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(9)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(0)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(9)) { "The gpu used by task 0 is incorrect" } },
// host
{ assertEquals(1000.0, monitor.hostGpuDemands["H01"]?.get(1)?.get(0)) { "The gpu demanded by the host is incorrect" } },
{ assertEquals(1000.0, monitor.hostGpuSupplied["H01"]?.get(1)?.get(0)) { "The gpu used by the host is incorrect" } },
@@ -665,11 +685,12 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 2000.0),
),
+ cpuCount = 1,
),
)
@@ -680,10 +701,10 @@ class FlowDistributorTest {
assertAll(
// CPU
// task
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(0)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(0)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[0]?.get(9)) { "The cpu used by task 0 is incorrect" } },
// host
{ assertEquals(1000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(0.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
@@ -691,10 +712,10 @@ class FlowDistributorTest {
{ assertEquals(0.0, monitor.hostCpuSupplied["H01"]?.get(10)) { "The cpu used by the host is incorrect" } },
// GPU
// task
- { assertEquals(2000.0, monitor.taskGpuDemands["0"]?.get(0)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuDemands["0"]?.get(9)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuSupplied["0"]?.get(0)) { "The gpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuSupplied["0"]?.get(9)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuDemands[0]?.get(0)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuDemands[0]?.get(9)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[0]?.get(0)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[0]?.get(9)) { "The gpu used by task 0 is incorrect" } },
// host
{ assertEquals(2000.0, monitor.hostGpuDemands["H01"]?.get(1)?.get(0)) { "The gpu demanded by the host is incorrect" } },
{ assertEquals(0.0, monitor.hostGpuDemands["H01"]?.get(10)?.get(0)) { "The gpu demanded by the host is incorrect" } },
@@ -713,11 +734,12 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 2000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 2000.0, 1000.0),
),
+ cpuCount = 1,
),
)
val topology = createTopology("Gpus/single_gpu_no_vendor_no_memory.json")
@@ -726,10 +748,10 @@ class FlowDistributorTest {
assertAll(
// CPU
// task
- { assertEquals(2000.0, monitor.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskCpuSupplied["0"]?.get(0)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskCpuSupplied[0]?.get(0)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[0]?.get(9)) { "The cpu used by task 0 is incorrect" } },
// host
{ assertEquals(2000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(0.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
@@ -737,10 +759,10 @@ class FlowDistributorTest {
{ assertEquals(0.0, monitor.hostCpuSupplied["H01"]?.get(10)) { "The cpu used by the host is incorrect" } },
// GPU
// task
- { assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(1)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(9)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(1)) { "The gpu used by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(9)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(1)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(9)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(1)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(9)) { "The gpu used by task 0 is incorrect" } },
// host
{ assertEquals(1000.0, monitor.hostGpuDemands["H01"]?.get(1)?.get(0)) { "The gpu demanded by the host is incorrect" } },
{ assertEquals(0.0, monitor.hostGpuDemands["H01"]?.get(10)?.get(0)) { "The gpu demanded by the host is incorrect" } },
@@ -761,18 +783,20 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 1000.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 1000.0),
),
+ cpuCount = 1,
),
)
@@ -781,17 +805,17 @@ class FlowDistributorTest {
assertAll(
// CPU
// task 0
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(0)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(0)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[0]?.get(9)) { "The cpu used by task 0 is incorrect" } },
// task 1
- { assertEquals(0.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuDemands["1"]?.get(10)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuDemands["1"]?.get(19)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["1"]?.get(10)) { "The cpu used by task 1 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["1"]?.get(19)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[1]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[1]?.get(10)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[1]?.get(19)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[1]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[1]?.get(10)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[1]?.get(19)) { "The cpu used by task 1 is incorrect" } },
// host
{ assertEquals(1000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(1000.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
@@ -799,17 +823,17 @@ class FlowDistributorTest {
{ assertEquals(1000.0, monitor.hostCpuSupplied["H01"]?.get(10)) { "The cpu used by the host is incorrect" } },
// GPU
// task 0
- { assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(0)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(9)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(0)) { "The gpu used by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(9)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(0)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(9)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(0)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(9)) { "The gpu used by task 0 is incorrect" } },
// task 1
- { assertEquals(0.0, monitor.taskGpuDemands["1"]?.get(0)) { "The gpu demanded by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuDemands["1"]?.get(10)) { "The gpu demanded by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuDemands["1"]?.get(19)) { "The gpu used by task 1 is incorrect" } },
- { assertEquals(0.0, monitor.taskGpuSupplied["1"]?.get(0)) { "The gpu used by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["1"]?.get(10)) { "The gpu used by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["1"]?.get(19)) { "The gpu used by task 1 is incorrect" } },
+ { assertEquals(0.0, monitor.taskGpuDemands[1]?.get(0)) { "The gpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[1]?.get(10)) { "The gpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[1]?.get(19)) { "The gpu demanded by task 1 is incorrect" } },
+ { assertEquals(0.0, monitor.taskGpuSupplied[1]?.get(0)) { "The gpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[1]?.get(10)) { "The gpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[1]?.get(19)) { "The gpu used by task 1 is incorrect" } },
// host
{ assertEquals(1000.0, monitor.hostGpuDemands["H01"]?.get(1)?.get(0)) { "The gpu demanded by the host is incorrect" } },
{ assertEquals(1000.0, monitor.hostGpuDemands["H01"]?.get(10)?.get(0)) { "The gpu demanded by the host is incorrect" } },
@@ -828,18 +852,21 @@ class FlowDistributorTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 0.0, 0),
+ TraceFragment(10 * 60 * 1000, 1000.0, 0.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
)
@@ -849,15 +876,15 @@ class FlowDistributorTest {
assertAll(
// CPU
// task 0
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(0)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["0"]?.get(9)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(0)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[0]?.get(9)) { "The cpu used by task 0 is incorrect" } },
// task 1
- { assertEquals(0.0, monitor.taskCpuDemands["1"]?.get(0)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuDemands["1"]?.get(9)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["1"]?.get(0)) { "The cpu used by task 1 is incorrect" } },
- { assertEquals(0.0, monitor.taskCpuSupplied["1"]?.get(9)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[1]?.get(0)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuDemands[1]?.get(9)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[1]?.get(0)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(0.0, monitor.taskCpuSupplied[1]?.get(9)) { "The cpu used by task 1 is incorrect" } },
// host
{ assertEquals(1000.0, monitor.hostCpuDemands["H01"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(0.0, monitor.hostCpuDemands["H01"]?.get(10)) { "The cpu demanded by the host is incorrect" } },
@@ -865,15 +892,15 @@ class FlowDistributorTest {
{ assertEquals(0.0, monitor.hostCpuSupplied["H01"]?.get(10)) { "The cpu used by the host is incorrect" } },
// GPU
// task 0
- { assertEquals(0.0, monitor.taskGpuDemands["0"]?.get(0)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskGpuDemands["0"]?.get(9)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskGpuSupplied["0"]?.get(0)) { "The gpu used by task 0 is incorrect" } },
- { assertEquals(0.0, monitor.taskGpuSupplied["0"]?.get(9)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskGpuDemands[0]?.get(0)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskGpuDemands[0]?.get(9)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskGpuSupplied[0]?.get(0)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(0.0, monitor.taskGpuSupplied[0]?.get(9)) { "The gpu used by task 0 is incorrect" } },
// task 1
- { assertEquals(1000.0, monitor.taskGpuDemands["1"]?.get(0)) { "The gpu demanded by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuDemands["1"]?.get(9)) { "The gpu demanded by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["1"]?.get(0)) { "The gpu used by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskGpuSupplied["1"]?.get(9)) { "The gpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[1]?.get(0)) { "The gpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuDemands[1]?.get(9)) { "The gpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[1]?.get(0)) { "The gpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskGpuSupplied[1]?.get(9)) { "The gpu used by task 1 is incorrect" } },
// host
{ assertEquals(1000.0, monitor.hostGpuDemands["H01"]?.get(1)?.get(0)) { "The gpu demanded by the host is incorrect" } },
{ assertEquals(0.0, monitor.hostGpuDemands["H01"]?.get(10)?.get(0)) { "The gpu demanded by the host is incorrect" } },
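The last hunk above is where the new task-level counts do real work: a CPU-only task keeps the default gpuCount = 0, while a GPU-only task sets cpuCount = 0 and gpuCount = 1 explicitly. A minimal sketch of that pairing, assuming the createTestTask helper shown in the TestingUtils.kt hunks below:

    // CPU-only task: gpuCount defaults to 0, so only cpuCount is passed.
    val cpuOnly =
        createTestTask(
            id = 0,
            fragments = arrayListOf(TraceFragment(10 * 60 * 1000, 1000.0, 0.0)),
            cpuCount = 1,
        )

    // GPU-only task: no CPU cores requested, one GPU requested explicitly.
    val gpuOnly =
        createTestTask(
            id = 1,
            fragments = arrayListOf(TraceFragment(10 * 60 * 1000, 0.0, 1000.0)),
            cpuCount = 0,
            gpuCount = 1,
        )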
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FragmentScalingTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FragmentScalingTest.kt
index b0aa3555..ce26cc3a 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FragmentScalingTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/FragmentScalingTest.kt
@@ -45,12 +45,13 @@ class FragmentScalingTest {
val workloadNoDelay: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 2000.0, 1),
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 2000.0),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
scalingPolicy = NoDelayScaling(),
),
)
@@ -58,12 +59,13 @@ class FragmentScalingTest {
val workloadPerfect: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 2000.0, 1),
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 2000.0),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
scalingPolicy = PerfectScaling(),
),
)
@@ -75,14 +77,14 @@ class FragmentScalingTest {
assertAll(
{ assertEquals(1200000, monitorNoDelay.maxTimestamp) { "The workload took longer to finish than expected." } },
{ assertEquals(1200000, monitorPerfect.maxTimestamp) { "The workload took longer to finish than expected." } },
- { assertEquals(2000.0, monitorNoDelay.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitorPerfect.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitorNoDelay.taskCpuSupplied["0"]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(2000.0, monitorPerfect.taskCpuSupplied["0"]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(1000.0, monitorNoDelay.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitorPerfect.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitorNoDelay.taskCpuSupplied["0"]?.get(9)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(1000.0, monitorPerfect.taskCpuSupplied["0"]?.get(9)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(2000.0, monitorNoDelay.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitorPerfect.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitorNoDelay.taskCpuSupplied[0]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(2000.0, monitorPerfect.taskCpuSupplied[0]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(1000.0, monitorNoDelay.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitorPerfect.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitorNoDelay.taskCpuSupplied[0]?.get(9)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(1000.0, monitorPerfect.taskCpuSupplied[0]?.get(9)) { "The cpu supplied to task 0 is incorrect" } },
)
}
@@ -99,11 +101,12 @@ class FragmentScalingTest {
val workloadNoDelay: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 4000.0, 1),
+ TraceFragment(10 * 60 * 1000, 4000.0),
),
+ cpuCount = 1,
scalingPolicy = NoDelayScaling(),
),
)
@@ -111,11 +114,12 @@ class FragmentScalingTest {
val workloadPerfect: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 4000.0, 1),
+ TraceFragment(10 * 60 * 1000, 4000.0),
),
+ cpuCount = 1,
scalingPolicy = PerfectScaling(),
),
)
@@ -127,10 +131,10 @@ class FragmentScalingTest {
assertAll(
{ assertEquals(600000, monitorNoDelay.maxTimestamp) { "The workload took longer to finish than expected." } },
{ assertEquals(1200000, monitorPerfect.maxTimestamp) { "The workload took longer to finish than expected." } },
- { assertEquals(4000.0, monitorNoDelay.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(4000.0, monitorPerfect.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitorNoDelay.taskCpuSupplied["0"]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(2000.0, monitorPerfect.taskCpuSupplied["0"]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(4000.0, monitorNoDelay.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(4000.0, monitorPerfect.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitorNoDelay.taskCpuSupplied[0]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(2000.0, monitorPerfect.taskCpuSupplied[0]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
)
}
@@ -148,13 +152,14 @@ class FragmentScalingTest {
val workloadNoDelay: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
- TraceFragment(10 * 60 * 1000, 4000.0, 1),
- TraceFragment(10 * 60 * 1000, 1500.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
+ TraceFragment(10 * 60 * 1000, 4000.0),
+ TraceFragment(10 * 60 * 1000, 1500.0),
),
+ cpuCount = 1,
scalingPolicy = NoDelayScaling(),
),
)
@@ -162,13 +167,14 @@ class FragmentScalingTest {
val workloadPerfect: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
- TraceFragment(10 * 60 * 1000, 4000.0, 1),
- TraceFragment(10 * 60 * 1000, 1500.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
+ TraceFragment(10 * 60 * 1000, 4000.0),
+ TraceFragment(10 * 60 * 1000, 1500.0),
),
+ cpuCount = 1,
scalingPolicy = PerfectScaling(),
),
)
@@ -180,20 +186,20 @@ class FragmentScalingTest {
assertAll(
{ assertEquals(1800000, monitorNoDelay.maxTimestamp) { "The workload took longer to finish than expected." } },
{ assertEquals(2400000, monitorPerfect.maxTimestamp) { "The workload took longer to finish than expected." } },
- { assertEquals(1000.0, monitorNoDelay.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitorPerfect.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitorNoDelay.taskCpuSupplied["0"]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(1000.0, monitorPerfect.taskCpuSupplied["0"]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(4000.0, monitorNoDelay.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(4000.0, monitorPerfect.taskCpuDemands["0"]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitorNoDelay.taskCpuSupplied["0"]?.get(9)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(2000.0, monitorPerfect.taskCpuSupplied["0"]?.get(9)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(1500.0, monitorNoDelay.taskCpuDemands["0"]?.get(19)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(4000.0, monitorPerfect.taskCpuDemands["0"]?.get(19)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1500.0, monitorNoDelay.taskCpuSupplied["0"]?.get(19)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitorPerfect.taskCpuSupplied["0"]?.get(19)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(1500.0, monitorPerfect.taskCpuDemands["0"]?.get(29)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1500.0, monitorPerfect.taskCpuSupplied["0"]?.get(29)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(1000.0, monitorNoDelay.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitorPerfect.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitorNoDelay.taskCpuSupplied[0]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(1000.0, monitorPerfect.taskCpuSupplied[0]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(4000.0, monitorNoDelay.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(4000.0, monitorPerfect.taskCpuDemands[0]?.get(9)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitorNoDelay.taskCpuSupplied[0]?.get(9)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(2000.0, monitorPerfect.taskCpuSupplied[0]?.get(9)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(1500.0, monitorNoDelay.taskCpuDemands[0]?.get(19)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(4000.0, monitorPerfect.taskCpuDemands[0]?.get(19)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1500.0, monitorNoDelay.taskCpuSupplied[0]?.get(19)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(2000.0, monitorPerfect.taskCpuSupplied[0]?.get(19)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(1500.0, monitorPerfect.taskCpuDemands[0]?.get(29)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1500.0, monitorPerfect.taskCpuSupplied[0]?.get(29)) { "The cpu supplied to task 0 is incorrect" } },
)
}
@@ -208,19 +214,21 @@ class FragmentScalingTest {
val workloadNoDelay: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
scalingPolicy = NoDelayScaling(),
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 3000.0, 1),
+ TraceFragment(10 * 60 * 1000, 3000.0),
),
+ cpuCount = 1,
scalingPolicy = NoDelayScaling(),
),
)
@@ -228,19 +236,21 @@ class FragmentScalingTest {
val workloadPerfect: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
scalingPolicy = PerfectScaling(),
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 3000.0, 1),
+ TraceFragment(10 * 60 * 1000, 3000.0),
),
+ cpuCount = 1,
scalingPolicy = PerfectScaling(),
),
)
@@ -252,14 +262,14 @@ class FragmentScalingTest {
assertAll(
{ assertEquals(600000, monitorNoDelay.maxTimestamp) { "The workload took longer to finish than expected." } },
{ assertEquals(600000, monitorPerfect.maxTimestamp) { "The workload took longer to finish than expected." } },
- { assertEquals(1000.0, monitorNoDelay.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(3000.0, monitorNoDelay.taskCpuDemands["1"]?.get(0)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(1000.0, monitorPerfect.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(3000.0, monitorPerfect.taskCpuDemands["1"]?.get(0)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(1000.0, monitorNoDelay.taskCpuSupplied["0"]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(3000.0, monitorNoDelay.taskCpuSupplied["1"]?.get(0)) { "The cpu supplied to task 1 is incorrect" } },
- { assertEquals(1000.0, monitorPerfect.taskCpuSupplied["0"]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
- { assertEquals(3000.0, monitorPerfect.taskCpuSupplied["1"]?.get(0)) { "The cpu supplied to task 1 is incorrect" } },
+ { assertEquals(1000.0, monitorNoDelay.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitorNoDelay.taskCpuDemands[1]?.get(0)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitorPerfect.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(3000.0, monitorPerfect.taskCpuDemands[1]?.get(0)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitorNoDelay.taskCpuSupplied[0]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(3000.0, monitorNoDelay.taskCpuSupplied[1]?.get(0)) { "The cpu supplied to task 1 is incorrect" } },
+ { assertEquals(1000.0, monitorPerfect.taskCpuSupplied[0]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
+ { assertEquals(3000.0, monitorPerfect.taskCpuSupplied[1]?.get(0)) { "The cpu supplied to task 1 is incorrect" } },
)
}
@@ -278,19 +288,21 @@ class FragmentScalingTest {
val workloadNoDelay: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 2000.0),
),
+ cpuCount = 1,
scalingPolicy = NoDelayScaling(),
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 4000.0, 1),
+ TraceFragment(10 * 60 * 1000, 4000.0),
),
+ cpuCount = 1,
scalingPolicy = NoDelayScaling(),
),
)
@@ -298,19 +310,21 @@ class FragmentScalingTest {
val workloadPerfect: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 2000.0),
),
+ cpuCount = 1,
scalingPolicy = PerfectScaling(),
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 4000.0, 1),
+ TraceFragment(10 * 60 * 1000, 4000.0),
),
+ cpuCount = 1,
scalingPolicy = PerfectScaling(),
),
)
@@ -323,15 +337,15 @@ class FragmentScalingTest {
// { assertEquals(600000, monitorNoDelay.maxTimestamp) { "The workload took longer to finish than expected." } },
// { assertEquals(900000, monitorPerfect.maxTimestamp) { "The workload took longer to finish than expected." } },
//
-// { assertEquals(1000.0, monitorNoDelay.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
-// { assertEquals(3000.0, monitorNoDelay.taskCpuDemands["1"]?.get(0)) { "The cpu demanded by task 1 is incorrect" } },
-// { assertEquals(1000.0, monitorPerfect.taskCpuDemands["0"]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
-// { assertEquals(3000.0, monitorPerfect.taskCpuDemands["1"]?.get(0)) { "The cpu demanded by task 1 is incorrect" } },
+// { assertEquals(1000.0, monitorNoDelay.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+// { assertEquals(3000.0, monitorNoDelay.taskCpuDemands[1]?.get(0)) { "The cpu demanded by task 1 is incorrect" } },
+// { assertEquals(1000.0, monitorPerfect.taskCpuDemands[0]?.get(0)) { "The cpu demanded by task 0 is incorrect" } },
+// { assertEquals(3000.0, monitorPerfect.taskCpuDemands[1]?.get(0)) { "The cpu demanded by task 1 is incorrect" } },
//
-// { assertEquals(1000.0, monitorNoDelay.taskCpuSupplied["0"]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
-// { assertEquals(3000.0, monitorNoDelay.taskCpuSupplied["1"]?.get(0)) { "The cpu supplied to task 1 is incorrect" } },
-// { assertEquals(1000.0, monitorPerfect.taskCpuSupplied["0"]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
-// { assertEquals(3000.0, monitorPerfect.taskCpuSupplied["1"]?.get(0)) { "The cpu supplied to task 1 is incorrect" } },
+// { assertEquals(1000.0, monitorNoDelay.taskCpuSupplied[0]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
+// { assertEquals(3000.0, monitorNoDelay.taskCpuSupplied[1]?.get(0)) { "The cpu supplied to task 1 is incorrect" } },
+// { assertEquals(1000.0, monitorPerfect.taskCpuSupplied[0]?.get(0)) { "The cpu supplied to task 0 is incorrect" } },
+// { assertEquals(3000.0, monitorPerfect.taskCpuSupplied[1]?.get(0)) { "The cpu supplied to task 1 is incorrect" } },
// )
}
}
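The migration applied across these test files is mechanical: the string task name becomes an integer id, the per-fragment core counts move to task-level cpuCount/gpuCount arguments, and the monitor series are keyed by that id. A minimal before/after sketch, assuming the helper signatures from the TestingUtils.kt hunks below:

    // Before: the core count travelled with every fragment and the task
    // was identified by a string name.
    val oldStyle =
        createTestTask(
            name = "0",
            fragments = arrayListOf(TraceFragment(10 * 60 * 1000, 2000.0, 1)),
        )

    // After: the task carries an integer id and a single task-level cpuCount;
    // fragments describe only duration and usage.
    val newStyle =
        createTestTask(
            id = 0,
            fragments = arrayListOf(TraceFragment(10 * 60 * 1000, 2000.0)),
            cpuCount = 1,
        )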
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/GpuTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/GpuTest.kt
index 2778e613..35da6944 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/GpuTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/GpuTest.kt
@@ -178,7 +178,6 @@ class GpuTest {
fun testGpuHostCreationMultiWithMemoryWithVendor() {
val topology = createTopology("Gpus/multi_gpu_full.json")
// temporary implementation, to account for GPU concatenation
- val count = 5
assertGpuConfiguration(
topology,
// cuda cores
@@ -203,18 +202,22 @@ class GpuTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 2000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 2000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
)
val topology = createTopology("Gpus/multi_gpu_host.json")
@@ -225,15 +228,15 @@ class GpuTest {
{ assertEquals(10 * 60 * 1000, monitor.maxTimestamp) { "The expected runtime is exceeded" } },
// CPU
// task 0
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuDemands["0"]?.get(8)) { "The cpu demanded by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(1)) { "The cpu used by task 0 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["0"]?.get(8)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(1)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[0]?.get(8)) { "The cpu demanded by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(1)) { "The cpu used by task 0 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[0]?.get(8)) { "The cpu used by task 0 is incorrect" } },
// task 1
- { assertEquals(1000.0, monitor.taskCpuDemands["1"]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuDemands["1"]?.get(8)) { "The cpu demanded by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["1"]?.get(1)) { "The cpu used by task 1 is incorrect" } },
- { assertEquals(1000.0, monitor.taskCpuSupplied["1"]?.get(8)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[1]?.get(1)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuDemands[1]?.get(8)) { "The cpu demanded by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[1]?.get(1)) { "The cpu used by task 1 is incorrect" } },
+ { assertEquals(1000.0, monitor.taskCpuSupplied[1]?.get(8)) { "The cpu used by task 1 is incorrect" } },
// host
{ assertEquals(2000.0, monitor.hostCpuDemands["DualGpuHost"]?.get(1)) { "The cpu demanded by the host is incorrect" } },
{ assertEquals(2000.0, monitor.hostCpuDemands["DualGpuHost"]?.get(9)) { "The cpu demanded by the host is incorrect" } },
@@ -241,15 +244,15 @@ class GpuTest {
{ assertEquals(2000.0, monitor.hostCpuSupplied["DualGpuHost"]?.get(9)) { "The cpu used by the host is incorrect" } },
// GPU
// task 0
- { assertEquals(2000.0, monitor.taskGpuDemands["0"]?.get(1)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuDemands["0"]?.get(8)) { "The gpu demanded by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuSupplied["0"]?.get(1)) { "The gpu used by task 0 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuSupplied["0"]?.get(8)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuDemands[0]?.get(1)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuDemands[0]?.get(8)) { "The gpu demanded by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[0]?.get(1)) { "The gpu used by task 0 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[0]?.get(8)) { "The gpu used by task 0 is incorrect" } },
// task 1
- { assertEquals(2000.0, monitor.taskGpuDemands["1"]?.get(1)) { "The gpu demanded by task 1 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuDemands["1"]?.get(8)) { "The gpu demanded by task 1 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuSupplied["1"]?.get(1)) { "The gpu used by task 1 is incorrect" } },
- { assertEquals(2000.0, monitor.taskGpuSupplied["1"]?.get(8)) { "The gpu used by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuDemands[1]?.get(1)) { "The gpu demanded by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuDemands[1]?.get(8)) { "The gpu demanded by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[1]?.get(1)) { "The gpu used by task 1 is incorrect" } },
+ { assertEquals(2000.0, monitor.taskGpuSupplied[1]?.get(8)) { "The gpu used by task 1 is incorrect" } },
// host
// GPU 0
{ assertEquals(2000.0, monitor.hostGpuDemands["DualGpuHost"]?.get(1)?.get(0)) { "The gpu demanded by the host is incorrect" } },
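The assertions in this file read the monitor series by integer task id. A hedged sketch of that lookup (the Int-keyed map types are an assumption based on the TestComputeMonitor declarations in TestingUtils.kt):

    // Sketch only: fetch the GPU demand of task `taskId` at sample `index`,
    // tolerating a missing task or sample the way the assertions above do.
    fun gpuDemandAt(
        monitor: TestComputeMonitor,
        taskId: Int,
        index: Int,
    ): Double? = monitor.taskGpuDemands[taskId]?.get(index)

    // e.g. gpuDemandAt(monitor, 0, 1) is expected to be 2000.0 above.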
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/SchedulerTest.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/SchedulerTest.kt
index efb83814..c7d32828 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/SchedulerTest.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/SchedulerTest.kt
@@ -43,18 +43,20 @@ class SchedulerTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0),
),
+ cpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(5 * 60 * 1000, 2000.0, 1),
+ TraceFragment(5 * 60 * 1000, 2000.0),
),
+ cpuCount = 1,
submissionTime = "1970-01-01T00:20",
),
)
@@ -92,18 +94,22 @@ class SchedulerTest {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 2000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 2000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 2000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
submissionTime = "1970-01-01T00:20",
),
)
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/TestingUtils.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/TestingUtils.kt
index c5411179..f34160f7 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/TestingUtils.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/TestingUtils.kt
@@ -52,7 +52,6 @@ import org.opendc.simulator.kotlin.runSimulation
import java.time.Duration
import java.time.LocalDateTime
import java.time.ZoneOffset
-import java.util.UUID
import kotlin.collections.ArrayList
/**
@@ -64,10 +63,13 @@ fun createTopology(name: String): List<ClusterSpec> {
}
fun createTestTask(
- name: String,
+ id: Int,
+ name: String = "",
memCapacity: Long = 0L,
submissionTime: String = "1970-01-01T00:00",
duration: Long = 0L,
+ cpuCount: Int = 1,
+ gpuCount: Int = 0,
fragments: ArrayList<TraceFragment>,
checkpointInterval: Long = 0L,
checkpointDuration: Long = 0L,
@@ -83,19 +85,19 @@ fun createTestTask(
}
return Task(
- UUID.nameUUIDFromBytes(name.toByteArray()),
+ id,
name,
LocalDateTime.parse(submissionTime).toInstant(ZoneOffset.UTC).toEpochMilli(),
duration,
emptySet(),
emptySet(),
- fragments.maxOf { it.cpuCoreCount() },
+ cpuCount,
fragments.maxOf { it.cpuUsage },
1800000.0,
memCapacity,
- gpuCount = fragments.maxOfOrNull { it.gpuCoreCount() } ?: 0,
+ gpuCount = gpuCount,
gpuCapacity = fragments.maxOfOrNull { it.gpuUsage } ?: 0.0,
- gpuMemCapacity = fragments.maxOfOrNull { it.gpuMemoryUsage } ?: 0L,
+ gpuMemCapacity = 0L,
"",
-1,
TraceWorkload(
@@ -104,7 +106,7 @@ fun createTestTask(
checkpointDuration,
checkpointIntervalScaling,
scalingPolicy,
- name,
+ id,
usedResources,
),
)
@@ -147,13 +149,13 @@ fun runTest(
}
class TestComputeMonitor : ComputeMonitor {
- var taskCpuDemands = mutableMapOf<String, ArrayList<Double>>()
- var taskCpuSupplied = mutableMapOf<String, ArrayList<Double>>()
- var taskGpuDemands = mutableMapOf<String, ArrayList<Double?>?>()
- var taskGpuSupplied = mutableMapOf<String, ArrayList<Double?>?>()
+ var taskCpuDemands = mutableMapOf<Int, ArrayList<Double>>()
+ var taskCpuSupplied = mutableMapOf<Int, ArrayList<Double>>()
+ var taskGpuDemands = mutableMapOf<Int, ArrayList<Double?>?>()
+ var taskGpuSupplied = mutableMapOf<Int, ArrayList<Double?>?>()
override fun record(reader: TaskTableReader) {
- val taskName: String = reader.taskInfo.name
+ val taskName: Int = reader.taskInfo.id
if (taskName in taskCpuDemands) {
taskCpuDemands[taskName]?.add(reader.cpuDemand)
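
With the monitor keyed by the integer task id instead of the task name, assertions address metrics as monitor.taskCpuDemands[0] rather than monitor.taskCpuDemands["0"]. A minimal sketch of the resulting query pattern, assuming topology and workload are built with the helpers above:

    // Hedged sketch: per-task metrics are now looked up by Int id.
    fun assertTaskZeroDemand(topology: List<ClusterSpec>, workload: ArrayList<Task>) {
        val monitor = runTest(topology, workload)
        // Second recorded CPU-demand sample of the task with id 0.
        val demand = monitor.taskCpuDemands[0]?.get(1)
        checkNotNull(demand) { "no samples recorded for task 0" }
    }
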
diff --git a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/VirtualizationOverheadTests.kt b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/VirtualizationOverheadTests.kt
index 18936a15..3aa6c354 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/VirtualizationOverheadTests.kt
+++ b/opendc-experiments/opendc-experiments-base/src/test/kotlin/org/opendc/experiments/base/VirtualizationOverheadTests.kt
@@ -117,17 +117,19 @@ class VirtualizationOverheadTests {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 1000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
)
val monitor = runTest(topology, workload)
- assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(1), "Task 0 should have gpu demand 1000.0")
- assertEquals(1000.0, monitor.taskGpuSupplied["0"]?.get(1), "Task 0 should have gpu supplied 1000.0 ")
+ assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(1), "Task 0 should have gpu demand 1000.0")
+ assertEquals(1000.0, monitor.taskGpuSupplied[0]?.get(1), "Task 0 should have gpu supplied 1000.0")
assertEquals(1000.0, monitor.hostGpuDemands["H01"]?.get(1)?.get(0), "Host H01 should have gpu demand 1000.0")
assertEquals(1000.0, monitor.hostGpuSupplied["H01"]?.get(1)?.get(0), "Host H01 should have gpu supply 1000.0")
}
@@ -141,18 +143,20 @@ class VirtualizationOverheadTests {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 1000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
)
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(1), "Task 0 should have gpu demand 1000.0") },
- { assertEquals(0.95 * 1000.0, monitor.taskGpuSupplied["0"]?.get(1), "Task 0 should have gpu supplied 950.0 ") },
+ { assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(1), "Task 0 should have gpu demand 1000.0") },
+ { assertEquals(0.95 * 1000.0, monitor.taskGpuSupplied[0]?.get(1), "Task 0 should have gpu supplied 950.0") },
{ assertEquals(1000.0, monitor.hostGpuDemands["H01"]?.get(1)?.get(0), "Host H01 should have gpu demand 1000.0") },
{ assertEquals(0.95 * 1000.0, monitor.hostGpuSupplied["H01"]?.get(1)?.get(0), "Host H01 should have gpu supply 950.0") },
)
@@ -167,18 +171,20 @@ class VirtualizationOverheadTests {
val workload: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 1000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
)
val monitor = runTest(topology, workload)
assertAll(
- { assertEquals(1000.0, monitor.taskGpuDemands["0"]?.get(1), "Task 0 should have gpu demand 1000.0") },
- { assertEquals(0.75 * 1000.0, monitor.taskGpuSupplied["0"]?.get(1), "Task 0 should have gpu supplied 750.0 ") },
+ { assertEquals(1000.0, monitor.taskGpuDemands[0]?.get(1), "Task 0 should have gpu demand 1000.0") },
+ { assertEquals(0.75 * 1000.0, monitor.taskGpuSupplied[0]?.get(1), "Task 0 should have gpu supplied 750.0") },
{ assertEquals(1000.0, monitor.hostGpuDemands["H01"]?.get(1)?.get(0), "Host H01 should have gpu demand 1000.0") },
{ assertEquals(0.75 * 1000.0, monitor.hostGpuSupplied["H01"]?.get(1)?.get(0), "Host H01 should have gpu supply 750.0") },
)
@@ -193,54 +199,66 @@ class VirtualizationOverheadTests {
val workload1: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 1000.0, 1, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 1000.0, 1000.0),
),
+ cpuCount = 1,
+ gpuCount = 1,
),
)
val workload2: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
)
val workload3: ArrayList<Task> =
arrayListOf(
createTestTask(
- name = "0",
+ id = 0,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
createTestTask(
- name = "1",
+ id = 1,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
createTestTask(
- name = "2",
+ id = 2,
fragments =
arrayListOf(
- TraceFragment(10 * 60 * 1000, 0.0, 0, 1000.0, 1),
+ TraceFragment(10 * 60 * 1000, 0.0, 1000.0),
),
+ cpuCount = 0,
+ gpuCount = 1,
),
)
@@ -250,24 +268,24 @@ class VirtualizationOverheadTests {
assertAll(
// Test with one VM
- { assertEquals(1000.0, monitor1.taskGpuDemands["0"]?.get(1), "Task 0 should have gpu demand 1000.0") },
- { assertEquals(1000.0, monitor1.taskGpuSupplied["0"]?.get(1), "Task 0 should have gpu supplied 1000.0 ") },
+ { assertEquals(1000.0, monitor1.taskGpuDemands[0]?.get(1), "Task 0 should have gpu demand 1000.0") },
+ { assertEquals(1000.0, monitor1.taskGpuSupplied[0]?.get(1), "Task 0 should have gpu supplied 1000.0") },
{ assertEquals(1000.0, monitor1.hostGpuDemands["H01"]?.get(1)?.get(0), "Host H01 should have gpu demand 1000.0") },
{ assertEquals(1000.0, monitor1.hostGpuSupplied["H01"]?.get(1)?.get(0), "Host H01 should have gpu supply 1000.0") },
// Test with two VMs
- { assertEquals(1000.0, monitor2.taskGpuDemands["0"]?.get(1), "Task 0 should have gpu demand 1000.0") },
- { assertEquals(500.0, monitor2.taskGpuSupplied["0"]?.get(1), "Task 0 should have gpu supplied 500.0") },
- { assertEquals(1000.0, monitor2.taskGpuDemands["1"]?.get(1), "Task 0 should have gpu demand 1000.0") },
- { assertEquals(500.0, monitor2.taskGpuSupplied["1"]?.get(1), "Task 0 should have gpu supplied 500.0") },
+ { assertEquals(1000.0, monitor2.taskGpuDemands[0]?.get(1), "Task 0 should have gpu demand 1000.0") },
+ { assertEquals(500.0, monitor2.taskGpuSupplied[0]?.get(1), "Task 0 should have gpu supplied 500.0") },
+ { assertEquals(1000.0, monitor2.taskGpuDemands[1]?.get(1), "Task 1 should have gpu demand 1000.0") },
+ { assertEquals(500.0, monitor2.taskGpuSupplied[1]?.get(1), "Task 1 should have gpu supplied 500.0") },
{ assertEquals(2000.0, monitor2.hostGpuDemands["H01"]?.get(1)?.get(0), "Host H01 should have gpu demand 2000.0") },
{ assertEquals(1000.0, monitor2.hostGpuSupplied["H01"]?.get(1)?.get(0), "Host H01 should have gpu supply 1000.0") },
// Test with three VMs
- { assertEquals(1000.0, monitor3.taskGpuDemands["0"]?.get(1), "Task 0 should have gpu demand 1000.0") },
- { assertEquals(333.3, monitor3.taskGpuSupplied["0"]?.get(1) ?: 0.0, 0.05, "Task 0 should have gpu supplied 333.3 ") },
- { assertEquals(1000.0, monitor3.taskGpuDemands["1"]?.get(1), "Task 0 should have gpu demand 1000.0") },
- { assertEquals(333.3, monitor3.taskGpuSupplied["1"]?.get(1) ?: 0.0, 0.05, "Task 0 should have gpu supplied 333.3 ") },
- { assertEquals(1000.0, monitor3.taskGpuDemands["2"]?.get(1), "Task 0 should have gpu demand 1000.0") },
- { assertEquals(333.3, monitor3.taskGpuSupplied["2"]?.get(1) ?: 0.0, 0.05, "Task 0 should have gpu supplied 333.3 ") },
+ { assertEquals(1000.0, monitor3.taskGpuDemands[0]?.get(1), "Task 0 should have gpu demand 1000.0") },
+ { assertEquals(333.3, monitor3.taskGpuSupplied[0]?.get(1) ?: 0.0, 0.05, "Task 0 should have gpu supplied 333.3") },
+ { assertEquals(1000.0, monitor3.taskGpuDemands[1]?.get(1), "Task 1 should have gpu demand 1000.0") },
+ { assertEquals(333.3, monitor3.taskGpuSupplied[1]?.get(1) ?: 0.0, 0.05, "Task 1 should have gpu supplied 333.3") },
+ { assertEquals(1000.0, monitor3.taskGpuDemands[2]?.get(1), "Task 2 should have gpu demand 1000.0") },
+ { assertEquals(333.3, monitor3.taskGpuSupplied[2]?.get(1) ?: 0.0, 0.05, "Task 2 should have gpu supplied 333.3") },
{ assertEquals(3000.0, monitor3.hostGpuDemands["H01"]?.get(1)?.get(0), "Host H01 should have gpu demand 3000.0") },
{ assertEquals(1000.0, monitor3.hostGpuSupplied["H01"]?.get(1)?.get(0), "Host H01 should have gpu supply 1000.0") },
)
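
The three-VM expectations above follow from max-min fair sharing of a single 1000.0 MHz GPU: each VM demands 1000.0 MHz, so each receives 1000.0 / 3 ≈ 333.3 MHz, while the host-level demand sums to 3000.0 MHz. A self-contained sketch of the water-filling computation behind these numbers (an illustrative helper, not part of the simulator API):

    // Max-min fairness by water-filling: grant each consumer
    // min(its demand, an equal share of the remaining supply),
    // serving the smallest demands first.
    fun maxMinShares(demands: List<Double>, capacity: Double): List<Double> {
        val shares = DoubleArray(demands.size)
        var remaining = capacity
        var left = demands.size
        for ((index, demand) in demands.withIndex().sortedBy { it.value }) {
            val grant = minOf(demand, remaining / left)
            shares[index] = grant
            remaining -= grant
            left--
        }
        return shares.toList()
    }

    fun main() {
        // Three equal demands of 1000.0 on a 1000.0 supply -> ~333.3 each.
        println(maxMinShares(listOf(1000.0, 1000.0, 1000.0), 1000.0))
    }
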
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/fragments.parquet
index 240f58e3..84e2ab7a 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/fragments.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/interference-model.json b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/interference-model.json
deleted file mode 100644
index 51fc6366..00000000
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/interference-model.json
+++ /dev/null
@@ -1,21 +0,0 @@
-[
- {
- "vms": [
- "141",
- "379",
- "851",
- "116"
- ],
- "minServerLoad": 0.0,
- "performanceScore": 0.8830158730158756
- },
- {
- "vms": [
- "205",
- "116",
- "463"
- ],
- "minServerLoad": 0.0,
- "performanceScore": 0.7133055555552751
- }
-]
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/tasks.parquet
index 8e9dcea7..88d60c54 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/tasks.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/bitbrains-small/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/single_task/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/single_task/fragments.parquet
index 94a2d69e..cd749642 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/single_task/fragments.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/single_task/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/single_task/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/single_task/tasks.parquet
index 2a7da2eb..08457a8d 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/single_task/tasks.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/single_task/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/small_gpu/fragments.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/small_gpu/fragments.parquet
index 7dda2c97..689f2276 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/small_gpu/fragments.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/small_gpu/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/small_gpu/tasks.parquet b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/small_gpu/tasks.parquet
index 23331729..fb38512c 100644
--- a/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/small_gpu/tasks.parquet
+++ b/opendc-experiments/opendc-experiments-base/src/test/resources/workloadTraces/small_gpu/tasks.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-m3sa/src/test/kotlin/org/opendc/experiments/m3sa/M3SARunnerTest.kt b/opendc-experiments/opendc-experiments-m3sa/src/test/kotlin/org/opendc/experiments/m3sa/M3SARunnerTest.kt
index 8d8bae5f..3152151f 100644
--- a/opendc-experiments/opendc-experiments-m3sa/src/test/kotlin/org/opendc/experiments/m3sa/M3SARunnerTest.kt
+++ b/opendc-experiments/opendc-experiments-m3sa/src/test/kotlin/org/opendc/experiments/m3sa/M3SARunnerTest.kt
@@ -31,7 +31,7 @@ import java.io.File
class M3SARunnerTest {
@Test
fun `Run M3SA-OpenDC full integration 1`() {
- val scenarioJson = "src/test/resources/scenarios/experiment1/scenario_metamodel.json"
+ val experimentJson = "src/test/resources/scenarios/experiment1/scenario_metamodel.json"
val outDir = "src/test/resources/outputs/"
val m3saPath = "src/test/resources/m3saSetups/experiment1/m3saSetup.json"
val m3saExecPath = "src/main/python"
@@ -39,7 +39,7 @@ class M3SARunnerTest {
assertDoesNotThrow {
main(
- arrayOf("--experiment-path", scenarioJson, "--m3sa-setup-path", m3saPath, "--m3sa-exec-path", m3saExecPath),
+ arrayOf("--experiment-path", experimentJson, "--m3sa-setup-path", m3saPath, "--m3sa-exec-path", m3saExecPath),
)
}
diff --git a/opendc-experiments/opendc-experiments-m3sa/src/test/resources/workloadTraces/experiment1/fragments.parquet b/opendc-experiments/opendc-experiments-m3sa/src/test/resources/workloadTraces/experiment1/fragments.parquet
index 07fb97d4..8de8ba9e 100644
--- a/opendc-experiments/opendc-experiments-m3sa/src/test/resources/workloadTraces/experiment1/fragments.parquet
+++ b/opendc-experiments/opendc-experiments-m3sa/src/test/resources/workloadTraces/experiment1/fragments.parquet
Binary files differ
diff --git a/opendc-experiments/opendc-experiments-m3sa/src/test/resources/workloadTraces/experiment1/tasks.parquet b/opendc-experiments/opendc-experiments-m3sa/src/test/resources/workloadTraces/experiment1/tasks.parquet
index 1e35963b..30811fd7 100644
--- a/opendc-experiments/opendc-experiments-m3sa/src/test/resources/workloadTraces/experiment1/tasks.parquet
+++ b/opendc-experiments/opendc-experiments-m3sa/src/test/resources/workloadTraces/experiment1/tasks.parquet
Binary files differ
diff --git a/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/SimTraceWorkload.java b/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/SimTraceWorkload.java
index 8b3a7188..95487476 100644
--- a/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/SimTraceWorkload.java
+++ b/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/SimTraceWorkload.java
@@ -66,7 +66,7 @@ public class SimTraceWorkload extends SimWorkload implements FlowConsumer {
private final TraceWorkload snapshot;
private final ScalingPolicy scalingPolicy;
- private final String taskName;
+ private final int taskId;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Basic Getters and Setters
@@ -107,7 +107,7 @@ public class SimTraceWorkload extends SimWorkload implements FlowConsumer {
this.scalingPolicy = workload.getScalingPolicy();
this.remainingFragments = new LinkedList<>(workload.getFragments());
this.fragmentIndex = 0;
- this.taskName = workload.getTaskName();
+ this.taskId = workload.getTaskId();
this.startOfFragment = this.clock.millis();
@@ -135,7 +135,7 @@ public class SimTraceWorkload extends SimWorkload implements FlowConsumer {
this.scalingPolicy = workload.getScalingPolicy();
this.remainingFragments = new LinkedList<>(workload.getFragments());
this.fragmentIndex = 0;
- this.taskName = workload.getTaskName();
+ this.taskId = workload.getTaskId();
this.startOfFragment = this.clock.millis();
@@ -325,9 +325,7 @@ public class SimTraceWorkload extends SimWorkload implements FlowConsumer {
TraceFragment newFragment = new TraceFragment(
remainingDuration,
currentFragment.cpuUsage(),
- currentFragment.cpuCoreCount(),
currentFragment.gpuUsage(),
- currentFragment.gpuCoreCount(),
currentFragment.gpuMemoryUsage());
// Alter the snapshot by removing finished fragments
@@ -340,9 +338,7 @@ public class SimTraceWorkload extends SimWorkload implements FlowConsumer {
TraceFragment snapshotFragment = new TraceFragment(
this.checkpointDuration,
this.snapshot.getMaxCpuDemand(),
- this.snapshot.getMaxCoreCount(),
this.snapshot.getMaxGpuDemand(),
- this.snapshot.getMaxGpuCoreCount(),
this.snapshot.getMaxGpuMemoryDemand());
this.remainingFragments.addFirst(snapshotFragment);
diff --git a/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/TraceFragment.java b/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/TraceFragment.java
index bc3685a3..c17671a7 100644
--- a/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/TraceFragment.java
+++ b/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/TraceFragment.java
@@ -24,19 +24,18 @@ package org.opendc.simulator.compute.workload.trace;
import org.opendc.common.ResourceType;
-public record TraceFragment(
- long duration, double cpuUsage, int cpuCoreCount, double gpuUsage, int gpuCoreCount, Long gpuMemoryUsage) {
+public record TraceFragment(long duration, double cpuUsage, double gpuUsage, int gpuMemoryUsage) {
- public TraceFragment(long start, long duration, double cpuUsage, int cpuCoreCount) {
- this(duration, cpuUsage, cpuCoreCount, 0.0, 0, 0L);
+ public TraceFragment(long start, long duration, double cpuUsage) {
+ this(duration, cpuUsage, 0.0, 0);
}
- public TraceFragment(long duration, double cpuUsage, int cpuCoreCount) {
- this(duration, cpuUsage, cpuCoreCount, 0.0, 0, 0L);
+ public TraceFragment(long duration, double cpuUsage) {
+ this(duration, cpuUsage, 0.0, 0);
}
- public TraceFragment(long duration, double cpuUsage, int cpuCoreCount, double gpuUsage, int gpuCoreCount) {
- this(duration, cpuUsage, cpuCoreCount, gpuUsage, gpuCoreCount, 0L);
+ public TraceFragment(long duration, double cpuUsage, double gpuUsage) {
+ this(duration, cpuUsage, gpuUsage, 0);
}
/**
@@ -53,18 +52,4 @@ public record TraceFragment(
default -> throw new IllegalArgumentException("Invalid resource type: " + resourceType);
};
}
-
- /**
- * Returns the core count for the specified resource type.
- *
- * @param resourceType the type of resource
- * @return the core count for the specified resource type
- */
- public int getCoreCount(ResourceType resourceType) throws IllegalArgumentException {
- return switch (resourceType) {
- case CPU -> cpuCoreCount;
- case GPU -> gpuCoreCount;
- default -> throw new IllegalArgumentException("Invalid resource type: " + resourceType);
- };
- }
}
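
With the core counts dropped, a fragment is now fully described by its duration and per-resource usage, and the convenience constructors default the GPU fields to zero. A short Kotlin sketch of the surviving constructors and accessor (the values are arbitrary):

    import org.opendc.common.ResourceType
    import org.opendc.simulator.compute.workload.trace.TraceFragment

    // Ten minutes at 1000 MHz CPU; GPU usage and GPU memory default to 0.
    val cpuOnly = TraceFragment(10 * 60 * 1000L, 1000.0)

    // Same duration, with an explicit 2000 MHz GPU demand.
    val withGpu = TraceFragment(10 * 60 * 1000L, 1000.0, 2000.0)

    // Dispatches on the resource type; unsupported types throw.
    val gpuUsage = withGpu.getResourceUsage(ResourceType.GPU) // 2000.0
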
diff --git a/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/TraceWorkload.java b/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/TraceWorkload.java
index d698a48d..53ce9f31 100644
--- a/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/TraceWorkload.java
+++ b/opendc-simulator/opendc-simulator-compute/src/main/java/org/opendc/simulator/compute/workload/trace/TraceWorkload.java
@@ -41,12 +41,10 @@ public class TraceWorkload implements Workload {
private final long checkpointDuration;
private final double checkpointIntervalScaling;
private final double maxCpuDemand;
- private final int maxCpuCoreCount;
private final double maxGpuDemand;
- private final int maxGpuCoreCount;
- private final long maxGpuMemoryDemand;
- private final String taskName;
- private ResourceType[] resourceTypes = new ResourceType[ResourceType.values().length];
+ private final int maxGpuMemoryDemand;
+ private final int taskId;
+ private final ResourceType[] resourceTypes;
public ScalingPolicy getScalingPolicy() {
return scalingPolicy;
@@ -60,36 +58,25 @@ public class TraceWorkload implements Workload {
long checkpointDuration,
double checkpointIntervalScaling,
ScalingPolicy scalingPolicy,
- String taskName,
+ int taskId,
ResourceType[] resourceTypes) {
this.fragments = fragments;
this.checkpointInterval = checkpointInterval;
this.checkpointDuration = checkpointDuration;
this.checkpointIntervalScaling = checkpointIntervalScaling;
this.scalingPolicy = scalingPolicy;
- this.taskName = taskName;
+ this.taskId = taskId;
// TODO: remove if we decide not to use it.
this.maxCpuDemand = fragments.stream()
.max(Comparator.comparing(TraceFragment::cpuUsage))
.get()
- // .cpuUsage();
.getResourceUsage(ResourceType.CPU);
- this.maxCpuCoreCount = fragments.stream()
- .max(Comparator.comparing(TraceFragment::cpuCoreCount))
- .get()
- // .cpuCoreCount();
- .getCoreCount(ResourceType.CPU);
-
this.maxGpuDemand = fragments.stream()
.max(Comparator.comparing(TraceFragment::gpuUsage))
.get()
.getResourceUsage(ResourceType.GPU);
- this.maxGpuCoreCount = fragments.stream()
- .max(Comparator.comparing(TraceFragment::gpuCoreCount))
- .get()
- .getCoreCount(ResourceType.GPU);
- this.maxGpuMemoryDemand = 0L; // TODO: add GPU memory demand to the trace fragments
+ this.maxGpuMemoryDemand = 0; // TODO: add GPU memory demand to the trace fragments
this.resourceTypes = resourceTypes;
}
@@ -113,10 +100,6 @@ public class TraceWorkload implements Workload {
return checkpointIntervalScaling;
}
- public int getMaxCoreCount() {
- return maxCpuCoreCount;
- }
-
public double getMaxCpuDemand() {
return maxCpuDemand;
}
@@ -125,16 +108,12 @@ public class TraceWorkload implements Workload {
return maxGpuDemand;
}
- public int getMaxGpuCoreCount() {
- return maxGpuCoreCount;
- }
-
- public long getMaxGpuMemoryDemand() {
+ public int getMaxGpuMemoryDemand() {
return maxGpuMemoryDemand;
}
- public String getTaskName() {
- return taskName;
+ public int getTaskId() {
+ return taskId;
}
public void removeFragments(int numberOfFragments) {
@@ -171,8 +150,8 @@ public class TraceWorkload implements Workload {
long checkpointDuration,
double checkpointIntervalScaling,
ScalingPolicy scalingPolicy,
- String taskName) {
- return new Builder(checkpointInterval, checkpointDuration, checkpointIntervalScaling, scalingPolicy, taskName);
+ int taskId) {
+ return new Builder(checkpointInterval, checkpointDuration, checkpointIntervalScaling, scalingPolicy, taskId);
}
public static final class Builder {
@@ -181,7 +160,7 @@ public class TraceWorkload implements Workload {
private final long checkpointDuration;
private final double checkpointIntervalScaling;
private final ScalingPolicy scalingPolicy;
- private final String taskName;
+ private final int taskId;
private final ResourceType[] resourceTypes = new ResourceType[ResourceType.values().length];
/**
@@ -192,13 +171,13 @@ public class TraceWorkload implements Workload {
long checkpointDuration,
double checkpointIntervalScaling,
ScalingPolicy scalingPolicy,
- String taskName) {
+ int taskId) {
this.fragments = new ArrayList<>();
this.checkpointInterval = checkpointInterval;
this.checkpointDuration = checkpointDuration;
this.checkpointIntervalScaling = checkpointIntervalScaling;
this.scalingPolicy = scalingPolicy;
- this.taskName = taskName;
+ this.taskId = taskId;
}
/**
@@ -206,22 +185,17 @@ public class TraceWorkload implements Workload {
*
* @param duration The timestamp at which the fragment ends (in epoch millis).
* @param cpuUsage The CPU usage at this fragment.
- * @param cpuCores The number of cores used during this fragment.
* @param gpuUsage The GPU usage at this fragment.
- * @param gpuCores The number of GPU cores used during this fragment.
* @param gpuMemoryUsage The GPU memory usage at this fragment.
*/
- public void add(
- long duration, double cpuUsage, int cpuCores, double gpuUsage, int gpuCores, long gpuMemoryUsage) {
+ public void add(long duration, double cpuUsage, double gpuUsage, int gpuMemoryUsage) {
if (cpuUsage > 0.0) {
this.resourceTypes[ResourceType.CPU.ordinal()] = ResourceType.CPU;
}
if (gpuUsage > 0.0) {
this.resourceTypes[ResourceType.GPU.ordinal()] = ResourceType.GPU;
}
- fragments.add(
- fragments.size(),
- new TraceFragment(duration, cpuUsage, cpuCores, gpuUsage, gpuCores, gpuMemoryUsage));
+ fragments.add(fragments.size(), new TraceFragment(duration, cpuUsage, gpuUsage, gpuMemoryUsage));
}
/**
@@ -234,7 +208,7 @@ public class TraceWorkload implements Workload {
this.checkpointDuration,
this.checkpointIntervalScaling,
this.scalingPolicy,
- this.taskName,
+ this.taskId,
this.resourceTypes);
}
}
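
After this change a trace workload is assembled per task id rather than per task name. A hedged sketch of driving the reworked builder; the ScalingPolicy instance is taken as a parameter (and its import package is an assumption) because its construction sits outside this diff:

    import org.opendc.simulator.compute.workload.trace.TraceWorkload
    import org.opendc.simulator.compute.workload.trace.scaling.ScalingPolicy // package assumed

    fun buildWorkload(scalingPolicy: ScalingPolicy): TraceWorkload {
        // checkpointInterval, checkpointDuration, intervalScaling, policy, taskId
        val builder = TraceWorkload.builder(0L, 0L, 1.0, scalingPolicy, 42)

        // add(duration ms, cpuUsage MHz, gpuUsage MHz, gpuMemoryUsage)
        builder.add(10 * 60 * 1000L, 1000.0, 0.0, 0)
        builder.add(5 * 60 * 1000L, 2000.0, 1000.0, 0)

        return builder.build()
    }
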
diff --git a/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/engine/graph/FlowDistributor.java b/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/engine/graph/FlowDistributor.java
index c388293b..501bbf10 100644
--- a/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/engine/graph/FlowDistributor.java
+++ b/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/engine/graph/FlowDistributor.java
@@ -42,7 +42,7 @@ import org.slf4j.LoggerFactory;
* It also provides methods to update outgoing demands and supplies based on the incoming demands and supplies.
* This class is abstract and should be extended by specific implementations that define the distribution strategy.
* It uses a {@link FlowDistributorFactory.DistributionPolicy} to determine how to distribute the supply among the consumers.
- * The default distribution policy is {@link MaxMinFairnessPolicy}, which distributes the supply fairly among the consumers.
+ * The default distribution policy is MaxMinFairnessPolicy, which distributes the supply fairly among the consumers.
*/
public abstract class FlowDistributor extends FlowNode implements FlowSupplier, FlowConsumer {
protected static final Logger LOGGER = LoggerFactory.getLogger(FlowDistributor.class);
@@ -178,7 +178,9 @@ public abstract class FlowDistributor extends FlowNode implements FlowSupplier,
// supplierIndex not always set, so we use 0 as default to avoid index out of bounds
int idx = supplierEdge.getSupplierIndex() == -1 ? 0 : supplierEdge.getSupplierIndex();
- // to keep index consistent, entries are neutralized instead of removed
- this.supplierEdges.put(idx, null);
+ // the supplier edge is removed entirely; its incoming supply is zeroed below
+ this.supplierEdges.remove(idx);
this.capacity -= supplierEdge.getCapacity();
this.currentIncomingSupplies.put(idx, 0.0);
diff --git a/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/engine/graph/distributionPolicies/MaxMinFairnessFlowDistributor.java b/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/engine/graph/distributionPolicies/MaxMinFairnessFlowDistributor.java
index 9b48f204..371015a4 100644
--- a/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/engine/graph/distributionPolicies/MaxMinFairnessFlowDistributor.java
+++ b/opendc-simulator/opendc-simulator-flow/src/main/java/org/opendc/simulator/engine/graph/distributionPolicies/MaxMinFairnessFlowDistributor.java
@@ -44,12 +44,8 @@ public class MaxMinFairnessFlowDistributor extends FlowDistributor {
}
protected void updateOutgoingDemand() {
- // equally distribute the demand to all suppliers
for (FlowEdge supplierEdge : this.supplierEdges.values()) {
this.pushOutgoingDemand(supplierEdge, this.totalIncomingDemand / this.supplierEdges.size());
- // alternatively a relative share could be used, based on capacity minus current incoming supply
- // this.pushOutgoingDemand(supplierEdge, this.totalIncomingDemand * (supplierEdge.getCapacity() -
- // currentIncomingSupplies.get(idx) / supplierEdges.size()));
}
this.outgoingDemandUpdateNeeded = false;
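
With the commented-out alternative removed, the policy's outgoing demand is a plain equal split of the total incoming demand across supplier edges, e.g. 2000.0 MHz over two suppliers yields 1000.0 MHz per edge:

    // Illustrative standalone helper mirroring the loop above.
    fun equalSplit(totalIncomingDemand: Double, supplierCount: Int): Double =
        totalIncomingDemand / supplierCount
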
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/CarbonIntensityColumns.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/CarbonColumns.kt
index de74c4fd..32cdd78b 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/CarbonIntensityColumns.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/CarbonColumns.kt
@@ -20,16 +20,16 @@
* SOFTWARE.
*/
-@file:JvmName("CarbonIntensityColumns")
+@file:JvmName("CarbonColumns")
package org.opendc.trace.conv
/**
- * A column containing the task identifier.
+ * A column containing the timestamp of the carbon intensity measurement.
*/
-public const val CARBON_INTENSITY_TIMESTAMP: String = "timestamp"
+public const val CARBON_TIMESTAMP: String = "timestamp"
/**
- * A column containing the task identifier.
+ * A column containing the carbon intensity at the sampled timestamp.
*/
-public const val CARBON_INTENSITY_VALUE: String = "carbon_intensity"
+public const val CARBON_INTENSITY: String = "carbon_intensity"
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/InterferenceGroupColumns.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/FragmentColumns.kt
index fbbfdea9..e0d01ef2 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/InterferenceGroupColumns.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/FragmentColumns.kt
@@ -20,21 +20,26 @@
* SOFTWARE.
*/
-@file:JvmName("InterferenceGroupColumns")
+@file:JvmName("FragmentColumns")
package org.opendc.trace.conv
/**
- * Members of the interference group.
+ * Duration for the fragment.
*/
-public const val INTERFERENCE_GROUP_MEMBERS: String = "members"
+public const val FRAGMENT_DURATION: String = "duration"
/**
- * Target load after which the interference occurs.
+ * Total CPU usage during the fragment in MHz.
*/
-public const val INTERFERENCE_GROUP_TARGET: String = "target"
+public const val FRAGMENT_CPU_USAGE: String = "cpu_usage"
/**
- * Performance score when the interference occurs.
+ * Total GPU usage during the fragment in MHz.
*/
-public const val INTERFERENCE_GROUP_SCORE: String = "score"
+public const val FRAGMENT_GPU_USAGE: String = "gpu_usage"
+
+/**
+ * Memory usage during the fragment in KB.
+ */
+public const val FRAGMENT_MEM_USAGE: String = "mem_usage"
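
For context, a hedged sketch of consuming these constants through the TableReader interface visible elsewhere in this diff (resolve, nextRow, getLong, getDouble); obtaining an open reader for the fragments table, and the duration column being surfaced as a long, are assumptions here:

    import org.opendc.trace.TableReader
    import org.opendc.trace.conv.FRAGMENT_CPU_USAGE
    import org.opendc.trace.conv.FRAGMENT_DURATION

    // Sums CPU work (MHz x ms) across all fragment rows of an open reader.
    fun totalCpuWork(reader: TableReader): Double {
        val durationCol = reader.resolve(FRAGMENT_DURATION)
        val cpuUsageCol = reader.resolve(FRAGMENT_CPU_USAGE)
        var total = 0.0
        while (reader.nextRow()) {
            total += reader.getLong(durationCol) * reader.getDouble(cpuUsageCol)
        }
        return total
    }
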
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/ResourceColumns.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/ResourceColumns.kt
deleted file mode 100644
index 3d0341b2..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/ResourceColumns.kt
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-@file:JvmName("ResourceColumns")
-
-package org.opendc.trace.conv
-
-/**
- * Identifier of the resource.
- */
-@JvmField
-public val resourceID: String = "id"
-
-/**
- * The cluster to which the resource belongs.
- */
-@JvmField
-public val resourceClusterID: String = "cluster_id"
-
-/**
- * Start time for the resource.
- */
-@JvmField
-public val resourceSubmissionTime: String = "submission_time"
-
-/**
- * Carbon intensity of the resource.
- */
-@JvmField
-public val resourceCarbonIntensity: String = "carbon_intensity"
-
-/**
- * End time for the resource.
- */
-@JvmField
-public val resourceDuration: String = "duration"
-
-/**
- * Number of CPUs for the resource.
- */
-@JvmField
-public val resourceCpuCount: String = "cpu_count"
-
-/**
- * Total CPU capacity of the resource in MHz.
- */
-@JvmField
-public val resourceCpuCapacity: String = "cpu_capacity"
-
-/**
- * Memory capacity for the resource in KB.
- */
-@JvmField
-public val resourceMemCapacity: String = "mem_capacity"
-
-/**
- * Number of GPU cores for the resource.
- */
-@JvmField
-public val resourceGpuCount: String = "gpu_count"
-
-/**
- * Total GPU capacity of the resource in MHz.
- */
-@JvmField
-public val resourceGpuCapacity: String = "gpu_capacity"
-
-/**
- * Total GPU memory capacity of the resource in MB.
- */
-@JvmField
-public val resourceGpuMemCapacity: String = "gpu_mem_capacity"
-
-/**
- * The parents of the resource that need to be completed before this resource can be used.
- */
-@JvmField
-public val resourceParents: String = "parents"
-
-/**
- * The children of the resource that cannot be started before this is completed.
- */
-@JvmField
-public val resourceChildren: String = "children"
-
-/**
- * Nature of the task. Delayable, interruptible, etc.
- */
-@JvmField
-public val resourceNature: String = "nature"
-
-/**
- * Deadline of the task.
- */
-@JvmField
-public val resourceDeadline: String = "deadline"
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/ResourceStateColumns.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/ResourceStateColumns.kt
deleted file mode 100644
index f4ab7759..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/ResourceStateColumns.kt
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-@file:JvmName("ResourceStateColumns")
-
-package org.opendc.trace.conv
-
-/**
- * The timestamp at which the state was recorded.
- */
-@JvmField
-public val resourceStateTimestamp: String = "timestamp"
-
-/**
- * Duration for the state.
- */
-@JvmField
-public val resourceStateDuration: String = "duration"
-
-/**
- * A flag to indicate that the resource is powered on.
- */
-@JvmField
-public val resourceStatePoweredOn: String = "powered_on"
-
-/**
- * Total CPU usage of the resource in MHz.
- */
-@JvmField
-public val resourceStateCpuUsage: String = "cpu_usage"
-
-/**
- * Total CPU usage of the resource in percentage.
- */
-@JvmField
-public val resourceStateCpuUsagePct: String = "cpu_usage_pct"
-
-/**
- * Total CPU demand of the resource in MHz.
- */
-@JvmField
-public val resourceStateCpuDemand: String = "cpu_demand"
-
-/**
- * CPU ready percentage.
- */
-@JvmField
-public val resourceStateCpuReadyPct: String = "cpu_ready_pct"
-
-/**
- * Memory usage of the resource in KB.
- */
-@JvmField
-public val resourceStateMemUsage: String = "mem_usage"
-
-/**
- * Disk read throughput of the resource in KB/s.
- */
-@JvmField
-public val resourceStateDiskRead: String = "disk_read"
-
-/**
- * Disk write throughput of the resource in KB/s.
- */
-@JvmField
-public val resourceStateDiskWrite: String = "disk_write"
-
-/**
- * Network receive throughput of the resource in KB/s.
- */
-@JvmField
-public val resourceStateNetRx: String = "net_rx"
-
-/**
- * Network transmit throughput of the resource in KB/s.
- */
-@JvmField
-public val resourceStateNetTx: String = "net_tx"
-
-/**
- * Total GPU capacity of the resource in MHz.
- */
-@JvmField
-public val resourceStateGpuUsage: String = "gpu_usage"
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/Tables.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/Tables.kt
index d4019f73..310d268a 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/Tables.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/Tables.kt
@@ -25,30 +25,21 @@
package org.opendc.trace.conv
/**
- * A table containing all workflows in a workload.
- */
-public const val TABLE_WORKFLOWS: String = "workflows"
-
-/**
* A table containing all tasks in a workload.
*/
public const val TABLE_TASKS: String = "tasks"
/**
- * A table containing all resources in a workload.
+ * A table containing all fragments in a workload.
*/
-public const val TABLE_RESOURCES: String = "resources"
+public const val TABLE_FRAGMENTS: String = "fragments"
/**
- * A table containing all resource states in a workload.
+ * A table containing the carbon intensities of the region.
*/
-public const val TABLE_RESOURCE_STATES: String = "resource_states"
+public const val TABLE_CARBON: String = "carbon"
/**
- * A table containing the groups of resources that interfere when run on the same execution platform.
+ * A table containing failures that can be injected during simulation.
*/
-public const val TABLE_INTERFERENCE_GROUPS: String = "interference_groups"
-
-public const val TABLE_CARBON_INTENSITIES: String = "carbon_intensities"
-
public const val TABLE_FAILURES: String = "failures"
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/TaskColumns.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/TaskColumns.kt
index 6ca87a60..0df52c71 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/TaskColumns.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/conv/TaskColumns.kt
@@ -25,61 +25,71 @@
package org.opendc.trace.conv
/**
- * A column containing the task identifier.
+ * Identifier of the task.
*/
public const val TASK_ID: String = "id"
/**
- * A column containing the identifier of the workflow.
+ * Name of the task.
*/
-public const val TASK_WORKFLOW_ID: String = "workflow_id"
+public const val TASK_NAME: String = "name"
/**
- * A column containing the submission time of the task.
+ * The submission time of the task.
*/
-public const val TASK_SUBMIT_TIME: String = "submit_time"
+public const val TASK_SUBMISSION_TIME: String = "submission_time"
/**
- * A column containing the wait time of the task.
+ * The duration of the task in ms.
*/
-public const val TASK_WAIT_TIME: String = "wait_time"
+public const val TASK_DURATION: String = "duration"
/**
- * A column containing the runtime time of the task.
+ * Number of CPUs for the task.
*/
-public const val TASK_RUNTIME: String = "runtime"
+public const val TASK_CPU_COUNT: String = "cpu_count"
/**
- * A column containing the parents of a task.
+ * Total CPU capacity of the task in MHz.
*/
-public const val TASK_PARENTS: String = "parents"
+public const val TASK_CPU_CAPACITY: String = "cpu_capacity"
/**
- * A column containing the children of a task.
+ * Memory capacity for the task in KB.
*/
-public const val TASK_CHILDREN: String = "children"
+public const val TASK_MEM_CAPACITY: String = "mem_capacity"
+
+/**
+ * Number of GPU cores for the task.
+ */
+public const val TASK_GPU_COUNT: String = "gpu_count"
/**
- * A column containing the requested CPUs of a task.
+ * Total GPU capacity of the task in MHz.
*/
-public const val TASK_REQ_NCPUS: String = "req_ncpus"
+public const val TASK_GPU_CAPACITY: String = "gpu_capacity"
/**
- * A column containing the allocated CPUs of a task.
+ * Total GPU memory capacity of the task in MB.
*/
-public const val TASK_ALLOC_NCPUS: String = "alloc_ncpus"
+public const val TASK_GPU_MEM_CAPACITY: String = "gpu_mem_capacity"
/**
- * A column containing the status of a task.
+ * The parents of the task that need to be completed before this task can start.
*/
-public const val TASK_STATUS: String = "status"
+public const val TASK_PARENTS: String = "parents"
+
+/**
+ * The children of the task that cannot be started before this task is completed.
+ */
+public const val TASK_CHILDREN: String = "children"
/**
- * A column containing the group id of a task.
+ * Nature of the task. Delayable, interruptible, etc.
*/
-public const val TASK_GROUP_ID: String = "group_id"
+public const val TASK_NATURE: String = "nature"
/**
- * A column containing the user id of a task.
+ * Deadline of the task.
*/
-public const val TASK_USER_ID: String = "user_id"
+public const val TASK_DEADLINE: String = "deadline"
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureResourceStateTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureResourceStateTableReader.kt
deleted file mode 100644
index bcf6ff52..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureResourceStateTableReader.kt
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.azure
-
-import com.fasterxml.jackson.core.JsonToken
-import com.fasterxml.jackson.dataformat.csv.CsvParser
-import com.fasterxml.jackson.dataformat.csv.CsvSchema
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceStateCpuUsagePct
-import org.opendc.trace.conv.resourceStateTimestamp
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-
-/**
- * A [TableReader] for the Azure v1 VM resource state table.
- */
-internal class AzureResourceStateTableReader(private val parser: CsvParser) : TableReader {
- /**
- * A flag to indicate whether a single row has been read already.
- */
- private var isStarted = false
-
- init {
- parser.schema = schema
- }
-
- override fun nextRow(): Boolean {
- if (!isStarted) {
- isStarted = true
- }
-
- reset()
-
- if (!nextStart()) {
- return false
- }
-
- while (true) {
- val token = parser.nextValue()
-
- if (token == null || token == JsonToken.END_OBJECT) {
- break
- }
-
- when (parser.currentName) {
- "timestamp" -> timestamp = Instant.ofEpochSecond(parser.longValue)
- "vm id" -> id = parser.text
- "CPU avg cpu" -> cpuUsagePct = (parser.doubleValue / 100.0) // Convert from % to [0, 1]
- }
- }
-
- return true
- }
-
- private val colID = 0
- private val colTimestamp = 1
- private val colCpuUsagePct = 2
-
- override fun resolve(name: String): Int {
- return when (name) {
- resourceID -> colID
- resourceStateTimestamp -> colTimestamp
- resourceStateCpuUsagePct -> colCpuUsagePct
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean {
- require(index in 0..colCpuUsagePct) { "Invalid column index" }
- return false
- }
-
- override fun getBoolean(index: Int): Boolean {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInt(index: Int): Int {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getLong(index: Int): Long {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getFloat(index: Int): Float {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDouble(index: Int): Double {
- checkActive()
- return when (index) {
- colCpuUsagePct -> cpuUsagePct
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getString(index: Int): String? {
- checkActive()
- return when (index) {
- colID -> id
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getUUID(index: Int): UUID? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInstant(index: Int): Instant? {
- checkActive()
- return when (index) {
- colTimestamp -> timestamp
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getDuration(index: Int): Duration? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun close() {
- parser.close()
- }
-
- /**
- * Helper method to check if the reader is active.
- */
- private fun checkActive() {
- check(isStarted && !parser.isClosed) { "No active row. Did you call nextRow()?" }
- }
-
- /**
- * Advance the parser until the next object start.
- */
- private fun nextStart(): Boolean {
- var token = parser.nextValue()
-
- while (token != null && token != JsonToken.START_OBJECT) {
- token = parser.nextValue()
- }
-
- return token != null
- }
-
- /**
- * State fields of the reader.
- */
- private var id: String? = null
- private var timestamp: Instant? = null
- private var cpuUsagePct = Double.NaN
-
- /**
- * Reset the state.
- */
- private fun reset() {
- id = null
- timestamp = null
- cpuUsagePct = Double.NaN
- }
-
- companion object {
- /**
- * The [CsvSchema] that is used to parse the trace.
- */
- private val schema =
- CsvSchema.builder()
- .addColumn("timestamp", CsvSchema.ColumnType.NUMBER)
- .addColumn("vm id", CsvSchema.ColumnType.STRING)
- .addColumn("CPU min cpu", CsvSchema.ColumnType.NUMBER)
- .addColumn("CPU max cpu", CsvSchema.ColumnType.NUMBER)
- .addColumn("CPU avg cpu", CsvSchema.ColumnType.NUMBER)
- .setAllowComments(true)
- .build()
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureResourceTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureResourceTableReader.kt
deleted file mode 100644
index 55f26fa6..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureResourceTableReader.kt
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.azure
-
-import com.fasterxml.jackson.core.JsonToken
-import com.fasterxml.jackson.dataformat.csv.CsvParser
-import com.fasterxml.jackson.dataformat.csv.CsvSchema
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceDuration
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceSubmissionTime
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-
-/**
- * A [TableReader] for the Azure v1 VM resources table.
- */
-internal class AzureResourceTableReader(private val parser: CsvParser) : TableReader {
- /**
- * A flag to indicate whether a single row has been read already.
- */
- private var isStarted = false
-
- init {
- parser.schema = schema
- }
-
- override fun nextRow(): Boolean {
- if (!isStarted) {
- isStarted = true
- }
-
- reset()
-
- if (!nextStart()) {
- return false
- }
-
- while (true) {
- val token = parser.nextValue()
-
- if (token == null || token == JsonToken.END_OBJECT) {
- break
- }
-
- when (parser.currentName) {
- "vm id" -> id = parser.text
- "timestamp vm created" -> startTime = Instant.ofEpochSecond(parser.longValue)
- "timestamp vm deleted" -> stopTime = Instant.ofEpochSecond(parser.longValue)
- "vm virtual core count" -> cpuCores = parser.intValue
- "vm memory" -> memCapacity = parser.doubleValue * 1e6 // GB to KB
- }
- }
-
- return true
- }
-
- private val colID = 0
- private val colStartTime = 1
- private val colStopTime = 2
- private val colCpuCount = 3
- private val colMemCapacity = 4
-
- override fun resolve(name: String): Int {
- return when (name) {
- resourceID -> colID
- resourceSubmissionTime -> colStartTime
- resourceDuration -> colStopTime
- resourceCpuCount -> colCpuCount
- resourceMemCapacity -> colMemCapacity
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean {
- require(index in 0..colMemCapacity) { "Invalid column index" }
- return false
- }
-
- override fun getBoolean(index: Int): Boolean {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInt(index: Int): Int {
- checkActive()
- return when (index) {
- colCpuCount -> cpuCores
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getLong(index: Int): Long {
- checkActive()
- return when (index) {
- colCpuCount -> cpuCores.toLong()
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getFloat(index: Int): Float {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDouble(index: Int): Double {
- checkActive()
- return when (index) {
- colMemCapacity -> memCapacity
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getString(index: Int): String? {
- checkActive()
- return when (index) {
- colID -> id
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getUUID(index: Int): UUID? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInstant(index: Int): Instant? {
- checkActive()
- return when (index) {
- colStartTime -> startTime
- colStopTime -> stopTime
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getDuration(index: Int): Duration? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun close() {
- parser.close()
- }
-
- /**
- * Helper method to check if the reader is active.
- */
- private fun checkActive() {
- check(isStarted && !parser.isClosed) { "No active row. Did you call nextRow()?" }
- }
-
- /**
- * Advance the parser until the next object start.
- */
- private fun nextStart(): Boolean {
- var token = parser.nextValue()
-
- while (token != null && token != JsonToken.START_OBJECT) {
- token = parser.nextValue()
- }
-
- return token != null
- }
-
- /**
- * State fields of the reader.
- */
- private var id: String? = null
- private var startTime: Instant? = null
- private var stopTime: Instant? = null
- private var cpuCores = -1
- private var memCapacity = Double.NaN
-
- /**
- * Reset the state.
- */
- private fun reset() {
- id = null
- startTime = null
- stopTime = null
- cpuCores = -1
- memCapacity = Double.NaN
- }
-
- companion object {
- /**
- * The [CsvSchema] that is used to parse the trace.
- */
- private val schema =
- CsvSchema.builder()
- .addColumn("vm id", CsvSchema.ColumnType.NUMBER)
- .addColumn("subscription id", CsvSchema.ColumnType.STRING)
- .addColumn("deployment id", CsvSchema.ColumnType.NUMBER)
- .addColumn("timestamp vm created", CsvSchema.ColumnType.NUMBER)
- .addColumn("timestamp vm deleted", CsvSchema.ColumnType.NUMBER)
- .addColumn("max cpu", CsvSchema.ColumnType.NUMBER)
- .addColumn("avg cpu", CsvSchema.ColumnType.NUMBER)
- .addColumn("p95 cpu", CsvSchema.ColumnType.NUMBER)
- .addColumn("vm category", CsvSchema.ColumnType.NUMBER)
- .addColumn("vm virtual core count", CsvSchema.ColumnType.NUMBER)
- .addColumn("vm memory", CsvSchema.ColumnType.NUMBER)
- .setAllowComments(true)
- .build()
- }
-}
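For reference, consuming the deleted Azure resources table followed the generic TableReader contract: resolve column indices once, then iterate rows. A minimal sketch, assuming the Trace.open/getTable/newReader entry points of the opendc-trace API (none of which appear in this diff):

    import org.opendc.trace.Trace
    import org.opendc.trace.conv.TABLE_RESOURCES
    import org.opendc.trace.conv.resourceCpuCount
    import org.opendc.trace.conv.resourceID
    import java.nio.file.Path

    fun dumpAzureResources(path: Path) {
        val reader = Trace.open(path, "azure").getTable(TABLE_RESOURCES)!!.newReader()
        // resolve() maps a logical column name to an index, or -1 if unknown.
        val idCol = reader.resolve(resourceID)
        val cpuCol = reader.resolve(resourceCpuCount)
        try {
            while (reader.nextRow()) {
                println("${reader.getString(idCol)}: ${reader.getInt(cpuCol)} vCPUs")
            }
        } finally {
            reader.close()
        }
    }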
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureTraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureTraceFormat.kt
deleted file mode 100644
index 7ce1c11a..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/azure/AzureTraceFormat.kt
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.azure
-
-import com.fasterxml.jackson.dataformat.csv.CsvFactory
-import com.fasterxml.jackson.dataformat.csv.CsvParser
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.TABLE_RESOURCES
-import org.opendc.trace.conv.TABLE_RESOURCE_STATES
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceDuration
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceStateCpuUsagePct
-import org.opendc.trace.conv.resourceStateTimestamp
-import org.opendc.trace.conv.resourceSubmissionTime
-import org.opendc.trace.spi.TableDetails
-import org.opendc.trace.spi.TraceFormat
-import org.opendc.trace.util.CompositeTableReader
-import java.nio.file.Files
-import java.nio.file.Path
-import java.util.stream.Collectors
-import java.util.zip.GZIPInputStream
-import kotlin.io.path.inputStream
-import kotlin.io.path.name
-
-/**
- * A format implementation for the Azure v1 format.
- */
-public class AzureTraceFormat : TraceFormat {
- /**
- * The name of this trace format.
- */
- override val name: String = "azure"
-
- /**
- * The [CsvFactory] used to create the parser.
- */
- private val factory =
- CsvFactory()
- .enable(CsvParser.Feature.ALLOW_COMMENTS)
- .enable(CsvParser.Feature.TRIM_SPACES)
-
- override fun create(path: Path) {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-
- override fun getTables(path: Path): List<String> = listOf(TABLE_RESOURCES, TABLE_RESOURCE_STATES)
-
- override fun getDetails(
- path: Path,
- table: String,
- ): TableDetails {
- return when (table) {
- TABLE_RESOURCES ->
- TableDetails(
- listOf(
- TableColumn(resourceID, TableColumnType.String),
- TableColumn(resourceSubmissionTime, TableColumnType.Instant),
- TableColumn(resourceDuration, TableColumnType.Instant),
- TableColumn(resourceCpuCount, TableColumnType.Int),
- TableColumn(resourceMemCapacity, TableColumnType.Double),
- ),
- )
- TABLE_RESOURCE_STATES ->
- TableDetails(
- listOf(
- TableColumn(resourceID, TableColumnType.String),
- TableColumn(resourceStateTimestamp, TableColumnType.Instant),
- TableColumn(resourceStateCpuUsagePct, TableColumnType.Double),
- ),
- )
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newReader(
- path: Path,
- table: String,
- projection: List<String>?,
- ): TableReader {
- return when (table) {
- TABLE_RESOURCES -> {
- val stream = GZIPInputStream(path.resolve("vmtable/vmtable.csv.gz").inputStream())
- AzureResourceTableReader(factory.createParser(stream))
- }
- TABLE_RESOURCE_STATES -> newResourceStateReader(path)
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newWriter(
- path: Path,
- table: String,
- ): TableWriter {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-
- /**
- * Construct a [TableReader] for reading over all VM CPU readings.
- */
- private fun newResourceStateReader(path: Path): TableReader {
- val partitions =
- Files.walk(path.resolve("vm_cpu_readings"), 1)
- .filter { !Files.isDirectory(it) && it.name.endsWith(".csv.gz") }
- .collect(Collectors.toMap({ it.name.removeSuffix(".csv.gz") }, { it }))
- .toSortedMap()
- val it = partitions.iterator()
-
- return object : CompositeTableReader() {
- override fun nextReader(): TableReader? {
- return if (it.hasNext()) {
- val (_, partPath) = it.next()
- val stream = GZIPInputStream(partPath.inputStream())
- return AzureResourceStateTableReader(factory.createParser(stream))
- } else {
- null
- }
- }
-
- override fun toString(): String = "AzureCompositeTableReader"
- }
- }
-}
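The deleted newResourceStateReader stitched one reader per gzipped CSV partition into a single stream. The pattern is reusable beyond Azure; a sketch of the same chaining, assuming only the CompositeTableReader base class referenced above:

    import org.opendc.trace.TableReader
    import org.opendc.trace.util.CompositeTableReader
    import java.io.InputStream
    import java.nio.file.Files
    import java.nio.file.Path
    import java.util.stream.Collectors
    import java.util.zip.GZIPInputStream
    import kotlin.io.path.inputStream
    import kotlin.io.path.name

    // Chain one TableReader per ".csv.gz" partition, visited in sorted name order.
    fun partitionedReader(
        dir: Path,
        open: (InputStream) -> TableReader,
    ): TableReader {
        val parts = Files.walk(dir, 1)
            .filter { !Files.isDirectory(it) && it.name.endsWith(".csv.gz") }
            .collect(Collectors.toMap({ it.name.removeSuffix(".csv.gz") }, { it }))
            .toSortedMap() // deterministic partition order
            .values
            .iterator()
        return object : CompositeTableReader() {
            override fun nextReader(): TableReader? =
                if (parts.hasNext()) open(GZIPInputStream(parts.next().inputStream())) else null
        }
    }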
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsExResourceStateTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsExResourceStateTableReader.kt
deleted file mode 100644
index 8387d1ed..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsExResourceStateTableReader.kt
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.bitbrains
-
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.resourceClusterID
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceStateCpuDemand
-import org.opendc.trace.conv.resourceStateCpuReadyPct
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateCpuUsagePct
-import org.opendc.trace.conv.resourceStateDiskRead
-import org.opendc.trace.conv.resourceStateDiskWrite
-import org.opendc.trace.conv.resourceStateTimestamp
-import java.io.BufferedReader
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-
-/**
- * A [TableReader] for the extended Bitbrains resource state table.
- */
-internal class BitbrainsExResourceStateTableReader(private val reader: BufferedReader) : TableReader {
- private var state = State.Pending
-
- override fun nextRow(): Boolean {
- val state = state
- if (state == State.Closed) {
- return false
- } else if (state == State.Pending) {
- this.state = State.Active
- }
-
- reset()
-
- var line: String?
- var num = 0
-
- while (true) {
- line = reader.readLine()
-
- if (line == null) {
- this.state = State.Closed
- return false
- }
-
- num++
-
- if (line[0] == '#' || line.isBlank()) {
- // Ignore empty lines or comments
- continue
- }
-
- break
- }
-
- line = line!!.trim()
-
- val length = line.length
- var col = 0
- var start: Int
- var end = 0
-
- while (end < length) {
- // Trim all whitespace before the field
- start = end
- while (start < length && line[start].isWhitespace()) {
- start++
- }
-
- end = line.indexOf(' ', start)
-
- if (end < 0) {
- end = length
- }
-
- val field = line.subSequence(start, end) as String
- when (col++) {
- colTimestamp -> timestamp = Instant.ofEpochSecond(field.toLong(10))
- colCpuUsage -> cpuUsage = field.toDouble()
- colCpuDemand -> cpuDemand = field.toDouble()
- colDiskRead -> diskRead = field.toDouble()
- colDiskWrite -> diskWrite = field.toDouble()
- colClusterID -> cluster = field.trim()
- colNcpus -> cpuCores = field.toInt(10)
- colCpuReadyPct -> cpuReadyPct = field.toDouble()
- colPoweredOn -> poweredOn = field.toInt(10) == 1
- colCpuCapacity -> cpuCapacity = field.toDouble()
- colID -> id = field.trim()
- colMemCapacity -> memCapacity = field.toDouble() * 1000 // Convert from MB to KB
- }
- }
-
- return true
- }
-
- override fun resolve(name: String): Int {
- return when (name) {
- resourceID -> colID
- resourceClusterID -> colClusterID
- resourceStateTimestamp -> colTimestamp
- resourceCpuCount -> colNcpus
- resourceCpuCapacity -> colCpuCapacity
- resourceStateCpuUsage -> colCpuUsage
- resourceStateCpuUsagePct -> colCpuUsagePct
- resourceStateCpuDemand -> colCpuDemand
- resourceStateCpuReadyPct -> colCpuReadyPct
- resourceMemCapacity -> colMemCapacity
- resourceStateDiskRead -> colDiskRead
- resourceStateDiskWrite -> colDiskWrite
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean {
- require(index in 0 until colMax) { "Invalid column index" }
- return false
- }
-
- override fun getBoolean(index: Int): Boolean {
- check(state == State.Active) { "No active row" }
- return when (index) {
- colPoweredOn -> poweredOn
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getInt(index: Int): Int {
- check(state == State.Active) { "No active row" }
- return when (index) {
- colNcpus -> cpuCores
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getLong(index: Int): Long {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getFloat(index: Int): Float {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDouble(index: Int): Double {
- check(state == State.Active) { "No active row" }
- return when (index) {
- colCpuCapacity -> cpuCapacity
- colCpuUsage -> cpuUsage
- colCpuUsagePct -> cpuUsage / cpuCapacity
- colCpuReadyPct -> cpuReadyPct
- colCpuDemand -> cpuDemand
- colMemCapacity -> memCapacity
- colDiskRead -> diskRead
- colDiskWrite -> diskWrite
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getString(index: Int): String? {
- check(state == State.Active) { "No active row" }
- return when (index) {
- colID -> id
- colClusterID -> cluster
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getUUID(index: Int): UUID? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInstant(index: Int): Instant? {
- check(state == State.Active) { "No active row" }
- return when (index) {
- colTimestamp -> timestamp
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getDuration(index: Int): Duration? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun close() {
- reader.close()
- reset()
- state = State.Closed
- }
-
- /**
- * State fields of the reader.
- */
- private var id: String? = null
- private var cluster: String? = null
- private var timestamp: Instant? = null
- private var cpuCores = -1
- private var cpuCapacity = Double.NaN
- private var cpuUsage = Double.NaN
- private var cpuDemand = Double.NaN
- private var cpuReadyPct = Double.NaN
- private var memCapacity = Double.NaN
- private var diskRead = Double.NaN
- private var diskWrite = Double.NaN
- private var poweredOn: Boolean = false
-
- /**
- * Reset the state of the reader.
- */
- private fun reset() {
- id = null
- timestamp = null
- cluster = null
- cpuCores = -1
- cpuCapacity = Double.NaN
- cpuUsage = Double.NaN
- cpuDemand = Double.NaN
- cpuReadyPct = Double.NaN
- memCapacity = Double.NaN
- diskRead = Double.NaN
- diskWrite = Double.NaN
- poweredOn = false
- }
-
- /**
- * Default column indices for the extended Bitbrains format.
- */
- private val colTimestamp = 0
- private val colCpuUsage = 1
- private val colCpuDemand = 2
- private val colDiskRead = 4
- private val colDiskWrite = 6
- private val colClusterID = 10
- private val colNcpus = 12
- private val colCpuReadyPct = 13
- private val colPoweredOn = 14
- private val colCpuCapacity = 18
- private val colID = 19
- private val colMemCapacity = 20
- private val colCpuUsagePct = 21
- private val colMax = colCpuUsagePct + 1
-
- private enum class State {
- Pending,
- Active,
- Closed,
- }
-}
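The deleted reader tokenizes each line by hand instead of using String.split, avoiding a regex and per-row array churn. The same scan, restated as a self-contained function:

    // Walk the line once: skip whitespace before each field, cut at the next space.
    fun fields(line: String): List<String> {
        val out = ArrayList<String>()
        val length = line.length
        var end = 0
        while (end < length) {
            var start = end
            while (start < length && line[start].isWhitespace()) start++
            if (start >= length) break
            end = line.indexOf(' ', start)
            if (end < 0) end = length
            out += line.substring(start, end)
        }
        return out
    }

    // fields("0  1.5 2.0") == listOf("0", "1.5", "2.0")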
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsExTraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsExTraceFormat.kt
deleted file mode 100644
index 6115953f..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsExTraceFormat.kt
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.bitbrains
-
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.TABLE_RESOURCE_STATES
-import org.opendc.trace.conv.resourceClusterID
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceStateCpuDemand
-import org.opendc.trace.conv.resourceStateCpuReadyPct
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateCpuUsagePct
-import org.opendc.trace.conv.resourceStateDiskRead
-import org.opendc.trace.conv.resourceStateDiskWrite
-import org.opendc.trace.conv.resourceStateTimestamp
-import org.opendc.trace.spi.TableDetails
-import org.opendc.trace.spi.TraceFormat
-import org.opendc.trace.util.CompositeTableReader
-import java.nio.file.Files
-import java.nio.file.Path
-import java.util.stream.Collectors
-import kotlin.io.path.bufferedReader
-import kotlin.io.path.extension
-import kotlin.io.path.nameWithoutExtension
-
-/**
- * A format implementation for the extended Bitbrains trace format.
- */
-public class BitbrainsExTraceFormat : TraceFormat {
- /**
- * The name of this trace format.
- */
- override val name: String = "bitbrains-ex"
-
- override fun create(path: Path) {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-
- override fun getTables(path: Path): List<String> = listOf(TABLE_RESOURCE_STATES)
-
- override fun getDetails(
- path: Path,
- table: String,
- ): TableDetails {
- return when (table) {
- TABLE_RESOURCE_STATES ->
- TableDetails(
- listOf(
- TableColumn(resourceID, TableColumnType.String),
- TableColumn(resourceClusterID, TableColumnType.String),
- TableColumn(resourceStateTimestamp, TableColumnType.Instant),
- TableColumn(resourceCpuCount, TableColumnType.Int),
- TableColumn(resourceCpuCapacity, TableColumnType.Double),
- TableColumn(resourceStateCpuUsage, TableColumnType.Double),
- TableColumn(resourceStateCpuUsagePct, TableColumnType.Double),
- TableColumn(resourceStateCpuDemand, TableColumnType.Double),
- TableColumn(resourceStateCpuReadyPct, TableColumnType.Double),
- TableColumn(resourceMemCapacity, TableColumnType.Double),
- TableColumn(resourceStateDiskRead, TableColumnType.Double),
- TableColumn(resourceStateDiskWrite, TableColumnType.Double),
- ),
- )
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newReader(
- path: Path,
- table: String,
- projection: List<String>?,
- ): TableReader {
- return when (table) {
- TABLE_RESOURCE_STATES -> newResourceStateReader(path)
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newWriter(
- path: Path,
- table: String,
- ): TableWriter {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-
- /**
- * Construct a [TableReader] for reading over all resource state partitions.
- */
- private fun newResourceStateReader(path: Path): TableReader {
- val partitions =
- Files.walk(path, 1)
- .filter { !Files.isDirectory(it) && it.extension == "txt" }
- .collect(Collectors.toMap({ it.nameWithoutExtension }, { it }))
- .toSortedMap()
- val it = partitions.iterator()
-
- return object : CompositeTableReader() {
- override fun nextReader(): TableReader? {
- return if (it.hasNext()) {
- val (_, partPath) = it.next()
- return BitbrainsExResourceStateTableReader(partPath.bufferedReader())
- } else {
- null
- }
- }
-
- override fun toString(): String = "BitbrainsExCompositeTableReader"
- }
- }
-}
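A consumer addressed the extended format by the name registered above. A hedged usage sketch; Trace.open and getTable are assumed from the opendc-trace API and are not shown in this diff:

    import org.opendc.trace.Trace
    import org.opendc.trace.conv.TABLE_RESOURCE_STATES
    import org.opendc.trace.conv.resourceStateCpuReadyPct
    import java.nio.file.Path

    fun meanCpuReadyPct(path: Path): Double {
        val reader = Trace.open(path, "bitbrains-ex")
            .getTable(TABLE_RESOURCE_STATES)!!
            .newReader()
        val readyCol = reader.resolve(resourceStateCpuReadyPct)
        var sum = 0.0
        var n = 0L
        try {
            while (reader.nextRow()) {
                sum += reader.getDouble(readyCol)
                n++
            }
        } finally {
            reader.close()
        }
        return if (n == 0L) Double.NaN else sum / n
    }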
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsResourceStateTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsResourceStateTableReader.kt
deleted file mode 100644
index e264fccb..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsResourceStateTableReader.kt
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.bitbrains
-
-import com.fasterxml.jackson.core.JsonParseException
-import com.fasterxml.jackson.core.JsonToken
-import com.fasterxml.jackson.dataformat.csv.CsvParser
-import com.fasterxml.jackson.dataformat.csv.CsvSchema
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateCpuUsagePct
-import org.opendc.trace.conv.resourceStateDiskRead
-import org.opendc.trace.conv.resourceStateDiskWrite
-import org.opendc.trace.conv.resourceStateMemUsage
-import org.opendc.trace.conv.resourceStateNetRx
-import org.opendc.trace.conv.resourceStateNetTx
-import org.opendc.trace.conv.resourceStateTimestamp
-import java.text.NumberFormat
-import java.time.Duration
-import java.time.Instant
-import java.time.LocalDateTime
-import java.time.ZoneOffset
-import java.time.format.DateTimeFormatter
-import java.time.format.DateTimeParseException
-import java.util.Locale
-import java.util.UUID
-
-/**
- * A [TableReader] for the Bitbrains resource state table.
- */
-internal class BitbrainsResourceStateTableReader(private val partition: String, private val parser: CsvParser) : TableReader {
- /**
- * A flag to indicate whether a single row has been read already.
- */
- private var isStarted = false
-
- /**
- * The [DateTimeFormatter] used to parse the timestamps in case of the Materna trace.
- */
- private val formatter = DateTimeFormatter.ofPattern("dd.MM.yyyy HH:mm:ss")
-
- /**
- * The type of timestamps in the trace.
- */
- private var timestampType: TimestampType = TimestampType.UNDECIDED
-
- /**
- * The [NumberFormat] used to parse doubles containing a comma.
- */
- private val nf = NumberFormat.getInstance(Locale.GERMAN)
-
- /**
- * A flag to indicate that the trace contains decimals with a comma separator.
- */
- private var usesCommaDecimalSeparator = false
-
- init {
- parser.schema = schema
- }
-
- override fun nextRow(): Boolean {
- if (!isStarted) {
- isStarted = true
- }
-
- // Reset the row state
- reset()
-
- if (!nextStart()) {
- return false
- }
-
- while (true) {
- val token = parser.nextValue()
-
- if (token == null || token == JsonToken.END_OBJECT) {
- break
- }
-
- when (parser.currentName) {
- "Timestamp [ms]" -> {
- timestamp =
- when (timestampType) {
- TimestampType.UNDECIDED -> {
- try {
- val res = LocalDateTime.parse(parser.text, formatter).toInstant(ZoneOffset.UTC)
- timestampType = TimestampType.DATE_TIME
- res
- } catch (e: DateTimeParseException) {
- timestampType = TimestampType.EPOCH_MILLIS
- Instant.ofEpochSecond(parser.longValue)
- }
- }
- TimestampType.DATE_TIME -> LocalDateTime.parse(parser.text, formatter).toInstant(ZoneOffset.UTC)
- TimestampType.EPOCH_MILLIS -> Instant.ofEpochSecond(parser.longValue)
- }
- }
- "CPU cores" -> cpuCores = parser.intValue
- "CPU capacity provisioned [MHZ]" -> cpuCapacity = parseSafeDouble()
- "CPU usage [MHZ]" -> cpuUsage = parseSafeDouble()
- "CPU usage [%]" -> cpuUsagePct = parseSafeDouble() / 100.0 // Convert to range [0, 1]
- "Memory capacity provisioned [KB]" -> memCapacity = parseSafeDouble()
- "Memory usage [KB]" -> memUsage = parseSafeDouble()
- "Disk read throughput [KB/s]" -> diskRead = parseSafeDouble()
- "Disk write throughput [KB/s]" -> diskWrite = parseSafeDouble()
- "Network received throughput [KB/s]" -> netReceived = parseSafeDouble()
- "Network transmitted throughput [KB/s]" -> netTransmitted = parseSafeDouble()
- }
- }
-
- return true
- }
-
- private val colTimestamp = 0
- private val colCpuCount = 1
- private val colCpuCapacity = 2
- private val colCpuUsage = 3
- private val colCpuUsagePct = 4
- private val colMemCapacity = 5
- private val colMemUsage = 6
- private val colDiskRead = 7
- private val colDiskWrite = 8
- private val colNetRx = 9
- private val colNetTx = 10
- private val colID = 11
-
- override fun resolve(name: String): Int {
- return when (name) {
- resourceID -> colID
- resourceStateTimestamp -> colTimestamp
- resourceCpuCount -> colCpuCount
- resourceCpuCapacity -> colCpuCapacity
- resourceStateCpuUsage -> colCpuUsage
- resourceStateCpuUsagePct -> colCpuUsagePct
- resourceMemCapacity -> colMemCapacity
- resourceStateMemUsage -> colMemUsage
- resourceStateDiskRead -> colDiskRead
- resourceStateDiskWrite -> colDiskWrite
- resourceStateNetRx -> colNetRx
- resourceStateNetTx -> colNetTx
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean {
- require(index in 0..colID) { "Invalid column index" }
- return false
- }
-
- override fun getBoolean(index: Int): Boolean {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInt(index: Int): Int {
- checkActive()
- return when (index) {
- colCpuCount -> cpuCores
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getLong(index: Int): Long {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getFloat(index: Int): Float {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDouble(index: Int): Double {
- checkActive()
- return when (index) {
- colCpuCapacity -> cpuCapacity
- colCpuUsage -> cpuUsage
- colCpuUsagePct -> cpuUsagePct
- colMemCapacity -> memCapacity
- colMemUsage -> memUsage
- colDiskRead -> diskRead
- colDiskWrite -> diskWrite
- colNetRx -> netReceived
- colNetTx -> netTransmitted
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getString(index: Int): String {
- checkActive()
- return when (index) {
- colID -> partition
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getUUID(index: Int): UUID? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInstant(index: Int): Instant? {
- checkActive()
- return when (index) {
- colTimestamp -> timestamp
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getDuration(index: Int): Duration? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun close() {
- parser.close()
- }
-
- /**
- * Helper method to check if the reader is active.
- */
- private fun checkActive() {
- check(isStarted && !parser.isClosed) { "No active row. Did you call nextRow()?" }
- }
-
- /**
- * Advance the parser until the next object start.
- */
- private fun nextStart(): Boolean {
- var token = parser.nextValue()
-
- while (token != null && token != JsonToken.START_OBJECT) {
- token = parser.nextValue()
- }
-
- return token != null
- }
-
- /**
- * Try to parse the current value safely as double.
- */
- private fun parseSafeDouble(): Double {
- if (!usesCommaDecimalSeparator) {
- try {
- return parser.doubleValue
- } catch (e: JsonParseException) {
- usesCommaDecimalSeparator = true
- }
- }
-
- val text = parser.text
- if (text.isBlank()) {
- return 0.0
- }
-
- return nf.parse(text).toDouble()
- }
-
- /**
- * State fields of the reader.
- */
- private var timestamp: Instant? = null
- private var cpuCores = -1
- private var cpuCapacity = Double.NaN
- private var cpuUsage = Double.NaN
- private var cpuUsagePct = Double.NaN
- private var memCapacity = Double.NaN
- private var memUsage = Double.NaN
- private var diskRead = Double.NaN
- private var diskWrite = Double.NaN
- private var netReceived = Double.NaN
- private var netTransmitted = Double.NaN
-
- /**
- * Reset the state.
- */
- private fun reset() {
- timestamp = null
- cpuCores = -1
- cpuCapacity = Double.NaN
- cpuUsage = Double.NaN
- cpuUsagePct = Double.NaN
- memCapacity = Double.NaN
- memUsage = Double.NaN
- diskRead = Double.NaN
- diskWrite = Double.NaN
- netReceived = Double.NaN
- netTransmitted = Double.NaN
- }
-
- /**
- * The type of the timestamp in the trace.
- */
- private enum class TimestampType {
- UNDECIDED,
- DATE_TIME,
- EPOCH_MILLIS,
- }
-
- companion object {
- /**
- * The [CsvSchema] that is used to parse the trace.
- */
- private val schema =
- CsvSchema.builder()
- .addColumn("Timestamp [ms]", CsvSchema.ColumnType.NUMBER_OR_STRING)
- .addColumn("CPU cores", CsvSchema.ColumnType.NUMBER)
- .addColumn("CPU capacity provisioned [MHZ]", CsvSchema.ColumnType.NUMBER)
- .addColumn("CPU usage [MHZ]", CsvSchema.ColumnType.NUMBER)
- .addColumn("CPU usage [%]", CsvSchema.ColumnType.NUMBER)
- .addColumn("Memory capacity provisioned [KB]", CsvSchema.ColumnType.NUMBER)
- .addColumn("Memory usage [KB]", CsvSchema.ColumnType.NUMBER)
- .addColumn("Memory usage [%]", CsvSchema.ColumnType.NUMBER)
- .addColumn("Disk read throughput [KB/s]", CsvSchema.ColumnType.NUMBER)
- .addColumn("Disk write throughput [KB/s]", CsvSchema.ColumnType.NUMBER)
- .addColumn("Disk size [GB]", CsvSchema.ColumnType.NUMBER)
- .addColumn("Network received throughput [KB/s]", CsvSchema.ColumnType.NUMBER)
- .addColumn("Network transmitted throughput [KB/s]", CsvSchema.ColumnType.NUMBER)
- .setAllowComments(true)
- .setUseHeader(true)
- .setColumnSeparator(';')
- .build()
- }
-}
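Two input quirks the deleted reader resolved lazily, deciding once on first contact and sticking with the answer: Materna-style "dd.MM.yyyy HH:mm:ss" timestamps versus epoch values, and comma decimal separators. The same strategy as a standalone sketch (epoch values parsed as seconds, mirroring the reader above):

    import java.text.NumberFormat
    import java.time.Instant
    import java.time.LocalDateTime
    import java.time.ZoneOffset
    import java.time.format.DateTimeFormatter
    import java.time.format.DateTimeParseException
    import java.util.Locale

    private val formatter = DateTimeFormatter.ofPattern("dd.MM.yyyy HH:mm:ss")
    private val germanNumbers = NumberFormat.getInstance(Locale.GERMAN)
    private var isDateTime: Boolean? = null // null until the first row decides

    fun parseTimestamp(raw: String): Instant =
        when (isDateTime) {
            true -> LocalDateTime.parse(raw, formatter).toInstant(ZoneOffset.UTC)
            false -> Instant.ofEpochSecond(raw.toLong())
            null ->
                try {
                    LocalDateTime.parse(raw, formatter).toInstant(ZoneOffset.UTC)
                        .also { isDateTime = true }
                } catch (e: DateTimeParseException) {
                    isDateTime = false
                    Instant.ofEpochSecond(raw.toLong())
                }
        }

    // "1,5"-style decimals fall back to a German-locale parse; blanks become 0.0.
    fun parseLenientDouble(raw: String): Double =
        raw.toDoubleOrNull() ?: if (raw.isBlank()) 0.0 else germanNumbers.parse(raw).toDouble()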
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsResourceTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsResourceTableReader.kt
deleted file mode 100644
index a12785f0..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsResourceTableReader.kt
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.bitbrains
-
-import com.fasterxml.jackson.dataformat.csv.CsvFactory
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.resourceID
-import java.nio.file.Path
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-
-/**
- * A [TableReader] for the Bitbrains resource table.
- */
-internal class BitbrainsResourceTableReader(private val factory: CsvFactory, vms: Map<String, Path>) : TableReader {
- /**
- * An iterator to iterate over the resource entries.
- */
- private val it = vms.iterator()
-
- /**
- * The state of the reader.
- */
- private var state = State.Pending
-
- override fun nextRow(): Boolean {
- if (state == State.Pending) {
- state = State.Active
- }
-
- reset()
-
- while (it.hasNext()) {
- val (name, path) = it.next()
-
- val parser = factory.createParser(path.toFile())
- val reader = BitbrainsResourceStateTableReader(name, parser)
- val idCol = reader.resolve(resourceID)
-
- try {
- if (!reader.nextRow()) {
- continue
- }
-
- id = reader.getString(idCol)
- return true
- } finally {
- reader.close()
- }
- }
-
- state = State.Closed
- return false
- }
-
- private val colID = 0
-
- override fun resolve(name: String): Int {
- return when (name) {
- resourceID -> colID
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean {
- require(index in 0..colID) { "Invalid column index" }
- return false
- }
-
- override fun getBoolean(index: Int): Boolean {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInt(index: Int): Int {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getLong(index: Int): Long {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getFloat(index: Int): Float {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDouble(index: Int): Double {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getString(index: Int): String? {
- check(state == State.Active) { "No active row" }
- return when (index) {
- colID -> id
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getUUID(index: Int): UUID? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInstant(index: Int): Instant? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDuration(index: Int): Duration? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun close() {
- reset()
- state = State.Closed
- }
-
- /**
- * State fields of the reader.
- */
- private var id: String? = null
-
- /**
- * Reset the state of the reader.
- */
- private fun reset() {
- id = null
- }
-
- private enum class State {
- Pending,
- Active,
- Closed,
- }
-}
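The deleted resources table is synthetic: each per-VM CSV contributes exactly one row, peeked from the first state record and then discarded. The core of that peek, as a small helper:

    import org.opendc.trace.TableReader

    // Read a single row from a per-VM state reader and surface its ID, closing
    // the reader either way. Mirrors the loop in nextRow() above.
    fun peekFirstId(reader: TableReader, idCol: Int): String? =
        try {
            if (reader.nextRow()) reader.getString(idCol) else null
        } finally {
            reader.close()
        }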
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsTraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsTraceFormat.kt
deleted file mode 100644
index 23853077..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/bitbrains/BitbrainsTraceFormat.kt
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.bitbrains
-
-import com.fasterxml.jackson.dataformat.csv.CsvFactory
-import com.fasterxml.jackson.dataformat.csv.CsvParser
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.TABLE_RESOURCES
-import org.opendc.trace.conv.TABLE_RESOURCE_STATES
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateCpuUsagePct
-import org.opendc.trace.conv.resourceStateDiskRead
-import org.opendc.trace.conv.resourceStateDiskWrite
-import org.opendc.trace.conv.resourceStateMemUsage
-import org.opendc.trace.conv.resourceStateNetRx
-import org.opendc.trace.conv.resourceStateNetTx
-import org.opendc.trace.conv.resourceStateTimestamp
-import org.opendc.trace.spi.TableDetails
-import org.opendc.trace.spi.TraceFormat
-import org.opendc.trace.util.CompositeTableReader
-import java.nio.file.Files
-import java.nio.file.Path
-import java.util.stream.Collectors
-import kotlin.io.path.extension
-import kotlin.io.path.nameWithoutExtension
-
-/**
- * A format implementation for the Bitbrains trace format.
- */
-public class BitbrainsTraceFormat : TraceFormat {
- /**
- * The name of this trace format.
- */
- override val name: String = "bitbrains"
-
- /**
- * The [CsvFactory] used to create the parser.
- */
- private val factory =
- CsvFactory()
- .enable(CsvParser.Feature.ALLOW_COMMENTS)
- .enable(CsvParser.Feature.TRIM_SPACES)
-
- override fun create(path: Path) {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-
- override fun getTables(path: Path): List<String> = listOf(TABLE_RESOURCES, TABLE_RESOURCE_STATES)
-
- override fun getDetails(
- path: Path,
- table: String,
- ): TableDetails {
- return when (table) {
- TABLE_RESOURCES ->
- TableDetails(
- listOf(
- TableColumn(resourceID, TableColumnType.String),
- ),
- )
- TABLE_RESOURCE_STATES ->
- TableDetails(
- listOf(
- TableColumn(resourceID, TableColumnType.String),
- TableColumn(resourceStateTimestamp, TableColumnType.Instant),
- TableColumn(resourceCpuCount, TableColumnType.Int),
- TableColumn(resourceCpuCapacity, TableColumnType.Double),
- TableColumn(resourceStateCpuUsage, TableColumnType.Double),
- TableColumn(resourceStateCpuUsagePct, TableColumnType.Double),
- TableColumn(resourceMemCapacity, TableColumnType.Double),
- TableColumn(resourceStateMemUsage, TableColumnType.Double),
- TableColumn(resourceStateDiskRead, TableColumnType.Double),
- TableColumn(resourceStateDiskWrite, TableColumnType.Double),
- TableColumn(resourceStateNetRx, TableColumnType.Double),
- TableColumn(resourceStateNetTx, TableColumnType.Double),
- ),
- )
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newReader(
- path: Path,
- table: String,
- projection: List<String>?,
- ): TableReader {
- return when (table) {
- TABLE_RESOURCES -> {
- val vms =
- Files.walk(path, 1)
- .filter { !Files.isDirectory(it) && it.extension == "csv" }
- .collect(Collectors.toMap({ it.nameWithoutExtension }, { it }))
- .toSortedMap()
- BitbrainsResourceTableReader(factory, vms)
- }
- TABLE_RESOURCE_STATES -> newResourceStateReader(path)
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newWriter(
- path: Path,
- table: String,
- ): TableWriter {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-
- /**
- * Construct a [TableReader] for reading over all resource state partitions.
- */
- private fun newResourceStateReader(path: Path): TableReader {
- val partitions =
- Files.walk(path, 1)
- .filter { !Files.isDirectory(it) && it.extension == "csv" }
- .collect(Collectors.toMap({ it.nameWithoutExtension }, { it }))
- .toSortedMap()
- val it = partitions.iterator()
-
- return object : CompositeTableReader() {
- override fun nextReader(): TableReader? {
- return if (it.hasNext()) {
- val (partition, partPath) = it.next()
- return BitbrainsResourceStateTableReader(partition, factory.createParser(partPath.toFile()))
- } else {
- null
- }
- }
-
- override fun toString(): String = "BitbrainsCompositeTableReader"
- }
- }
-}
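All of the deleted format classes were wired up through the TraceFormat SPI. A sketch of how a format would be located by name, assuming standard META-INF/services registration (the registration files are not part of this excerpt):

    import org.opendc.trace.spi.TraceFormat
    import java.util.ServiceLoader

    // Resolve a TraceFormat implementation by its declared name, e.g. "bitbrains".
    fun findFormat(name: String): TraceFormat? =
        ServiceLoader.load(TraceFormat::class.java).firstOrNull { it.name == name }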
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/CarbonTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/CarbonTableReader.kt
index 226c8806..c5face9f 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/CarbonTableReader.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/CarbonTableReader.kt
@@ -23,9 +23,9 @@
package org.opendc.trace.formats.carbon
import org.opendc.trace.TableReader
-import org.opendc.trace.conv.CARBON_INTENSITY_TIMESTAMP
-import org.opendc.trace.conv.CARBON_INTENSITY_VALUE
-import org.opendc.trace.formats.carbon.parquet.CarbonIntensityFragment
+import org.opendc.trace.conv.CARBON_INTENSITY
+import org.opendc.trace.conv.CARBON_TIMESTAMP
+import org.opendc.trace.formats.carbon.parquet.CarbonFragment
import org.opendc.trace.util.parquet.LocalParquetReader
import java.time.Duration
import java.time.Instant
@@ -34,11 +34,11 @@ import java.util.UUID
/**
 * A [TableReader] implementation for the carbon intensity trace format.
*/
-internal class CarbonTableReader(private val reader: LocalParquetReader<CarbonIntensityFragment>) : TableReader {
+internal class CarbonTableReader(private val reader: LocalParquetReader<CarbonFragment>) : TableReader {
/**
* The current record.
*/
- private var record: CarbonIntensityFragment? = null
+ private var record: CarbonFragment? = null
override fun nextRow(): Boolean {
try {
@@ -57,8 +57,8 @@ internal class CarbonTableReader(private val reader: LocalParquetReader<CarbonIn
override fun resolve(name: String): Int {
return when (name) {
- CARBON_INTENSITY_TIMESTAMP -> colTimestamp
- CARBON_INTENSITY_VALUE -> colCarbonIntensity
+ CARBON_TIMESTAMP -> colTimestamp
+ CARBON_INTENSITY -> colCarbonIntensity
else -> -1
}
}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/CarbonTraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/CarbonTraceFormat.kt
index d8adc739..764bb349 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/CarbonTraceFormat.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/CarbonTraceFormat.kt
@@ -26,10 +26,10 @@ import org.opendc.trace.TableColumn
import org.opendc.trace.TableColumnType
import org.opendc.trace.TableReader
import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.CARBON_INTENSITY_TIMESTAMP
-import org.opendc.trace.conv.CARBON_INTENSITY_VALUE
-import org.opendc.trace.conv.TABLE_CARBON_INTENSITIES
-import org.opendc.trace.formats.carbon.parquet.CarbonIntensityReadSupport
+import org.opendc.trace.conv.CARBON_INTENSITY
+import org.opendc.trace.conv.CARBON_TIMESTAMP
+import org.opendc.trace.conv.TABLE_CARBON
+import org.opendc.trace.formats.carbon.parquet.CarbonReadSupport
import org.opendc.trace.spi.TableDetails
import org.opendc.trace.spi.TraceFormat
import org.opendc.trace.util.parquet.LocalParquetReader
@@ -45,18 +45,18 @@ public class CarbonTraceFormat : TraceFormat {
throw UnsupportedOperationException("Writing not supported for this format")
}
- override fun getTables(path: Path): List<String> = listOf(TABLE_CARBON_INTENSITIES)
+ override fun getTables(path: Path): List<String> = listOf(TABLE_CARBON)
override fun getDetails(
path: Path,
table: String,
): TableDetails {
return when (table) {
- TABLE_CARBON_INTENSITIES ->
+ TABLE_CARBON ->
TableDetails(
listOf(
- TableColumn(CARBON_INTENSITY_TIMESTAMP, TableColumnType.Instant),
- TableColumn(CARBON_INTENSITY_VALUE, TableColumnType.Double),
+ TableColumn(CARBON_TIMESTAMP, TableColumnType.Instant),
+ TableColumn(CARBON_INTENSITY, TableColumnType.Double),
),
)
else -> throw IllegalArgumentException("Table $table not supported")
@@ -69,8 +69,8 @@ public class CarbonTraceFormat : TraceFormat {
projection: List<String>?,
): TableReader {
return when (table) {
- TABLE_CARBON_INTENSITIES -> {
- val reader = LocalParquetReader(path, CarbonIntensityReadSupport(projection))
+ TABLE_CARBON -> {
+ val reader = LocalParquetReader(path, CarbonReadSupport(projection))
CarbonTableReader(reader)
}
else -> throw IllegalArgumentException("Table $table not supported")
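After this rename, callers address the table as TABLE_CARBON and its columns as CARBON_TIMESTAMP and CARBON_INTENSITY. A minimal read loop under the new names; Trace.open is an assumed entry point and the intensity unit is not established by this diff:

    import org.opendc.trace.Trace
    import org.opendc.trace.conv.CARBON_INTENSITY
    import org.opendc.trace.conv.CARBON_TIMESTAMP
    import org.opendc.trace.conv.TABLE_CARBON
    import java.nio.file.Path

    fun printCarbonIntensities(path: Path) {
        val reader = Trace.open(path, "carbon").getTable(TABLE_CARBON)!!.newReader()
        val tsCol = reader.resolve(CARBON_TIMESTAMP)
        val ciCol = reader.resolve(CARBON_INTENSITY)
        try {
            while (reader.nextRow()) {
                println("${reader.getInstant(tsCol)}: ${reader.getDouble(ciCol)}")
            }
        } finally {
            reader.close()
        }
    }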
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonIntensityFragment.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonFragment.kt
index 3211cb6c..fe05876b 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonIntensityFragment.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonFragment.kt
@@ -27,7 +27,7 @@ import java.time.Instant
/**
 * A single carbon intensity sample in the carbon trace format.
*/
-internal data class CarbonIntensityFragment(
+internal data class CarbonFragment(
val timestamp: Instant,
val carbonIntensity: Double,
)
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonIntensityReadSupport.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonReadSupport.kt
index 2f4eac05..53087079 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonIntensityReadSupport.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonReadSupport.kt
@@ -26,26 +26,24 @@ import org.apache.hadoop.conf.Configuration
import org.apache.parquet.hadoop.api.InitContext
import org.apache.parquet.hadoop.api.ReadSupport
import org.apache.parquet.io.api.RecordMaterializer
-import org.apache.parquet.schema.LogicalTypeAnnotation
import org.apache.parquet.schema.MessageType
-import org.apache.parquet.schema.PrimitiveType
import org.apache.parquet.schema.Types
-import org.opendc.trace.conv.CARBON_INTENSITY_TIMESTAMP
-import org.opendc.trace.conv.CARBON_INTENSITY_VALUE
+import org.opendc.trace.conv.CARBON_INTENSITY
+import org.opendc.trace.conv.CARBON_TIMESTAMP
/**
- * A [ReadSupport] instance for [Task] objects.
+ * A [ReadSupport] instance for [CarbonFragment] objects.
*
* @param projection The projection of the table to read.
*/
-internal class CarbonIntensityReadSupport(private val projection: List<String>?) : ReadSupport<CarbonIntensityFragment>() {
+internal class CarbonReadSupport(private val projection: List<String>?) : ReadSupport<CarbonFragment>() {
/**
* Mapping of table columns to their Parquet column names.
*/
private val colMap =
mapOf(
- CARBON_INTENSITY_TIMESTAMP to "timestamp",
- CARBON_INTENSITY_VALUE to "carbon_intensity",
+ CARBON_TIMESTAMP to "timestamp",
+ CARBON_INTENSITY to "carbon_intensity",
)
override fun init(context: InitContext): ReadContext {
@@ -53,16 +51,16 @@ internal class CarbonIntensityReadSupport(private val projection: List<String>?)
if (projection != null) {
Types.buildMessage()
.apply {
- val fieldByName = READ_SCHEMA.fields.associateBy { it.name }
+ val fieldByName = CARBON_SCHEMA.fields.associateBy { it.name }
for (col in projection) {
val fieldName = colMap[col] ?: continue
addField(fieldByName.getValue(fieldName))
}
}
- .named(READ_SCHEMA.name)
+ .named(CARBON_SCHEMA.name)
} else {
- READ_SCHEMA
+ CARBON_SCHEMA
}
return ReadContext(projectedSchema)
}
@@ -72,24 +70,5 @@ internal class CarbonIntensityReadSupport(private val projection: List<String>?)
keyValueMetaData: Map<String, String>,
fileSchema: MessageType,
readContext: ReadContext,
- ): RecordMaterializer<CarbonIntensityFragment> = CarbonIntensityRecordMaterializer(readContext.requestedSchema)
-
- companion object {
- /**
- * Parquet read schema for the "tasks" table in the trace.
- */
- @JvmStatic
- val READ_SCHEMA: MessageType =
- Types.buildMessage()
- .addFields(
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
- .named("timestamp"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
- .named("carbon_intensity"),
- )
- .named("carbon_intensity_fragment")
- }
+ ): RecordMaterializer<CarbonFragment> = CarbonRecordMaterializer(readContext.requestedSchema)
}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonIntensityRecordMaterializer.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonRecordMaterializer.kt
index f5d68f22..aa915a39 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonIntensityRecordMaterializer.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonRecordMaterializer.kt
@@ -30,9 +30,9 @@ import org.apache.parquet.schema.MessageType
import java.time.Instant
/**
- * A [RecordMaterializer] for [Task] records.
+ * A [RecordMaterializer] for [CarbonFragment] records.
*/
-internal class CarbonIntensityRecordMaterializer(schema: MessageType) : RecordMaterializer<CarbonIntensityFragment>() {
+internal class CarbonRecordMaterializer(schema: MessageType) : RecordMaterializer<CarbonFragment>() {
/**
* State of current record being read.
*/
@@ -76,8 +76,8 @@ internal class CarbonIntensityRecordMaterializer(schema: MessageType) : RecordMa
override fun getConverter(fieldIndex: Int): Converter = converters[fieldIndex]
}
- override fun getCurrentRecord(): CarbonIntensityFragment =
- CarbonIntensityFragment(
+ override fun getCurrentRecord(): CarbonFragment =
+ CarbonFragment(
localTimestamp,
localCarbonIntensity,
)
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonSchemas.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonSchemas.kt
new file mode 100644
index 00000000..c8b11968
--- /dev/null
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/carbon/parquet/CarbonSchemas.kt
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2025 AtLarge Research
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package org.opendc.trace.formats.carbon.parquet
+
+import org.apache.parquet.schema.LogicalTypeAnnotation
+import org.apache.parquet.schema.MessageType
+import org.apache.parquet.schema.PrimitiveType
+import org.apache.parquet.schema.Types
+
+private val CARBON_SCHEMA_v1: MessageType =
+ Types.buildMessage()
+ .addFields(
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.INT64)
+ .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
+ .named("timestamp"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
+ .named("carbon_intensity"),
+ )
+ .named("carbon_intensity_fragment")
+
+public val CARBON_SCHEMA: MessageType = CARBON_SCHEMA_v1
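Keeping the concrete schema private as CARBON_SCHEMA_v1 and exporting an alias lets the on-disk format evolve without touching read-support call sites. A hypothetical v2 (the region column is invented purely for illustration) would only move the alias:

    // Hypothetical follow-up, reusing the imports of the file above. Only the
    // alias changes; CarbonReadSupport keeps referring to CARBON_SCHEMA.
    private val CARBON_SCHEMA_v2: MessageType =
        Types.buildMessage()
            .addFields(
                Types.optional(PrimitiveType.PrimitiveTypeName.INT64)
                    .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
                    .named("timestamp"),
                Types.optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
                    .named("carbon_intensity"),
                Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
                    .`as`(LogicalTypeAnnotation.stringType())
                    .named("region"),
            )
            .named("carbon_intensity_fragment")

    // public val CARBON_SCHEMA: MessageType = CARBON_SCHEMA_v2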
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureReadSupport.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureReadSupport.kt
index d49f86c6..9bd8fd72 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureReadSupport.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureReadSupport.kt
@@ -27,14 +27,13 @@ import org.apache.parquet.hadoop.api.InitContext
import org.apache.parquet.hadoop.api.ReadSupport
import org.apache.parquet.io.api.RecordMaterializer
import org.apache.parquet.schema.MessageType
-import org.apache.parquet.schema.PrimitiveType
import org.apache.parquet.schema.Types
import org.opendc.trace.conv.FAILURE_DURATION
import org.opendc.trace.conv.FAILURE_INTENSITY
import org.opendc.trace.conv.FAILURE_INTERVAL
/**
- * A [ReadSupport] instance for [Task] objects.
+ * A [ReadSupport] instance for [FailureFragment] objects.
*
* @param projection The projection of the table to read.
*/
@@ -54,16 +53,16 @@ internal class FailureReadSupport(private val projection: List<String>?) : ReadS
if (projection != null) {
Types.buildMessage()
.apply {
- val fieldByName = READ_SCHEMA.fields.associateBy { it.name }
+ val fieldByName = FAILURE_SCHEMA.fields.associateBy { it.name }
for (col in projection) {
val fieldName = colMap[col] ?: continue
addField(fieldByName.getValue(fieldName))
}
}
- .named(READ_SCHEMA.name)
+ .named(FAILURE_SCHEMA.name)
} else {
- READ_SCHEMA
+ FAILURE_SCHEMA
}
return ReadContext(projectedSchema)
}
@@ -74,25 +73,4 @@ internal class FailureReadSupport(private val projection: List<String>?) : ReadS
fileSchema: MessageType,
readContext: ReadContext,
): RecordMaterializer<FailureFragment> = FailureRecordMaterializer(readContext.requestedSchema)
-
- companion object {
- /**
- * Parquet read schema for the "tasks" table in the trace.
- */
- @JvmStatic
- val READ_SCHEMA: MessageType =
- Types.buildMessage()
- .addFields(
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .named("failure_interval"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .named("failure_duration"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
- .named("failure_intensity"),
- )
- .named("failure_fragment")
- }
}
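
The projection branch above resolves each requested column through colMap before looking the field up in FAILURE_SCHEMA. The mapping itself is outside this hunk; judging from the imports and the schema's field names, it presumably has the following shape (an assumption, not code from this commit):

    // Assumed shape of colMap: public column constants -> parquet field names.
    val colMap = mapOf(
        FAILURE_INTERVAL to "failure_interval",
        FAILURE_DURATION to "failure_duration",
        FAILURE_INTENSITY to "failure_intensity",
    )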
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureRecordMaterializer.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureRecordMaterializer.kt
index 5a00f8c9..83281984 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureRecordMaterializer.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureRecordMaterializer.kt
@@ -29,7 +29,7 @@ import org.apache.parquet.io.api.RecordMaterializer
import org.apache.parquet.schema.MessageType
/**
- * A [RecordMaterializer] for [Task] records.
+ * A [RecordMaterializer] for [FailureFragment] records.
*/
internal class FailureRecordMaterializer(schema: MessageType) : RecordMaterializer<FailureFragment>() {
/**
diff --git a/opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/TraceTools.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureSchemas.kt
index 8f3dc60d..bafac387 100644
--- a/opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/TraceTools.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/failure/parquet/FailureSchemas.kt
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022 AtLarge Research
+ * Copyright (c) 2025 AtLarge Research
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -20,26 +20,25 @@
* SOFTWARE.
*/
-@file:JvmName("TraceTools")
+package org.opendc.trace.formats.failure.parquet
-package org.opendc.trace.tools
+import org.apache.parquet.schema.MessageType
+import org.apache.parquet.schema.PrimitiveType
+import org.apache.parquet.schema.Types
-import com.github.ajalt.clikt.core.CliktCommand
-import com.github.ajalt.clikt.core.subcommands
+private val FAILURE_SCHEMA_v1: MessageType =
+ Types.buildMessage()
+ .addFields(
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.INT64)
+ .named("failure_interval"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.INT64)
+ .named("failure_duration"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
+ .named("failure_intensity"),
+ )
+ .named("failure_fragment")
-/**
- * A script for querying and manipulating workload traces supported by OpenDC.
- */
-fun main(args: Array<String>): Unit = TraceToolsCli().main(args)
-
-/**
- * The primary [CliktCommand] for the trace tools offered by OpenDC.
- */
-class TraceToolsCli : CliktCommand(name = "trace-tools") {
- init {
- subcommands(QueryCommand())
- subcommands(ConvertCommand())
- }
-
- override fun run() {}
-}
+public val FAILURE_SCHEMA: MessageType = FAILURE_SCHEMA_v1
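
Like CarbonSchemas.kt above, the new FailureSchemas.kt keeps the revision private so the public constant can evolve without touching call sites. A hypothetical sketch of a second revision (FAILURE_SCHEMA_v2 and its failure_count column are invented for illustration) would reuse the union mechanism seen in the resource-state schemas further below:

    // Hypothetical second revision that adds an optional column.
    private val FAILURE_SCHEMA_v2: MessageType =
        Types.buildMessage()
            .addFields(
                Types.optional(PrimitiveType.PrimitiveTypeName.INT64).named("failure_interval"),
                Types.optional(PrimitiveType.PrimitiveTypeName.INT64).named("failure_duration"),
                Types.optional(PrimitiveType.PrimitiveTypeName.DOUBLE).named("failure_intensity"),
                Types.optional(PrimitiveType.PrimitiveTypeName.INT32).named("failure_count"),
            )
            .named("failure_fragment")

    // The public alias would then accept files written with either revision.
    public val FAILURE_SCHEMA: MessageType = FAILURE_SCHEMA_v1.union(FAILURE_SCHEMA_v2)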
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/gwf/GwfTaskTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/gwf/GwfTaskTableReader.kt
deleted file mode 100644
index 8a2a99cb..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/gwf/GwfTaskTableReader.kt
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.gwf
-
-import com.fasterxml.jackson.core.JsonToken
-import com.fasterxml.jackson.dataformat.csv.CsvParser
-import com.fasterxml.jackson.dataformat.csv.CsvSchema
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.TASK_ALLOC_NCPUS
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_REQ_NCPUS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_SUBMIT_TIME
-import org.opendc.trace.conv.TASK_WORKFLOW_ID
-import org.opendc.trace.util.convertTo
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-import java.util.regex.Pattern
-
-/**
- * A [TableReader] implementation for the GWF format.
- */
-internal class GwfTaskTableReader(private val parser: CsvParser) : TableReader {
- /**
- * A flag to indicate whether a single row has been read already.
- */
- private var isStarted = false
-
- init {
- parser.schema = schema
- }
-
- override fun nextRow(): Boolean {
- if (!isStarted) {
- isStarted = true
- }
-
- // Reset the row state
- reset()
-
- if (parser.isClosed || !nextStart()) {
- return false
- }
-
- while (true) {
- val token = parser.nextValue()
-
- if (token == null || token == JsonToken.END_OBJECT) {
- break
- }
-
- when (parser.currentName) {
- "WorkflowID" -> workflowId = parser.text
- "JobID" -> jobId = parser.text
- "SubmitTime" -> submitTime = Instant.ofEpochSecond(parser.longValue)
- "RunTime" -> runtime = Duration.ofSeconds(parser.longValue)
- "NProcs" -> nProcs = parser.intValue
- "ReqNProcs" -> reqNProcs = parser.intValue
- "Dependencies" -> dependencies = parseParents(parser.valueAsString)
- }
- }
-
- return true
- }
-
- override fun resolve(name: String): Int {
- return when (name) {
- TASK_ID -> colJobID
- TASK_WORKFLOW_ID -> colWorkflowID
- TASK_SUBMIT_TIME -> colSubmitTime
- TASK_RUNTIME -> colRuntime
- TASK_ALLOC_NCPUS -> colNproc
- TASK_REQ_NCPUS -> colReqNproc
- TASK_PARENTS -> colDeps
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean {
- require(index in 0..colDeps) { "Invalid column" }
- return false
- }
-
- override fun getBoolean(index: Int): Boolean {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInt(index: Int): Int {
- checkActive()
- return when (index) {
- colReqNproc -> reqNProcs
- colNproc -> nProcs
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getLong(index: Int): Long {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getFloat(index: Int): Float {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDouble(index: Int): Double {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getString(index: Int): String? {
- checkActive()
- return when (index) {
- colJobID -> jobId
- colWorkflowID -> workflowId
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getUUID(index: Int): UUID? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInstant(index: Int): Instant? {
- checkActive()
- return when (index) {
- colSubmitTime -> submitTime
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getDuration(index: Int): Duration? {
- checkActive()
- return when (index) {
- colRuntime -> runtime
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- checkActive()
- return when (index) {
- colDeps -> typeDeps.convertTo(dependencies, elementType)
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun close() {
- parser.close()
- }
-
- /**
- * Helper method to check if the reader is active.
- */
- private fun checkActive() {
- check(isStarted && !parser.isClosed) { "No active row. Did you call nextRow()?" }
- }
-
- /**
- * The pattern used to parse the parents.
- */
- private val pattern = Pattern.compile("\\s+")
-
- /**
- * Parse the parents into a set of task identifiers.
- */
- private fun parseParents(value: String): Set<String> {
- val result = mutableSetOf<String>()
- val deps = value.split(pattern)
-
- for (dep in deps) {
- if (dep.isBlank()) {
- continue
- }
-
- result.add(dep)
- }
-
- return result
- }
-
- /**
- * Advance the parser until the next object start.
- */
- private fun nextStart(): Boolean {
- var token = parser.nextValue()
-
- while (token != null && token != JsonToken.START_OBJECT) {
- token = parser.nextValue()
- }
-
- return token != null
- }
-
- /**
- * Reader state fields.
- */
- private var workflowId: String? = null
- private var jobId: String? = null
- private var submitTime: Instant? = null
- private var runtime: Duration? = null
- private var nProcs = -1
- private var reqNProcs = -1
- private var dependencies = emptySet<String>()
-
- /**
- * Reset the state.
- */
- private fun reset() {
- workflowId = null
- jobId = null
- submitTime = null
- runtime = null
- nProcs = -1
- reqNProcs = -1
- dependencies = emptySet()
- }
-
- private val colWorkflowID = 0
- private val colJobID = 1
- private val colSubmitTime = 2
- private val colRuntime = 3
- private val colNproc = 4
- private val colReqNproc = 5
- private val colDeps = 6
-
- private val typeDeps = TableColumnType.Set(TableColumnType.String)
-
- companion object {
- /**
- * The [CsvSchema] that is used to parse the trace.
- */
- private val schema =
- CsvSchema.builder()
- .addColumn("WorkflowID", CsvSchema.ColumnType.NUMBER)
- .addColumn("JobID", CsvSchema.ColumnType.NUMBER)
- .addColumn("SubmitTime", CsvSchema.ColumnType.NUMBER)
- .addColumn("RunTime", CsvSchema.ColumnType.NUMBER)
- .addColumn("NProcs", CsvSchema.ColumnType.NUMBER)
- .addColumn("ReqNProcs", CsvSchema.ColumnType.NUMBER)
- .addColumn("Dependencies", CsvSchema.ColumnType.STRING)
- .setAllowComments(true)
- .setUseHeader(true)
- .setColumnSeparator(',')
- .build()
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/gwf/GwfTraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/gwf/GwfTraceFormat.kt
deleted file mode 100644
index 097c5593..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/gwf/GwfTraceFormat.kt
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.gwf
-
-import com.fasterxml.jackson.dataformat.csv.CsvFactory
-import com.fasterxml.jackson.dataformat.csv.CsvParser
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.TABLE_TASKS
-import org.opendc.trace.conv.TASK_ALLOC_NCPUS
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_REQ_NCPUS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_SUBMIT_TIME
-import org.opendc.trace.conv.TASK_WORKFLOW_ID
-import org.opendc.trace.spi.TableDetails
-import org.opendc.trace.spi.TraceFormat
-import java.nio.file.Path
-
-/**
- * A [TraceFormat] implementation for the GWF trace format.
- */
-public class GwfTraceFormat : TraceFormat {
- /**
- * The name of this trace format.
- */
- override val name: String = "gwf"
-
- /**
- * The [CsvFactory] used to create the parser.
- */
- private val factory =
- CsvFactory()
- .enable(CsvParser.Feature.ALLOW_COMMENTS)
- .enable(CsvParser.Feature.TRIM_SPACES)
-
- override fun create(path: Path) {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-
- override fun getTables(path: Path): List<String> = listOf(TABLE_TASKS)
-
- override fun getDetails(
- path: Path,
- table: String,
- ): TableDetails {
- return when (table) {
- TABLE_TASKS ->
- TableDetails(
- listOf(
- TableColumn(TASK_WORKFLOW_ID, TableColumnType.String),
- TableColumn(TASK_ID, TableColumnType.String),
- TableColumn(TASK_SUBMIT_TIME, TableColumnType.Instant),
- TableColumn(TASK_RUNTIME, TableColumnType.Duration),
- TableColumn(TASK_REQ_NCPUS, TableColumnType.Int),
- TableColumn(TASK_ALLOC_NCPUS, TableColumnType.Int),
- TableColumn(TASK_PARENTS, TableColumnType.Set(TableColumnType.String)),
- ),
- )
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newReader(
- path: Path,
- table: String,
- projection: List<String>?,
- ): TableReader {
- return when (table) {
- TABLE_TASKS -> GwfTaskTableReader(factory.createParser(path.toFile()))
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newWriter(
- path: Path,
- table: String,
- ): TableWriter {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmInterferenceJsonTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmInterferenceJsonTableReader.kt
deleted file mode 100644
index dba971d7..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmInterferenceJsonTableReader.kt
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.formats.opendc
-
-import com.fasterxml.jackson.core.JsonParseException
-import com.fasterxml.jackson.core.JsonParser
-import com.fasterxml.jackson.core.JsonToken
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.INTERFERENCE_GROUP_MEMBERS
-import org.opendc.trace.conv.INTERFERENCE_GROUP_SCORE
-import org.opendc.trace.conv.INTERFERENCE_GROUP_TARGET
-import org.opendc.trace.util.convertTo
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-
-/**
- * A [TableReader] implementation for the OpenDC VM interference JSON format.
- */
-internal class OdcVmInterferenceJsonTableReader(private val parser: JsonParser) : TableReader {
- /**
- * A flag to indicate whether a single row has been read already.
- */
- private var isStarted = false
-
- override fun nextRow(): Boolean {
- if (!isStarted) {
- isStarted = true
-
- parser.nextToken()
-
- if (!parser.isExpectedStartArrayToken) {
- throw JsonParseException(parser, "Expected array at start, but got ${parser.currentToken()}")
- }
- }
-
- return if (parser.isClosed || parser.nextToken() == JsonToken.END_ARRAY) {
- parser.close()
- reset()
- false
- } else {
- parseGroup(parser)
- true
- }
- }
-
- private val colMembers = 0
- private val colTarget = 1
- private val colScore = 2
-
- private val typeMembers = TableColumnType.Set(TableColumnType.String)
-
- override fun resolve(name: String): Int {
- return when (name) {
- INTERFERENCE_GROUP_MEMBERS -> colMembers
- INTERFERENCE_GROUP_TARGET -> colTarget
- INTERFERENCE_GROUP_SCORE -> colScore
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean {
- return when (index) {
- colMembers, colTarget, colScore -> false
- else -> throw IllegalArgumentException("Invalid column index $index")
- }
- }
-
- override fun getBoolean(index: Int): Boolean {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun getInt(index: Int): Int {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun getLong(index: Int): Long {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun getFloat(index: Int): Float {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun getDouble(index: Int): Double {
- checkActive()
- return when (index) {
- colTarget -> targetLoad
- colScore -> score
- else -> throw IllegalArgumentException("Invalid column $index")
- }
- }
-
- override fun getString(index: Int): String? {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun getUUID(index: Int): UUID? {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun getInstant(index: Int): Instant? {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun getDuration(index: Int): Duration? {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- checkActive()
- return when (index) {
- colMembers -> typeMembers.convertTo(members, elementType)
- else -> throw IllegalArgumentException("Invalid column $index")
- }
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun close() {
- parser.close()
- }
-
- private var members = emptySet<String>()
- private var targetLoad = Double.POSITIVE_INFINITY
- private var score = 1.0
-
- /**
- * Helper method to check if the reader is active.
- */
- private fun checkActive() {
- check(isStarted && !parser.isClosed) { "No active row. Did you call nextRow()?" }
- }
-
- /**
- * Reset the state.
- */
- private fun reset() {
- members = emptySet()
- targetLoad = Double.POSITIVE_INFINITY
- score = 1.0
- }
-
- /**
- * Parse a group from an interference JSON file.
- */
- private fun parseGroup(parser: JsonParser) {
- var targetLoad = Double.POSITIVE_INFINITY
- var score = 1.0
- val members = mutableSetOf<String>()
-
- if (!parser.isExpectedStartObjectToken) {
- throw JsonParseException(parser, "Expected object, but got ${parser.currentToken()}")
- }
-
- while (parser.nextValue() != JsonToken.END_OBJECT) {
- when (parser.currentName) {
- "vms" -> parseGroupMembers(parser, members)
- "minServerLoad" -> targetLoad = parser.doubleValue
- "performanceScore" -> score = parser.doubleValue
- }
- }
-
- this.members = members
- this.targetLoad = targetLoad
- this.score = score
- }
-
- /**
- * Parse the members of a group.
- */
- private fun parseGroupMembers(
- parser: JsonParser,
- members: MutableSet<String>,
- ) {
- if (!parser.isExpectedStartArrayToken) {
- throw JsonParseException(parser, "Expected array for group members, but got ${parser.currentToken()}")
- }
-
- while (parser.nextValue() != JsonToken.END_ARRAY) {
- if (parser.currentToken() != JsonToken.VALUE_STRING) {
- throw JsonParseException(parser, "Expected string value for group member")
- }
-
- members.add(parser.text)
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmInterferenceJsonTableWriter.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmInterferenceJsonTableWriter.kt
deleted file mode 100644
index b3286a1c..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmInterferenceJsonTableWriter.kt
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.formats.opendc
-
-import com.fasterxml.jackson.core.JsonGenerator
-import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.INTERFERENCE_GROUP_MEMBERS
-import org.opendc.trace.conv.INTERFERENCE_GROUP_SCORE
-import org.opendc.trace.conv.INTERFERENCE_GROUP_TARGET
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-
-/**
- * A [TableWriter] implementation for the OpenDC VM interference JSON format.
- */
-internal class OdcVmInterferenceJsonTableWriter(private val generator: JsonGenerator) : TableWriter {
- /**
- * A flag to indicate whether a row has been started.
- */
- private var isRowActive = false
-
- init {
- generator.writeStartArray()
- }
-
- override fun startRow() {
- // Reset state
- members = emptySet()
- targetLoad = Double.POSITIVE_INFINITY
- score = 1.0
-
- // Mark row as active
- isRowActive = true
- }
-
- override fun endRow() {
- check(isRowActive) { "No active row" }
-
- generator.writeStartObject()
- generator.writeArrayFieldStart("vms")
- for (member in members) {
- generator.writeString(member)
- }
- generator.writeEndArray()
- generator.writeNumberField("minServerLoad", targetLoad)
- generator.writeNumberField("performanceScore", score)
- generator.writeEndObject()
- }
-
- override fun resolve(name: String): Int {
- return when (name) {
- INTERFERENCE_GROUP_MEMBERS -> colMembers
- INTERFERENCE_GROUP_TARGET -> colTarget
- INTERFERENCE_GROUP_SCORE -> colScore
- else -> -1
- }
- }
-
- override fun setBoolean(
- index: Int,
- value: Boolean,
- ) {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun setInt(
- index: Int,
- value: Int,
- ) {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun setLong(
- index: Int,
- value: Long,
- ) {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun setFloat(
- index: Int,
- value: Float,
- ) {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun setDouble(
- index: Int,
- value: Double,
- ) {
- check(isRowActive) { "No active row" }
-
- when (index) {
- colTarget -> targetLoad = (value as Number).toDouble()
- colScore -> score = (value as Number).toDouble()
- else -> throw IllegalArgumentException("Invalid column $index")
- }
- }
-
- override fun setString(
- index: Int,
- value: String,
- ) {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun setUUID(
- index: Int,
- value: UUID,
- ) {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun setInstant(
- index: Int,
- value: Instant,
- ) {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun setDuration(
- index: Int,
- value: Duration,
- ) {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun <T> setList(
- index: Int,
- value: List<T>,
- ) {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun <T> setSet(
- index: Int,
- value: Set<T>,
- ) {
- check(isRowActive) { "No active row" }
-
- @Suppress("UNCHECKED_CAST")
- when (index) {
- colMembers -> members = value as Set<String>
- else -> throw IllegalArgumentException("Invalid column index $index")
- }
- }
-
- override fun <K, V> setMap(
- index: Int,
- value: Map<K, V>,
- ) {
- throw IllegalArgumentException("Invalid column $index")
- }
-
- override fun flush() {
- generator.flush()
- }
-
- override fun close() {
- generator.writeEndArray()
- generator.close()
- }
-
- private val colMembers = 0
- private val colTarget = 1
- private val colScore = 2
-
- private var members = emptySet<String>()
- private var targetLoad = Double.POSITIVE_INFINITY
- private var score = 1.0
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmTraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmTraceFormat.kt
deleted file mode 100644
index 74e880be..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmTraceFormat.kt
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.formats.opendc
-
-import com.fasterxml.jackson.core.JsonEncoding
-import com.fasterxml.jackson.core.JsonFactory
-import org.apache.parquet.column.ParquetProperties
-import org.apache.parquet.hadoop.ParquetFileWriter
-import org.apache.parquet.hadoop.metadata.CompressionCodecName
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.INTERFERENCE_GROUP_MEMBERS
-import org.opendc.trace.conv.INTERFERENCE_GROUP_SCORE
-import org.opendc.trace.conv.INTERFERENCE_GROUP_TARGET
-import org.opendc.trace.conv.TABLE_INTERFERENCE_GROUPS
-import org.opendc.trace.conv.TABLE_RESOURCES
-import org.opendc.trace.conv.TABLE_RESOURCE_STATES
-import org.opendc.trace.conv.resourceChildren
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceDeadline
-import org.opendc.trace.conv.resourceDuration
-import org.opendc.trace.conv.resourceGpuCapacity
-import org.opendc.trace.conv.resourceGpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceNature
-import org.opendc.trace.conv.resourceParents
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateDuration
-import org.opendc.trace.conv.resourceStateTimestamp
-import org.opendc.trace.conv.resourceSubmissionTime
-import org.opendc.trace.formats.opendc.parquet.ResourceReadSupport
-import org.opendc.trace.formats.opendc.parquet.ResourceStateReadSupport
-import org.opendc.trace.formats.opendc.parquet.ResourceStateWriteSupport
-import org.opendc.trace.formats.opendc.parquet.ResourceWriteSupport
-import org.opendc.trace.spi.TableDetails
-import org.opendc.trace.spi.TraceFormat
-import org.opendc.trace.util.parquet.LocalParquetReader
-import org.opendc.trace.util.parquet.LocalParquetWriter
-import java.nio.file.Files
-import java.nio.file.Path
-import kotlin.io.path.exists
-
-/**
- * A [TraceFormat] implementation of the OpenDC virtual machine trace format.
- */
-public class OdcVmTraceFormat : TraceFormat {
- /**
- * A [JsonFactory] that is used to parse the JSON-based interference model.
- */
- private val jsonFactory = JsonFactory()
-
- /**
- * The name of this trace format.
- */
- override val name: String = "opendc-vm"
-
- override fun create(path: Path) {
- // Construct directory containing the trace files
- Files.createDirectories(path)
-
- val tables = getTables(path)
-
- for (table in tables) {
- val writer = newWriter(path, table)
- writer.close()
- }
- }
-
- override fun getTables(path: Path): List<String> = listOf(TABLE_RESOURCES, TABLE_RESOURCE_STATES, TABLE_INTERFERENCE_GROUPS)
-
- override fun getDetails(
- path: Path,
- table: String,
- ): TableDetails {
- return when (table) {
- TABLE_RESOURCES ->
- TableDetails(
- listOf(
- TableColumn(resourceID, TableColumnType.String),
- TableColumn(resourceSubmissionTime, TableColumnType.Instant),
- TableColumn(resourceDuration, TableColumnType.Long),
- TableColumn(resourceCpuCount, TableColumnType.Int),
- TableColumn(resourceCpuCapacity, TableColumnType.Double),
- TableColumn(resourceMemCapacity, TableColumnType.Double),
- TableColumn(resourceGpuCount, TableColumnType.Int),
- TableColumn(resourceGpuCapacity, TableColumnType.Double),
- TableColumn(resourceParents, TableColumnType.Set(TableColumnType.String)),
- TableColumn(resourceChildren, TableColumnType.Set(TableColumnType.String)),
- TableColumn(resourceNature, TableColumnType.String),
- TableColumn(resourceDeadline, TableColumnType.Long),
- ),
- )
- TABLE_RESOURCE_STATES ->
- TableDetails(
- listOf(
- TableColumn(resourceID, TableColumnType.String),
- TableColumn(resourceStateTimestamp, TableColumnType.Instant),
- TableColumn(resourceStateDuration, TableColumnType.Duration),
- TableColumn(resourceCpuCount, TableColumnType.Int),
- TableColumn(resourceStateCpuUsage, TableColumnType.Double),
- ),
- )
- TABLE_INTERFERENCE_GROUPS ->
- TableDetails(
- listOf(
- TableColumn(INTERFERENCE_GROUP_MEMBERS, TableColumnType.Set(TableColumnType.String)),
- TableColumn(INTERFERENCE_GROUP_TARGET, TableColumnType.Double),
- TableColumn(INTERFERENCE_GROUP_SCORE, TableColumnType.Double),
- ),
- )
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newReader(
- path: Path,
- table: String,
- projection: List<String>?,
- ): TableReader {
- return when (table) {
- TABLE_RESOURCES -> {
- val reader = LocalParquetReader(path.resolve("tasks.parquet"), ResourceReadSupport(projection))
- OdcVmResourceTableReader(reader)
- }
- TABLE_RESOURCE_STATES -> {
- val reader = LocalParquetReader(path.resolve("fragments.parquet"), ResourceStateReadSupport(projection))
- OdcVmResourceStateTableReader(reader)
- }
- TABLE_INTERFERENCE_GROUPS -> {
- val modelPath = path.resolve("interference-model.json")
- val parser =
- if (modelPath.exists()) {
- jsonFactory.createParser(modelPath.toFile())
- } else {
- jsonFactory.createParser("[]") // If model does not exist, return empty model
- }
-
- OdcVmInterferenceJsonTableReader(parser)
- }
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newWriter(
- path: Path,
- table: String,
- ): TableWriter {
- return when (table) {
- TABLE_RESOURCES -> {
- val writer =
- LocalParquetWriter.builder(path.resolve("tasks.parquet"), ResourceWriteSupport())
- .withCompressionCodec(CompressionCodecName.ZSTD)
- .withPageWriteChecksumEnabled(true)
- .withWriterVersion(ParquetProperties.WriterVersion.PARQUET_2_0)
- .withWriteMode(ParquetFileWriter.Mode.OVERWRITE)
- .build()
- OdcVmResourceTableWriter(writer)
- }
- TABLE_RESOURCE_STATES -> {
- val writer =
- LocalParquetWriter.builder(path.resolve("fragments.parquet"), ResourceStateWriteSupport())
- .withCompressionCodec(CompressionCodecName.ZSTD)
- .withDictionaryEncoding("id", true)
- .withBloomFilterEnabled("id", true)
- .withPageWriteChecksumEnabled(true)
- .withWriterVersion(ParquetProperties.WriterVersion.PARQUET_2_0)
- .withWriteMode(ParquetFileWriter.Mode.OVERWRITE)
- .build()
- OdcVmResourceStateTableWriter(writer)
- }
- TABLE_INTERFERENCE_GROUPS -> {
- val generator = jsonFactory.createGenerator(path.resolve("interference-model.json").toFile(), JsonEncoding.UTF8)
- OdcVmInterferenceJsonTableWriter(generator)
- }
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceReadSupport.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceReadSupport.kt
deleted file mode 100644
index cd2ccef7..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceReadSupport.kt
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.formats.opendc.parquet
-
-import org.apache.hadoop.conf.Configuration
-import org.apache.parquet.hadoop.api.InitContext
-import org.apache.parquet.hadoop.api.ReadSupport
-import org.apache.parquet.io.api.RecordMaterializer
-import org.apache.parquet.schema.LogicalTypeAnnotation
-import org.apache.parquet.schema.MessageType
-import org.apache.parquet.schema.PrimitiveType
-import org.apache.parquet.schema.Type
-import org.apache.parquet.schema.Types
-import org.opendc.trace.TableColumn
-import org.opendc.trace.conv.resourceChildren
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceDeadline
-import org.opendc.trace.conv.resourceDuration
-import org.opendc.trace.conv.resourceGpuCapacity
-import org.opendc.trace.conv.resourceGpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceNature
-import org.opendc.trace.conv.resourceParents
-import org.opendc.trace.conv.resourceSubmissionTime
-
-/**
- * A [ReadSupport] instance for [Resource] objects.
- */
-internal class ResourceReadSupport(private val projection: List<String>?) : ReadSupport<Resource>() {
- /**
- * Mapping from field names to [TableColumn]s.
- */
- private val fieldMap =
- mapOf(
- "id" to resourceID,
- "submissionTime" to resourceSubmissionTime,
- "submission_time" to resourceSubmissionTime,
- "duration" to resourceDuration,
- "maxCores" to resourceCpuCount,
- "cpu_count" to resourceCpuCount,
- "cpu_capacity" to resourceCpuCapacity,
- "requiredMemory" to resourceMemCapacity,
- "mem_capacity" to resourceMemCapacity,
- "gpu_count" to resourceGpuCount,
- "gpu_capacity" to resourceGpuCapacity,
- "parents" to resourceParents,
- "children" to resourceChildren,
- "nature" to resourceNature,
- "deadline" to resourceDeadline,
- )
-
- override fun init(context: InitContext): ReadContext {
- val projectedSchema =
- if (projection != null) {
- Types.buildMessage()
- .apply {
- val projectionSet = projection.toSet()
-
- for (field in READ_SCHEMA.fields) {
- val col = fieldMap[field.name] ?: continue
- if (col in projectionSet) {
- addField(field)
- }
- }
- }
- .named(READ_SCHEMA.name)
- } else {
- READ_SCHEMA
- }
-
- return ReadContext(projectedSchema)
- }
-
- override fun prepareForRead(
- configuration: Configuration,
- keyValueMetaData: Map<String, String>,
- fileSchema: MessageType,
- readContext: ReadContext,
- ): RecordMaterializer<Resource> = ResourceRecordMaterializer(readContext.requestedSchema)
-
- companion object {
- /**
- * Parquet read schema (version 2.0) for the "resources" table in the trace.
- */
- @JvmStatic
- val READ_SCHEMA_V2_0: MessageType =
- Types.buildMessage()
- .addFields(
- Types
- .required(PrimitiveType.PrimitiveTypeName.BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
- .named("id"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
- .named("submissionTime"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .named("duration"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT32)
- .named("maxCores"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .named("requiredMemory"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
- .named("nature"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .named("deadline"),
- )
- .named("resource")
-
- /**
- * Parquet read schema (version 2.2) for the "resources" table in the trace.
- */
- @JvmStatic
- val READ_SCHEMA_V2_2: MessageType =
- Types.buildMessage()
- .addFields(
- Types
- .required(PrimitiveType.PrimitiveTypeName.BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
- .named("id"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
- .named("submission_time"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .named("duration"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT32)
- .named("cpu_count"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.DOUBLE)
- .named("cpu_capacity"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .named("mem_capacity"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT32)
- .named("gpu_count"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
- .named("gpu_capacity"),
- Types
- .buildGroup(Type.Repetition.OPTIONAL)
- .addField(
- Types.repeatedGroup()
- .addField(
- Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
- .named("element"),
- )
- .named("list"),
- )
- .`as`(LogicalTypeAnnotation.listType())
- .named("parents"),
- Types
- .buildGroup(Type.Repetition.OPTIONAL)
- .addField(
- Types.repeatedGroup()
- .addField(
- Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
- .named("element"),
- )
- .named("list"),
- )
- .`as`(LogicalTypeAnnotation.listType())
- .named("children"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
- .named("nature"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .named("deadline"),
- )
- .named("resource")
-
- /**
- * Parquet read schema for the "resources" table in the trace.
- */
- @JvmStatic
- val READ_SCHEMA: MessageType =
- READ_SCHEMA_V2_2
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceStateReadSupport.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceStateReadSupport.kt
deleted file mode 100644
index 53e594de..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceStateReadSupport.kt
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.formats.opendc.parquet
-
-import org.apache.hadoop.conf.Configuration
-import org.apache.parquet.hadoop.api.InitContext
-import org.apache.parquet.hadoop.api.ReadSupport
-import org.apache.parquet.io.api.RecordMaterializer
-import org.apache.parquet.schema.LogicalTypeAnnotation
-import org.apache.parquet.schema.MessageType
-import org.apache.parquet.schema.PrimitiveType
-import org.apache.parquet.schema.Types
-import org.opendc.trace.TableColumn
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateDuration
-import org.opendc.trace.conv.resourceStateTimestamp
-
-/**
- * A [ReadSupport] instance for [ResourceState] objects.
- */
-internal class ResourceStateReadSupport(private val projection: List<String>?) : ReadSupport<ResourceState>() {
- /**
- * Mapping from field names to [TableColumn]s.
- */
- private val fieldMap =
- mapOf(
- "id" to resourceID,
- "time" to resourceStateTimestamp,
- "timestamp" to resourceStateTimestamp,
- "duration" to resourceStateDuration,
- "cores" to resourceCpuCount,
- "cpu_count" to resourceCpuCount,
- "cpuUsage" to resourceStateCpuUsage,
- "cpu_usage" to resourceStateCpuUsage,
- )
-
- override fun init(context: InitContext): ReadContext {
- val projectedSchema =
- if (projection != null) {
- Types.buildMessage()
- .apply {
- val projectionSet = projection.toSet()
-
- for (field in READ_SCHEMA.fields) {
- val col = fieldMap[field.name] ?: continue
- if (col in projectionSet) {
- addField(field)
- }
- }
- }
- .named(READ_SCHEMA.name)
- } else {
- READ_SCHEMA
- }
-
- return ReadContext(projectedSchema)
- }
-
- override fun prepareForRead(
- configuration: Configuration,
- keyValueMetaData: Map<String, String>,
- fileSchema: MessageType,
- readContext: ReadContext,
- ): RecordMaterializer<ResourceState> = ResourceStateRecordMaterializer(readContext.requestedSchema)
-
- companion object {
- /**
- * Parquet read schema (version 2.0) for the "resource states" table in the trace.
- */
- @JvmStatic
- val READ_SCHEMA_V2_0: MessageType =
- Types.buildMessage()
- .addFields(
- Types
- .required(PrimitiveType.PrimitiveTypeName.BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
- .named("id"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
- .named("time"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .named("duration"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT32)
- .named("cores"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.DOUBLE)
- .named("cpuUsage"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT32)
- .named("gpuCount"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
- .named("gpuUsage"),
- )
- .named("resource_state")
-
- /**
- * Parquet read schema (version 2.1) for the "resource states" table in the trace.
- */
- @JvmStatic
- val READ_SCHEMA_V2_1: MessageType =
- Types.buildMessage()
- .addFields(
- Types
- .required(PrimitiveType.PrimitiveTypeName.BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
- .named("id"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
- .named("timestamp"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .named("duration"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT32)
- .named("cpu_count"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.DOUBLE)
- .named("cpu_usage"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT32)
- .named("gpu_count"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
- .named("gpu_usage"),
- )
- .named("resource_state")
-
- /**
- * Parquet read schema for the "resource states" table in the trace.
- */
- @JvmStatic
- val READ_SCHEMA: MessageType = READ_SCHEMA_V2_0.union(READ_SCHEMA_V2_1)
- }
-}
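
The deleted read support merged its two schema revisions with MessageType.union, which is how a single reader kept accepting both the camelCase v2.0 files and the snake_case v2.1 files. A minimal sketch of that behaviour, assuming two single-column revisions:

    val v0 = Types.buildMessage()
        .addFields(Types.optional(PrimitiveType.PrimitiveTypeName.INT64).named("time"))
        .named("resource_state")
    val v1 = Types.buildMessage()
        .addFields(Types.optional(PrimitiveType.PrimitiveTypeName.INT64).named("timestamp"))
        .named("resource_state")

    // union() keeps the fields of both revisions, so a projection by either
    // field name still resolves against the merged schema.
    val merged = v0.union(v1)
    check(merged.fieldCount == 2)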
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/swf/SwfTaskTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/swf/SwfTaskTableReader.kt
deleted file mode 100644
index 5a79fd6f..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/swf/SwfTaskTableReader.kt
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.swf
-
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.TASK_ALLOC_NCPUS
-import org.opendc.trace.conv.TASK_GROUP_ID
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_REQ_NCPUS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_STATUS
-import org.opendc.trace.conv.TASK_SUBMIT_TIME
-import org.opendc.trace.conv.TASK_USER_ID
-import org.opendc.trace.conv.TASK_WAIT_TIME
-import java.io.BufferedReader
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-
-/**
- * A [TableReader] implementation for the SWF format.
- */
-internal class SwfTaskTableReader(private val reader: BufferedReader) : TableReader {
- /**
- * The current state of the reader.
- */
- private var state = State.Pending
-
- /**
- * The current row.
- */
- private var fields = emptyList<String>()
-
- /**
- * A [Regex] object to match whitespace.
- */
- private val whitespace = "\\s+".toRegex()
-
- override fun nextRow(): Boolean {
- var line: String?
- var num = 0
-
- val state = state
- if (state == State.Closed) {
- return false
- } else if (state == State.Pending) {
- this.state = State.Active
- }
-
- while (true) {
- line = reader.readLine()
-
- if (line == null) {
- this.state = State.Closed
- return false
- }
- num++
-
- if (line.isBlank()) {
- // Ignore empty lines
- continue
- } else if (line.startsWith(";")) {
- // Ignore comments for now
- continue
- }
-
- break
- }
-
- fields = line!!.trim().split(whitespace)
-
- if (fields.size < 18) {
- throw IllegalArgumentException("Invalid format at line $line")
- }
-
- return true
- }
-
- override fun resolve(name: String): Int {
- return when (name) {
- TASK_ID -> colJobID
- TASK_SUBMIT_TIME -> colSubmitTime
- TASK_WAIT_TIME -> colWaitTime
- TASK_RUNTIME -> colRunTime
- TASK_ALLOC_NCPUS -> colAllocNcpus
- TASK_REQ_NCPUS -> colReqNcpus
- TASK_STATUS -> colStatus
- TASK_USER_ID -> colUserID
- TASK_GROUP_ID -> colGroupID
- TASK_PARENTS -> colParentJob
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean {
- require(index in colJobID..colParentThinkTime) { "Invalid column index" }
- return false
- }
-
- override fun getBoolean(index: Int): Boolean {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInt(index: Int): Int {
- check(state == State.Active) { "No active row" }
- return when (index) {
- colReqNcpus, colAllocNcpus, colStatus, colGroupID, colUserID -> fields[index].toInt(10)
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getLong(index: Int): Long {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getFloat(index: Int): Float {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDouble(index: Int): Double {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getString(index: Int): String {
- check(state == State.Active) { "No active row" }
- return when (index) {
- colJobID -> fields[index]
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getUUID(index: Int): UUID? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInstant(index: Int): Instant? {
- check(state == State.Active) { "No active row" }
- return when (index) {
- colSubmitTime -> Instant.ofEpochSecond(fields[index].toLong(10))
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getDuration(index: Int): Duration? {
- check(state == State.Active) { "No active row" }
- return when (index) {
- colWaitTime, colRunTime -> Duration.ofSeconds(fields[index].toLong(10))
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- check(state == State.Active) { "No active row" }
- @Suppress("UNCHECKED_CAST")
- return when (index) {
- colParentJob -> {
- require(elementType.isAssignableFrom(String::class.java))
- val parent = fields[index].toLong(10)
- if (parent < 0) emptySet() else setOf(parent)
- }
- else -> throw IllegalArgumentException("Invalid column")
- } as Set<T>?
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun close() {
- reader.close()
- state = State.Closed
- }
-
- /**
- * Default column indices for the SWF format.
- */
- private val colJobID = 0
- private val colSubmitTime = 1
- private val colWaitTime = 2
- private val colRunTime = 3
- private val colAllocNcpus = 4
- private val colAvgCpuTime = 5
- private val colUsedMem = 6
- private val colReqNcpus = 7
- private val colReqTime = 8
- private val colReqMem = 9
- private val colStatus = 10
- private val colUserID = 11
- private val colGroupID = 12
- private val colExecNum = 13
- private val colQueueNum = 14
- private val colPartNum = 15
- private val colParentJob = 16
- private val colParentThinkTime = 17
-
- private enum class State {
- Pending,
- Active,
- Closed,
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/swf/SwfTraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/swf/SwfTraceFormat.kt
deleted file mode 100644
index d59b07b4..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/swf/SwfTraceFormat.kt
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 2020 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.swf
-
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.TABLE_TASKS
-import org.opendc.trace.conv.TASK_ALLOC_NCPUS
-import org.opendc.trace.conv.TASK_GROUP_ID
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_REQ_NCPUS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_STATUS
-import org.opendc.trace.conv.TASK_SUBMIT_TIME
-import org.opendc.trace.conv.TASK_USER_ID
-import org.opendc.trace.conv.TASK_WAIT_TIME
-import org.opendc.trace.spi.TableDetails
-import org.opendc.trace.spi.TraceFormat
-import java.nio.file.Path
-import kotlin.io.path.bufferedReader
-
-/**
- * Support for the Standard Workload Format (SWF) in OpenDC.
- *
- * The standard is defined by the PWA, see here: https://www.cse.huji.ac.il/labs/parallel/workload/swf.html
- */
-public class SwfTraceFormat : TraceFormat {
- override val name: String = "swf"
-
- override fun create(path: Path) {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-
- override fun getTables(path: Path): List<String> = listOf(TABLE_TASKS)
-
- override fun getDetails(
- path: Path,
- table: String,
- ): TableDetails {
- return when (table) {
- TABLE_TASKS ->
- TableDetails(
- listOf(
- TableColumn(TASK_ID, TableColumnType.String),
- TableColumn(TASK_SUBMIT_TIME, TableColumnType.Instant),
- TableColumn(TASK_WAIT_TIME, TableColumnType.Duration),
- TableColumn(TASK_RUNTIME, TableColumnType.Duration),
- TableColumn(TASK_REQ_NCPUS, TableColumnType.Int),
- TableColumn(TASK_ALLOC_NCPUS, TableColumnType.Int),
- TableColumn(TASK_PARENTS, TableColumnType.Set(TableColumnType.String)),
- TableColumn(TASK_STATUS, TableColumnType.Int),
- TableColumn(TASK_GROUP_ID, TableColumnType.Int),
- TableColumn(TASK_USER_ID, TableColumnType.Int),
- ),
- )
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newReader(
- path: Path,
- table: String,
- projection: List<String>?,
- ): TableReader {
- return when (table) {
- TABLE_TASKS -> SwfTaskTableReader(path.bufferedReader())
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newWriter(
- path: Path,
- table: String,
- ): TableWriter {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wfformat/WfFormatTaskTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wfformat/WfFormatTaskTableReader.kt
deleted file mode 100644
index 8f84e51f..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wfformat/WfFormatTaskTableReader.kt
+++ /dev/null
@@ -1,314 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.wfformat
-
-import com.fasterxml.jackson.core.JsonParseException
-import com.fasterxml.jackson.core.JsonParser
-import com.fasterxml.jackson.core.JsonToken
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.TASK_CHILDREN
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_REQ_NCPUS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_WORKFLOW_ID
-import org.opendc.trace.util.convertTo
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-import kotlin.math.roundToInt
-
-/**
- * A [TableReader] implementation for the WfCommons workload trace format.
- */
-internal class WfFormatTaskTableReader(private val parser: JsonParser) : TableReader {
- /**
- * The current nesting of the parser.
- */
- private var level: ParserLevel = ParserLevel.TOP
-
- override fun nextRow(): Boolean {
- reset()
-
- var hasJob = false
-
- while (!hasJob) {
- when (level) {
- ParserLevel.TOP -> {
- val token = parser.nextToken()
-
- // Check whether the document is not empty and starts with an object
- if (token == null) {
- parser.close()
- break
- } else if (token != JsonToken.START_OBJECT) {
- throw JsonParseException(parser, "Expected object", parser.currentLocation)
- } else {
- level = ParserLevel.TRACE
- }
- }
- ParserLevel.TRACE -> {
- // Seek for the workflow object in the file
- if (!seekWorkflow()) {
- parser.close()
- break
- } else if (!parser.isExpectedStartObjectToken) {
- throw JsonParseException(parser, "Expected object", parser.currentLocation)
- } else {
- level = ParserLevel.WORKFLOW
- }
- }
- ParserLevel.WORKFLOW -> {
- // Seek for the jobs object in the file
- level =
- if (!seekJobs()) {
- ParserLevel.TRACE
- } else if (!parser.isExpectedStartArrayToken) {
- throw JsonParseException(parser, "Expected array", parser.currentLocation)
- } else {
- ParserLevel.JOB
- }
- }
- ParserLevel.JOB -> {
- when (parser.nextToken()) {
- JsonToken.END_ARRAY -> level = ParserLevel.WORKFLOW
- JsonToken.START_OBJECT -> {
- parseJob()
- hasJob = true
- break
- }
- else -> throw JsonParseException(parser, "Unexpected token", parser.currentLocation)
- }
- }
- }
- }
-
- return hasJob
- }
-
- override fun resolve(name: String): Int {
- return when (name) {
- TASK_ID -> colID
- TASK_WORKFLOW_ID -> colWorkflowID
- TASK_RUNTIME -> colRuntime
- TASK_REQ_NCPUS -> colNproc
- TASK_PARENTS -> colParents
- TASK_CHILDREN -> colChildren
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean {
- require(index in 0..colChildren) { "Invalid column value" }
- return false
- }
-
- override fun getBoolean(index: Int): Boolean {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInt(index: Int): Int {
- checkActive()
- return when (index) {
- colNproc -> cores
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getLong(index: Int): Long {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getFloat(index: Int): Float {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDouble(index: Int): Double {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getString(index: Int): String? {
- checkActive()
- return when (index) {
- colID -> id
- colWorkflowID -> workflowId
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getUUID(index: Int): UUID? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInstant(index: Int): Instant? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDuration(index: Int): Duration? {
- checkActive()
- return when (index) {
- colRuntime -> runtime
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- checkActive()
- return when (index) {
- colParents -> typeParents.convertTo(parents, elementType)
- colChildren -> typeChildren.convertTo(children, elementType)
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun close() {
- parser.close()
- }
-
- /**
- * Helper method to check if the reader is active.
- */
- private fun checkActive() {
- check(level != ParserLevel.TOP && !parser.isClosed) { "No active row. Did you call nextRow()?" }
- }
-
- /**
- * Parse the trace and seek until the workflow description.
- */
- private fun seekWorkflow(): Boolean {
- while (parser.nextValue() != JsonToken.END_OBJECT && !parser.isClosed) {
- when (parser.currentName) {
- "name" -> workflowId = parser.text
- "workflow" -> return true
- else -> parser.skipChildren()
- }
- }
-
- return false
- }
-
- /**
- * Parse the workflow description in the file and seek until the first job.
- */
- private fun seekJobs(): Boolean {
- while (parser.nextValue() != JsonToken.END_OBJECT) {
- when (parser.currentName) {
- "jobs" -> return true
- else -> parser.skipChildren()
- }
- }
-
- return false
- }
-
- /**
- * Parse a single job in the file.
- */
- private fun parseJob() {
- while (parser.nextValue() != JsonToken.END_OBJECT) {
- when (parser.currentName) {
- "name" -> id = parser.text
- "parents" -> parents = parseIds()
- "children" -> children = parseIds()
- "runtime" -> runtime = Duration.ofSeconds(parser.numberValue.toLong())
- "cores" -> cores = parser.floatValue.roundToInt()
- else -> parser.skipChildren()
- }
- }
- }
-
- /**
- * Parse the parents/children of a job.
- */
- private fun parseIds(): Set<String> {
- if (!parser.isExpectedStartArrayToken) {
- throw JsonParseException(parser, "Expected array", parser.currentLocation)
- }
-
- val ids = mutableSetOf<String>()
-
- while (parser.nextToken() != JsonToken.END_ARRAY) {
- if (parser.currentToken != JsonToken.VALUE_STRING) {
- throw JsonParseException(parser, "Expected token", parser.currentLocation)
- }
-
- ids.add(parser.valueAsString)
- }
-
- return ids
- }
-
- private enum class ParserLevel {
- TOP,
- TRACE,
- WORKFLOW,
- JOB,
- }
-
- /**
- * State fields for the parser.
- */
- private var id: String? = null
- private var workflowId: String? = null
- private var runtime: Duration? = null
- private var parents: Set<String>? = null
- private var children: Set<String>? = null
- private var cores = -1
-
- private fun reset() {
- id = null
- runtime = null
- parents = null
- children = null
- cores = -1
- }
-
- private val colID = 0
- private val colWorkflowID = 1
- private val colRuntime = 3
- private val colNproc = 4
- private val colParents = 5
- private val colChildren = 6
-
- private val typeParents = TableColumnType.Set(TableColumnType.String)
- private val typeChildren = TableColumnType.Set(TableColumnType.String)
-}
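The reader deleted above walked WfCommons documents of a fixed shape: a top-level object, a nested "workflow" object, and a "jobs" array. A minimal hand-written sketch of that shape and of driving the (module-internal) reader over it; the JSON values are illustrative:

import com.fasterxml.jackson.core.JsonFactory

val json = """
    {"name": "example-trace",
     "workflow": {"jobs": [
       {"name": "t1", "runtime": 10.0, "cores": 2, "parents": [], "children": ["t2"]},
       {"name": "t2", "runtime": 5.0, "cores": 1, "parents": ["t1"], "children": []}
     ]}}
"""
val reader = WfFormatTaskTableReader(JsonFactory().createParser(json))
while (reader.nextRow()) { /* yields t1, then t2 */ }
reader.close()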
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wfformat/WfFormatTraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wfformat/WfFormatTraceFormat.kt
deleted file mode 100644
index 2178fac6..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wfformat/WfFormatTraceFormat.kt
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.wfformat
-
-import com.fasterxml.jackson.core.JsonFactory
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.TABLE_TASKS
-import org.opendc.trace.conv.TASK_CHILDREN
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_REQ_NCPUS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_WORKFLOW_ID
-import org.opendc.trace.spi.TableDetails
-import org.opendc.trace.spi.TraceFormat
-import java.nio.file.Path
-
-/**
- * A [TraceFormat] implementation for the WfCommons workload trace format.
- */
-public class WfFormatTraceFormat : TraceFormat {
- /**
- * The [JsonFactory] that is used to created JSON parsers.
- */
- private val factory = JsonFactory()
-
- override val name: String = "wfformat"
-
- override fun create(path: Path) {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-
- override fun getTables(path: Path): List<String> = listOf(TABLE_TASKS)
-
- override fun getDetails(
- path: Path,
- table: String,
- ): TableDetails {
- return when (table) {
- TABLE_TASKS ->
- TableDetails(
- listOf(
- TableColumn(TASK_ID, TableColumnType.String),
- TableColumn(TASK_WORKFLOW_ID, TableColumnType.String),
- TableColumn(TASK_RUNTIME, TableColumnType.Duration),
- TableColumn(TASK_REQ_NCPUS, TableColumnType.Int),
- TableColumn(TASK_PARENTS, TableColumnType.Set(TableColumnType.String)),
- TableColumn(TASK_CHILDREN, TableColumnType.Set(TableColumnType.String)),
- ),
- )
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newReader(
- path: Path,
- table: String,
- projection: List<String>?,
- ): TableReader {
- return when (table) {
- TABLE_TASKS -> WfFormatTaskTableReader(factory.createParser(path.toFile()))
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newWriter(
- path: Path,
- table: String,
- ): TableWriter {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceStateTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/FragmentTableReader.kt
index d474e0ec..947746c6 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceStateTableReader.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/FragmentTableReader.kt
@@ -20,17 +20,14 @@
* SOFTWARE.
*/
-package org.opendc.trace.formats.opendc
+package org.opendc.trace.formats.workload
import org.opendc.trace.TableReader
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceGpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateDuration
-import org.opendc.trace.conv.resourceStateGpuUsage
-import org.opendc.trace.conv.resourceStateTimestamp
-import org.opendc.trace.formats.opendc.parquet.ResourceState
+import org.opendc.trace.conv.FRAGMENT_CPU_USAGE
+import org.opendc.trace.conv.FRAGMENT_DURATION
+import org.opendc.trace.conv.FRAGMENT_GPU_USAGE
+import org.opendc.trace.conv.TASK_ID
+import org.opendc.trace.formats.workload.parquet.Fragment
import org.opendc.trace.util.parquet.LocalParquetReader
import java.time.Duration
import java.time.Instant
@@ -39,11 +36,11 @@ import java.util.UUID
/**
* A [TableReader] implementation for the OpenDC virtual machine trace format.
*/
-internal class OdcVmResourceStateTableReader(private val reader: LocalParquetReader<ResourceState>) : TableReader {
+internal class FragmentTableReader(private val reader: LocalParquetReader<Fragment>) : TableReader {
/**
* The current record.
*/
- private var record: ResourceState? = null
+ private var record: Fragment? = null
override fun nextRow(): Boolean {
try {
@@ -58,23 +55,16 @@ internal class OdcVmResourceStateTableReader(private val reader: LocalParquetRea
}
private val colID = 0
- private val colTimestamp = 1
- private val colDuration = 2
- private val colCpuCount = 3
- private val colCpuUsage = 4
- private val colGpuCount = 5
- private val colGpuUsage = 6
- private val colMemoryCapacity = 7
+ private val colDuration = 1
+ private val colCpuUsage = 2
+ private val colGpuUsage = 3
override fun resolve(name: String): Int {
return when (name) {
- resourceID -> colID
- resourceStateTimestamp -> colTimestamp
- resourceStateDuration -> colDuration
- resourceCpuCount -> colCpuCount
- resourceStateCpuUsage -> colCpuUsage
- resourceGpuCount -> colGpuCount
- resourceStateGpuUsage -> colGpuUsage
+ TASK_ID -> colID
+ FRAGMENT_DURATION -> colDuration
+ FRAGMENT_CPU_USAGE -> colCpuUsage
+ FRAGMENT_GPU_USAGE -> colGpuUsage
else -> -1
}
}
@@ -91,8 +81,7 @@ internal class OdcVmResourceStateTableReader(private val reader: LocalParquetRea
override fun getInt(index: Int): Int {
val record = checkNotNull(record) { "Reader in invalid state" }
return when (index) {
- colCpuCount -> record.cpuCount
- colGpuCount -> record.gpuCount
+ colID -> record.id
else -> throw IllegalArgumentException("Invalid column or type [index $index]")
}
}
@@ -115,12 +104,7 @@ internal class OdcVmResourceStateTableReader(private val reader: LocalParquetRea
}
override fun getString(index: Int): String {
- val record = checkNotNull(record) { "Reader in invalid state" }
-
- return when (index) {
- colID -> record.id
- else -> throw IllegalArgumentException("Invalid column index $index")
- }
+ throw IllegalArgumentException("Invalid column index $index")
}
override fun getUUID(index: Int): UUID? {
@@ -128,12 +112,7 @@ internal class OdcVmResourceStateTableReader(private val reader: LocalParquetRea
}
override fun getInstant(index: Int): Instant {
- val record = checkNotNull(record) { "Reader in invalid state" }
-
- return when (index) {
- colTimestamp -> record.timestamp
- else -> throw IllegalArgumentException("Invalid column index $index")
- }
+ throw IllegalArgumentException("Invalid column index $index")
}
override fun getDuration(index: Int): Duration {
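Callers drive the renamed reader through the unchanged TableReader contract. A short sketch of a read loop over the fragments table, assuming a format: TraceFormat, a path pointing at a workload trace, and the org.opendc.trace.conv constants imported:

val reader = format.newReader(path, TABLE_FRAGMENTS, null)
val colId = reader.resolve(TASK_ID)
val colDuration = reader.resolve(FRAGMENT_DURATION)
val colCpuUsage = reader.resolve(FRAGMENT_CPU_USAGE)
while (reader.nextRow()) {
    val id = reader.getInt(colId)                  // task ids are Int after this change
    val duration = reader.getDuration(colDuration)
    val cpuUsage = reader.getDouble(colCpuUsage)
    // consume one fragment...
}
reader.close()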
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceStateTableWriter.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/FragmentTableWriter.kt
index c6f117d2..33cd9e17 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceStateTableWriter.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/FragmentTableWriter.kt
@@ -20,18 +20,15 @@
* SOFTWARE.
*/
-package org.opendc.trace.formats.opendc
+package org.opendc.trace.formats.workload
import org.apache.parquet.hadoop.ParquetWriter
import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceGpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateDuration
-import org.opendc.trace.conv.resourceStateGpuUsage
-import org.opendc.trace.conv.resourceStateTimestamp
-import org.opendc.trace.formats.opendc.parquet.ResourceState
+import org.opendc.trace.conv.FRAGMENT_CPU_USAGE
+import org.opendc.trace.conv.FRAGMENT_DURATION
+import org.opendc.trace.conv.FRAGMENT_GPU_USAGE
+import org.opendc.trace.conv.TASK_ID
+import org.opendc.trace.formats.workload.parquet.Fragment
import java.time.Duration
import java.time.Instant
import java.util.UUID
@@ -39,27 +36,21 @@ import java.util.UUID
/**
* A [TableWriter] implementation for the OpenDC virtual machine trace format.
*/
-internal class OdcVmResourceStateTableWriter(private val writer: ParquetWriter<ResourceState>) : TableWriter {
+internal class FragmentTableWriter(private val writer: ParquetWriter<Fragment>) : TableWriter {
/**
* The current state for the record that is being written.
*/
private var localIsActive = false
- private var localID: String = ""
- private var localTimestamp: Instant = Instant.MIN
+ private var localID: Int = -99
private var localDuration: Duration = Duration.ZERO
- private var localCpuCount: Int = 0
private var localCpuUsage: Double = Double.NaN
- private var localGpuCount: Int = 0
private var localGpuUsage: Double = Double.NaN
override fun startRow() {
localIsActive = true
- localID = ""
- localTimestamp = Instant.MIN
+ localID = -99
localDuration = Duration.ZERO
- localCpuCount = 0
localCpuUsage = Double.NaN
- localGpuCount = 0
localGpuUsage = Double.NaN
}
@@ -67,23 +58,19 @@ internal class OdcVmResourceStateTableWriter(private val writer: ParquetWriter<R
check(localIsActive) { "No active row" }
localIsActive = false
- check(lastId != localID || localTimestamp >= lastTimestamp) { "Records need to be ordered by (id, timestamp)" }
+        check(lastId == localID || seenIds.add(localID)) { "Fragments need to be grouped by task id" }
- writer.write(ResourceState(localID, localTimestamp, localDuration, localCpuCount, localCpuUsage, localGpuCount, localGpuUsage))
+ writer.write(Fragment(localID, localDuration, localCpuUsage, localGpuUsage))
lastId = localID
- lastTimestamp = localTimestamp
}
override fun resolve(name: String): Int {
return when (name) {
- resourceID -> colID
- resourceStateTimestamp -> colTimestamp
- resourceStateDuration -> colDuration
- resourceCpuCount -> colCpuCount
- resourceStateCpuUsage -> colCpuUsage
- resourceGpuCount -> colGpuCount
- resourceStateGpuUsage -> colGpuUsage
+ TASK_ID -> colID
+ FRAGMENT_DURATION -> colDuration
+ FRAGMENT_CPU_USAGE -> colCpuUsage
+ FRAGMENT_GPU_USAGE -> colGpuUsage
else -> -1
}
}
@@ -101,8 +88,7 @@ internal class OdcVmResourceStateTableWriter(private val writer: ParquetWriter<R
) {
check(localIsActive) { "No active row" }
when (index) {
- colCpuCount -> localCpuCount = value
- colGpuCount -> localGpuCount = value
+ colID -> localID = value
else -> throw IllegalArgumentException("Invalid column or type [index $index]")
}
}
@@ -137,12 +123,7 @@ internal class OdcVmResourceStateTableWriter(private val writer: ParquetWriter<R
index: Int,
value: String,
) {
- check(localIsActive) { "No active row" }
-
- when (index) {
- colID -> localID = value
- else -> throw IllegalArgumentException("Invalid column or type [index $index]")
- }
+ throw IllegalArgumentException("Invalid column or type [index $index]")
}
override fun setUUID(
@@ -156,12 +137,7 @@ internal class OdcVmResourceStateTableWriter(private val writer: ParquetWriter<R
index: Int,
value: Instant,
) {
- check(localIsActive) { "No active row" }
-
- when (index) {
- colTimestamp -> localTimestamp = value
- else -> throw IllegalArgumentException("Invalid column or type [index $index]")
- }
+ throw IllegalArgumentException("Invalid column or type [index $index]")
}
override fun setDuration(
@@ -208,14 +184,10 @@ internal class OdcVmResourceStateTableWriter(private val writer: ParquetWriter<R
/**
* Last column values that are used to check for correct partitioning.
*/
- private var lastId: String? = null
- private var lastTimestamp: Instant = Instant.MAX
+    private var lastId: Int? = null
+    private val seenIds = mutableSetOf<Int>()
private val colID = 0
- private val colTimestamp = 1
- private val colDuration = 2
- private val colCpuCount = 3
- private val colCpuUsage = 4
- private val colGpuCount = 5
- private val colGpuUsage = 6
+ private val colDuration = 1
+ private val colCpuUsage = 2
+ private val colGpuUsage = 3
}
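The write side mirrors the reader. A hedged sketch of appending one fragment row: setInt and setDuration appear in the hunks above, while setDouble is assumed to exist on TableWriter for the usage columns:

import java.time.Duration

val writer = format.newWriter(path, TABLE_FRAGMENTS)
writer.startRow()
writer.setInt(writer.resolve(TASK_ID), 1)
writer.setDuration(writer.resolve(FRAGMENT_DURATION), Duration.ofMinutes(5))
writer.setDouble(writer.resolve(FRAGMENT_CPU_USAGE), 1200.0)  // assumed MHz
writer.endRow()
writer.close()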
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/TaskTableReader.kt
index 495a5d75..6c700b0c 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceTableReader.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/TaskTableReader.kt
@@ -20,23 +20,24 @@
* SOFTWARE.
*/
-package org.opendc.trace.formats.opendc
+package org.opendc.trace.formats.workload
import org.opendc.trace.TableColumnType
import org.opendc.trace.TableReader
-import org.opendc.trace.conv.resourceChildren
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceDeadline
-import org.opendc.trace.conv.resourceDuration
-import org.opendc.trace.conv.resourceGpuCapacity
-import org.opendc.trace.conv.resourceGpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceNature
-import org.opendc.trace.conv.resourceParents
-import org.opendc.trace.conv.resourceSubmissionTime
-import org.opendc.trace.formats.opendc.parquet.Resource
+import org.opendc.trace.conv.TASK_CHILDREN
+import org.opendc.trace.conv.TASK_CPU_CAPACITY
+import org.opendc.trace.conv.TASK_CPU_COUNT
+import org.opendc.trace.conv.TASK_DEADLINE
+import org.opendc.trace.conv.TASK_DURATION
+import org.opendc.trace.conv.TASK_GPU_CAPACITY
+import org.opendc.trace.conv.TASK_GPU_COUNT
+import org.opendc.trace.conv.TASK_ID
+import org.opendc.trace.conv.TASK_MEM_CAPACITY
+import org.opendc.trace.conv.TASK_NAME
+import org.opendc.trace.conv.TASK_NATURE
+import org.opendc.trace.conv.TASK_PARENTS
+import org.opendc.trace.conv.TASK_SUBMISSION_TIME
+import org.opendc.trace.formats.workload.parquet.Task
import org.opendc.trace.util.convertTo
import org.opendc.trace.util.parquet.LocalParquetReader
import java.time.Duration
@@ -46,11 +47,11 @@ import java.util.UUID
/**
* A [TableReader] implementation for the "resources table" in the OpenDC virtual machine trace format.
*/
-internal class OdcVmResourceTableReader(private val reader: LocalParquetReader<Resource>) : TableReader {
+internal class TaskTableReader(private val reader: LocalParquetReader<Task>) : TableReader {
/**
* The current record.
*/
- private var record: Resource? = null
+ private var record: Task? = null
override fun nextRow(): Boolean {
try {
@@ -65,35 +66,37 @@ internal class OdcVmResourceTableReader(private val reader: LocalParquetReader<R
}
private val colID = 0
- private val colSubmissionTime = 1
- private val colDurationTime = 2
- private val colCpuCount = 3
- private val colCpuCapacity = 4
- private val colMemCapacity = 5
- private val colGpuCapacity = 6
- private val colGpuCount = 7
- private val colParents = 8
- private val colChildren = 9
- private val colNature = 10
- private val colDeadline = 11
-
- private val typeParents = TableColumnType.Set(TableColumnType.String)
- private val typeChildren = TableColumnType.Set(TableColumnType.String)
+ private val colName = 1
+ private val colSubmissionTime = 2
+ private val colDurationTime = 3
+ private val colCpuCount = 4
+ private val colCpuCapacity = 5
+ private val colMemCapacity = 6
+ private val colGpuCapacity = 7
+ private val colGpuCount = 8
+ private val colParents = 9
+ private val colChildren = 10
+ private val colNature = 11
+ private val colDeadline = 12
+
+ private val typeParents = TableColumnType.Set(TableColumnType.Int)
+ private val typeChildren = TableColumnType.Set(TableColumnType.Int)
override fun resolve(name: String): Int {
return when (name) {
- resourceID -> colID
- resourceSubmissionTime -> colSubmissionTime
- resourceDuration -> colDurationTime
- resourceCpuCount -> colCpuCount
- resourceCpuCapacity -> colCpuCapacity
- resourceMemCapacity -> colMemCapacity
- resourceGpuCount -> colGpuCount
- resourceGpuCapacity -> colGpuCapacity
- resourceParents -> colParents
- resourceChildren -> colChildren
- resourceNature -> colNature
- resourceDeadline -> colDeadline
+ TASK_ID -> colID
+ TASK_NAME -> colName
+ TASK_SUBMISSION_TIME -> colSubmissionTime
+ TASK_DURATION -> colDurationTime
+ TASK_CPU_COUNT -> colCpuCount
+ TASK_CPU_CAPACITY -> colCpuCapacity
+ TASK_MEM_CAPACITY -> colMemCapacity
+ TASK_GPU_COUNT -> colGpuCount
+ TASK_GPU_CAPACITY -> colGpuCapacity
+ TASK_PARENTS -> colParents
+ TASK_CHILDREN -> colChildren
+ TASK_NATURE -> colNature
+ TASK_DEADLINE -> colDeadline
else -> -1
}
}
@@ -117,6 +120,7 @@ internal class OdcVmResourceTableReader(private val reader: LocalParquetReader<R
val record = checkNotNull(record) { "Reader in invalid state" }
return when (index) {
+ colID -> record.id
colCpuCount -> record.cpuCount
colGpuCount -> record.gpuCount
else -> throw IllegalArgumentException("Invalid column")
@@ -151,7 +155,7 @@ internal class OdcVmResourceTableReader(private val reader: LocalParquetReader<R
val record = checkNotNull(record) { "Reader in invalid state" }
return when (index) {
- colID -> record.id
+ colName -> record.name
colNature -> record.nature
else -> throw IllegalArgumentException("Invalid column")
}
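The practical effect of this rename for callers is the id/name split: TASK_ID now resolves to an Int column, and the old string identifier lives under TASK_NAME. A short sketch, again assuming the conv constants are imported:

val tasks = format.newReader(path, TABLE_TASKS, listOf(TASK_ID, TASK_NAME))
val colId = tasks.resolve(TASK_ID)
val colName = tasks.resolve(TASK_NAME)
while (tasks.nextRow()) {
    val id: Int = tasks.getInt(colId)           // previously getString on the id column
    val name: String? = tasks.getString(colName)
}
tasks.close()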
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceTableWriter.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/TaskTableWriter.kt
index 022e288a..39be36c1 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/OdcVmResourceTableWriter.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/TaskTableWriter.kt
@@ -20,23 +20,24 @@
* SOFTWARE.
*/
-package org.opendc.trace.formats.opendc
+package org.opendc.trace.formats.workload
import org.apache.parquet.hadoop.ParquetWriter
import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.resourceChildren
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceDeadline
-import org.opendc.trace.conv.resourceDuration
-import org.opendc.trace.conv.resourceGpuCapacity
-import org.opendc.trace.conv.resourceGpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceNature
-import org.opendc.trace.conv.resourceParents
-import org.opendc.trace.conv.resourceSubmissionTime
-import org.opendc.trace.formats.opendc.parquet.Resource
+import org.opendc.trace.conv.TASK_CHILDREN
+import org.opendc.trace.conv.TASK_CPU_CAPACITY
+import org.opendc.trace.conv.TASK_CPU_COUNT
+import org.opendc.trace.conv.TASK_DEADLINE
+import org.opendc.trace.conv.TASK_DURATION
+import org.opendc.trace.conv.TASK_GPU_CAPACITY
+import org.opendc.trace.conv.TASK_GPU_COUNT
+import org.opendc.trace.conv.TASK_ID
+import org.opendc.trace.conv.TASK_MEM_CAPACITY
+import org.opendc.trace.conv.TASK_NAME
+import org.opendc.trace.conv.TASK_NATURE
+import org.opendc.trace.conv.TASK_PARENTS
+import org.opendc.trace.conv.TASK_SUBMISSION_TIME
+import org.opendc.trace.formats.workload.parquet.Task
import java.time.Duration
import java.time.Instant
import java.util.UUID
@@ -44,12 +45,13 @@ import java.util.UUID
/**
* A [TableWriter] implementation for the OpenDC virtual machine trace format.
*/
-internal class OdcVmResourceTableWriter(private val writer: ParquetWriter<Resource>) : TableWriter {
+internal class TaskTableWriter(private val writer: ParquetWriter<Task>) : TableWriter {
/**
* The current state for the record that is being written.
*/
private var localIsActive = false
- private var localId: String = ""
+ private var localId: Int = -99
+ private var localName: String = ""
private var localSubmissionTime: Instant = Instant.MIN
private var localDuration: Long = 0L
private var localCpuCount: Int = 0
@@ -57,14 +59,15 @@ internal class OdcVmResourceTableWriter(private val writer: ParquetWriter<Resour
private var localMemCapacity: Double = Double.NaN
private var localGpuCount: Int = 0
private var localGpuCapacity: Double = Double.NaN
- private var localParents = mutableSetOf<String>()
- private var localChildren = mutableSetOf<String>()
+ private var localParents = mutableSetOf<Int>()
+ private var localChildren = mutableSetOf<Int>()
private var localNature: String? = null
private var localDeadline: Long = -1
override fun startRow() {
localIsActive = true
- localId = ""
+ localId = -99
+ localName = ""
localSubmissionTime = Instant.MIN
localDuration = 0L
localCpuCount = 0
@@ -82,8 +85,9 @@ internal class OdcVmResourceTableWriter(private val writer: ParquetWriter<Resour
check(localIsActive) { "No active row" }
localIsActive = false
writer.write(
- Resource(
+ Task(
localId,
+ localName,
localSubmissionTime,
localDuration,
localCpuCount,
@@ -101,18 +105,19 @@ internal class OdcVmResourceTableWriter(private val writer: ParquetWriter<Resour
override fun resolve(name: String): Int {
return when (name) {
- resourceID -> colID
- resourceSubmissionTime -> colSubmissionTime
- resourceDuration -> colDuration
- resourceCpuCount -> colCpuCount
- resourceCpuCapacity -> colCpuCapacity
- resourceMemCapacity -> colMemCapacity
- resourceGpuCount -> colGpuCount
- resourceGpuCapacity -> colGpuCapacity
- resourceParents -> colParents
- resourceChildren -> colChildren
- resourceNature -> colNature
- resourceDeadline -> colDeadline
+ TASK_ID -> colID
+            TASK_NAME -> colName
+ TASK_SUBMISSION_TIME -> colSubmissionTime
+ TASK_DURATION -> colDuration
+ TASK_CPU_COUNT -> colCpuCount
+ TASK_CPU_CAPACITY -> colCpuCapacity
+ TASK_MEM_CAPACITY -> colMemCapacity
+ TASK_GPU_COUNT -> colGpuCount
+ TASK_GPU_CAPACITY -> colGpuCapacity
+ TASK_PARENTS -> colParents
+ TASK_CHILDREN -> colChildren
+ TASK_NATURE -> colNature
+ TASK_DEADLINE -> colDeadline
else -> -1
}
}
@@ -130,6 +135,7 @@ internal class OdcVmResourceTableWriter(private val writer: ParquetWriter<Resour
) {
check(localIsActive) { "No active row" }
when (index) {
+ colID -> localId = value
colCpuCount -> localCpuCount = value
colGpuCount -> localGpuCount = value
else -> throw IllegalArgumentException("Invalid column or type [index $index]")
@@ -174,7 +180,7 @@ internal class OdcVmResourceTableWriter(private val writer: ParquetWriter<Resour
) {
check(localIsActive) { "No active row" }
when (index) {
- colID -> localId = value
+ colName -> localName = value
colNature -> localNature = value
else -> throw IllegalArgumentException("Invalid column index $index")
}
@@ -235,15 +241,16 @@ internal class OdcVmResourceTableWriter(private val writer: ParquetWriter<Resour
}
private val colID = 0
- private val colSubmissionTime = 1
- private val colDuration = 2
- private val colCpuCount = 3
- private val colCpuCapacity = 4
- private val colMemCapacity = 5
- private val colGpuCount = 6
- private val colGpuCapacity = 7
- private val colParents = 8
- private val colChildren = 9
- private val colNature = 10
- private val colDeadline = 11
+ private val colName = 1
+ private val colSubmissionTime = 2
+ private val colDuration = 3
+ private val colCpuCount = 4
+ private val colCpuCapacity = 5
+ private val colMemCapacity = 6
+ private val colGpuCount = 7
+ private val colGpuCapacity = 8
+ private val colParents = 9
+ private val colChildren = 10
+ private val colNature = 11
+ private val colDeadline = 12
}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/WorkloadTraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/WorkloadTraceFormat.kt
new file mode 100644
index 00000000..7af0650e
--- /dev/null
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/WorkloadTraceFormat.kt
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2021 AtLarge Research
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package org.opendc.trace.formats.workload
+
+import org.apache.parquet.column.ParquetProperties
+import org.apache.parquet.hadoop.ParquetFileWriter
+import org.apache.parquet.hadoop.metadata.CompressionCodecName
+import org.opendc.trace.TableColumn
+import org.opendc.trace.TableColumnType
+import org.opendc.trace.TableReader
+import org.opendc.trace.TableWriter
+import org.opendc.trace.conv.FRAGMENT_CPU_USAGE
+import org.opendc.trace.conv.FRAGMENT_DURATION
+import org.opendc.trace.conv.FRAGMENT_GPU_USAGE
+import org.opendc.trace.conv.TABLE_FRAGMENTS
+import org.opendc.trace.conv.TABLE_TASKS
+import org.opendc.trace.conv.TASK_CHILDREN
+import org.opendc.trace.conv.TASK_CPU_CAPACITY
+import org.opendc.trace.conv.TASK_CPU_COUNT
+import org.opendc.trace.conv.TASK_DEADLINE
+import org.opendc.trace.conv.TASK_DURATION
+import org.opendc.trace.conv.TASK_GPU_CAPACITY
+import org.opendc.trace.conv.TASK_GPU_COUNT
+import org.opendc.trace.conv.TASK_ID
+import org.opendc.trace.conv.TASK_MEM_CAPACITY
+import org.opendc.trace.conv.TASK_NAME
+import org.opendc.trace.conv.TASK_NATURE
+import org.opendc.trace.conv.TASK_PARENTS
+import org.opendc.trace.conv.TASK_SUBMISSION_TIME
+import org.opendc.trace.formats.workload.parquet.FragmentReadSupport
+import org.opendc.trace.formats.workload.parquet.FragmentWriteSupport
+import org.opendc.trace.formats.workload.parquet.TaskReadSupport
+import org.opendc.trace.formats.workload.parquet.TaskWriteSupport
+import org.opendc.trace.spi.TableDetails
+import org.opendc.trace.spi.TraceFormat
+import org.opendc.trace.util.parquet.LocalParquetReader
+import org.opendc.trace.util.parquet.LocalParquetWriter
+import java.nio.file.Files
+import java.nio.file.Path
+
+/**
+ * A [TraceFormat] implementation of the OpenDC workload trace format.
+ */
+public class WorkloadTraceFormat : TraceFormat {
+ /**
+ * The name of this trace format.
+ */
+ override val name: String = "workload"
+
+ override fun create(path: Path) {
+ // Construct directory containing the trace files
+ Files.createDirectories(path)
+
+ val tables = getTables(path)
+
+ for (table in tables) {
+ val writer = newWriter(path, table)
+ writer.close()
+ }
+ }
+
+ override fun getTables(path: Path): List<String> = listOf(TABLE_TASKS, TABLE_FRAGMENTS)
+
+ override fun getDetails(
+ path: Path,
+ table: String,
+ ): TableDetails {
+ return when (table) {
+ TABLE_TASKS ->
+ TableDetails(
+ listOf(
+                        TableColumn(TASK_ID, TableColumnType.Int),
+                        TableColumn(TASK_NAME, TableColumnType.String),
+ TableColumn(TASK_SUBMISSION_TIME, TableColumnType.Instant),
+ TableColumn(TASK_DURATION, TableColumnType.Long),
+ TableColumn(TASK_CPU_COUNT, TableColumnType.Int),
+ TableColumn(TASK_CPU_CAPACITY, TableColumnType.Double),
+ TableColumn(TASK_MEM_CAPACITY, TableColumnType.Double),
+ TableColumn(TASK_GPU_COUNT, TableColumnType.Int),
+ TableColumn(TASK_GPU_CAPACITY, TableColumnType.Double),
+                        TableColumn(TASK_PARENTS, TableColumnType.Set(TableColumnType.Int)),
+                        TableColumn(TASK_CHILDREN, TableColumnType.Set(TableColumnType.Int)),
+ TableColumn(TASK_NATURE, TableColumnType.String),
+ TableColumn(TASK_DEADLINE, TableColumnType.Long),
+ ),
+ )
+ TABLE_FRAGMENTS ->
+ TableDetails(
+ listOf(
+                        TableColumn(TASK_ID, TableColumnType.Int),
+                        TableColumn(FRAGMENT_DURATION, TableColumnType.Duration),
+                        TableColumn(FRAGMENT_CPU_USAGE, TableColumnType.Double),
+                        TableColumn(FRAGMENT_GPU_USAGE, TableColumnType.Double),
+ ),
+ )
+ else -> throw IllegalArgumentException("Table $table not supported")
+ }
+ }
+
+ override fun newReader(
+ path: Path,
+ table: String,
+ projection: List<String>?,
+ ): TableReader {
+ return when (table) {
+ TABLE_TASKS -> {
+ val reader = LocalParquetReader(path.resolve("tasks.parquet"), TaskReadSupport(projection))
+ TaskTableReader(reader)
+ }
+ TABLE_FRAGMENTS -> {
+ val reader = LocalParquetReader(path.resolve("fragments.parquet"), FragmentReadSupport(projection))
+ FragmentTableReader(reader)
+ }
+ else -> throw IllegalArgumentException("Table $table not supported")
+ }
+ }
+
+ override fun newWriter(
+ path: Path,
+ table: String,
+ ): TableWriter {
+ return when (table) {
+ TABLE_TASKS -> {
+ val writer =
+ LocalParquetWriter.builder(path.resolve("tasks.parquet"), TaskWriteSupport())
+ .withCompressionCodec(CompressionCodecName.ZSTD)
+ .withPageWriteChecksumEnabled(true)
+ .withWriterVersion(ParquetProperties.WriterVersion.PARQUET_2_0)
+ .withWriteMode(ParquetFileWriter.Mode.OVERWRITE)
+ .build()
+ TaskTableWriter(writer)
+ }
+ TABLE_FRAGMENTS -> {
+ val writer =
+ LocalParquetWriter.builder(path.resolve("fragments.parquet"), FragmentWriteSupport())
+ .withCompressionCodec(CompressionCodecName.ZSTD)
+ .withDictionaryEncoding("id", true)
+ .withBloomFilterEnabled("id", true)
+ .withPageWriteChecksumEnabled(true)
+ .withWriterVersion(ParquetProperties.WriterVersion.PARQUET_2_0)
+ .withWriteMode(ParquetFileWriter.Mode.OVERWRITE)
+ .build()
+ FragmentTableWriter(writer)
+ }
+ else -> throw IllegalArgumentException("Table $table not supported")
+ }
+ }
+}
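An end-to-end sketch of the new format: the directory layout follows newReader/newWriter above (<path>/tasks.parquet and <path>/fragments.parquet), and the location used here is hypothetical:

import java.nio.file.Path

val format = WorkloadTraceFormat()
val path = Path.of("/tmp/example-workload")  // hypothetical location
format.create(path)                          // directory with empty tasks/fragments tables
check(format.getTables(path) == listOf(TABLE_TASKS, TABLE_FRAGMENTS))
val taskReader = format.newReader(path, TABLE_TASKS, null)
while (taskReader.nextRow()) { /* ... */ }
taskReader.close()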
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceState.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/Fragment.kt
index 10fc6be4..44385088 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceState.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/Fragment.kt
@@ -20,17 +20,13 @@
* SOFTWARE.
*/
-package org.opendc.trace.formats.opendc.parquet
+package org.opendc.trace.formats.workload.parquet
import java.time.Duration
-import java.time.Instant
-internal class ResourceState(
- val id: String,
- val timestamp: Instant,
+internal class Fragment(
+ val id: Int,
val duration: Duration,
- val cpuCount: Int,
val cpuUsage: Double,
- val gpuCount: Int,
val gpuUsage: Double,
)
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentReadSupport.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentReadSupport.kt
new file mode 100644
index 00000000..3fa914bc
--- /dev/null
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentReadSupport.kt
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2022 AtLarge Research
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package org.opendc.trace.formats.workload.parquet
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.parquet.hadoop.api.InitContext
+import org.apache.parquet.hadoop.api.ReadSupport
+import org.apache.parquet.io.api.RecordMaterializer
+import org.apache.parquet.schema.MessageType
+import org.apache.parquet.schema.Types
+import org.opendc.trace.TableColumn
+import org.opendc.trace.conv.FRAGMENT_CPU_USAGE
+import org.opendc.trace.conv.FRAGMENT_DURATION
+import org.opendc.trace.conv.TASK_ID
+
+/**
+ * A [ReadSupport] instance for [Fragment] objects.
+ */
+internal class FragmentReadSupport(private val projection: List<String>?) : ReadSupport<Fragment>() {
+ /**
+ * Mapping from field names to [TableColumn]s.
+ */
+ private val fieldMap =
+ mapOf(
+ "id" to TASK_ID,
+ "duration" to FRAGMENT_DURATION,
+ "cpuUsage" to FRAGMENT_CPU_USAGE,
+ "cpu_usage" to FRAGMENT_CPU_USAGE,
+ )
+
+ override fun init(context: InitContext): ReadContext {
+ val projectedSchema =
+ if (projection != null) {
+ Types.buildMessage()
+ .apply {
+ val projectionSet = projection.toSet()
+
+ for (field in FRAGMENT_SCHEMA.fields) {
+ val col = fieldMap[field.name] ?: continue
+ if (col in projectionSet) {
+ addField(field)
+ }
+ }
+ }
+ .named(FRAGMENT_SCHEMA.name)
+ } else {
+ FRAGMENT_SCHEMA
+ }
+
+ return ReadContext(projectedSchema)
+ }
+
+ override fun prepareForRead(
+ configuration: Configuration,
+ keyValueMetaData: Map<String, String>,
+ fileSchema: MessageType,
+ readContext: ReadContext,
+ ): RecordMaterializer<Fragment> = FragmentRecordMaterializer(readContext.requestedSchema)
+}
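The projection logic in init() is what makes column pruning work: only schema fields whose mapped column name appears in the requested projection survive into the read schema, so Parquet never decodes the remaining columns. Illustratively:

// With this projection, init() keeps the "id" and "cpu_usage" fields of
// FRAGMENT_SCHEMA and drops "duration" and "gpu_usage" from the requested schema.
val support = FragmentReadSupport(listOf(TASK_ID, FRAGMENT_CPU_USAGE))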
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceStateRecordMaterializer.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentRecordMaterializer.kt
index ee5e56aa..7902cab1 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceStateRecordMaterializer.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentRecordMaterializer.kt
@@ -20,9 +20,8 @@
* SOFTWARE.
*/
-package org.opendc.trace.formats.opendc.parquet
+package org.opendc.trace.formats.workload.parquet
-import org.apache.parquet.io.api.Binary
import org.apache.parquet.io.api.Converter
import org.apache.parquet.io.api.GroupConverter
import org.apache.parquet.io.api.PrimitiveConverter
@@ -32,13 +31,13 @@ import java.time.Duration
import java.time.Instant
/**
- * A [RecordMaterializer] for [ResourceState] records.
+ * A [RecordMaterializer] for [Fragment] records.
*/
-internal class ResourceStateRecordMaterializer(schema: MessageType) : RecordMaterializer<ResourceState>() {
+internal class FragmentRecordMaterializer(schema: MessageType) : RecordMaterializer<Fragment>() {
/**
* State of current record being read.
*/
- private var localId = ""
+ private var localId = -99
private var localTimestamp = Instant.MIN
private var localDuration = Duration.ZERO
private var localCpuCount = 0
@@ -59,8 +58,8 @@ internal class ResourceStateRecordMaterializer(schema: MessageType) : RecordMate
when (type.name) {
"id" ->
object : PrimitiveConverter() {
- override fun addBinary(value: Binary) {
- localId = value.toStringUsingUTF8()
+ override fun addInt(value: Int) {
+ localId = value
}
}
"timestamp", "time" ->
@@ -104,8 +103,7 @@ internal class ResourceStateRecordMaterializer(schema: MessageType) : RecordMate
}
override fun start() {
- localId = ""
- localTimestamp = Instant.MIN
+ localId = -99
localDuration = Duration.ZERO
localCpuCount = 0
localCpuUsage = 0.0
@@ -118,14 +116,11 @@ internal class ResourceStateRecordMaterializer(schema: MessageType) : RecordMate
override fun getConverter(fieldIndex: Int): Converter = converters[fieldIndex]
}
- override fun getCurrentRecord(): ResourceState =
- ResourceState(
+ override fun getCurrentRecord(): Fragment =
+ Fragment(
localId,
- localTimestamp,
localDuration,
- localCpuCount,
localCpuUsage,
- localGpuCount,
localGpuUsage,
)
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentSchemas.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentSchemas.kt
new file mode 100644
index 00000000..cd499e7e
--- /dev/null
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentSchemas.kt
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2025 AtLarge Research
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package org.opendc.trace.formats.workload.parquet
+
+import org.apache.parquet.schema.LogicalTypeAnnotation
+import org.apache.parquet.schema.MessageType
+import org.apache.parquet.schema.PrimitiveType
+import org.apache.parquet.schema.Types
+
+private val FRAGMENT_SCHEMA_v1: MessageType =
+ Types.buildMessage()
+ .addFields(
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.BINARY)
+ .`as`(LogicalTypeAnnotation.stringType())
+ .named("id"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT64)
+ .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
+ .named("timestamp"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT64)
+ .named("duration"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT32)
+ .named("cpu_count"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.DOUBLE)
+ .named("cpu_usage"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.INT32)
+ .named("gpu_count"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
+ .named("gpu_usage"),
+ )
+ .named("resource_state")
+
+private val FRAGMENT_SCHEMA_v2: MessageType =
+ Types.buildMessage()
+ .addFields(
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT32)
+ .named("id"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT64)
+ .named("duration"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.DOUBLE)
+ .named("cpu_usage"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
+ .named("gpu_usage"),
+ )
+ .named("resource_state")
+
+/**
+ * Parquet read schema for the "fragments" table in the trace.
+ */
+public val FRAGMENT_SCHEMA: MessageType = FRAGMENT_SCHEMA_v2
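The two private schemas record the on-disk evolution: v1 carried a string id, a timestamp, and core counts per row, while v2 trims a fragment to (id: Int, duration, cpu_usage, optional gpu_usage). Since a Parquet MessageType prints as a readable message definition, the active version can be inspected directly:

println(FRAGMENT_SCHEMA)  // prints the v2 "resource_state" message layout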
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceStateWriteSupport.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentWriteSupport.kt
index 58c43916..e6b7ba4f 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceStateWriteSupport.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/FragmentWriteSupport.kt
@@ -20,11 +20,10 @@
* SOFTWARE.
*/
-package org.opendc.trace.formats.opendc.parquet
+package org.opendc.trace.formats.workload.parquet
import org.apache.hadoop.conf.Configuration
import org.apache.parquet.hadoop.api.WriteSupport
-import org.apache.parquet.io.api.Binary
import org.apache.parquet.io.api.RecordConsumer
import org.apache.parquet.schema.LogicalTypeAnnotation
import org.apache.parquet.schema.MessageType
@@ -32,9 +31,9 @@ import org.apache.parquet.schema.PrimitiveType
import org.apache.parquet.schema.Types
/**
- * Support for writing [Resource] instances to Parquet format.
+ * Support for writing [Fragment] instances to Parquet format.
*/
-internal class ResourceStateWriteSupport : WriteSupport<ResourceState>() {
+internal class FragmentWriteSupport : WriteSupport<Fragment>() {
/**
* The current active record consumer.
*/
@@ -48,32 +47,24 @@ internal class ResourceStateWriteSupport : WriteSupport<ResourceState>() {
this.recordConsumer = recordConsumer
}
- override fun write(record: ResourceState) {
+ override fun write(record: Fragment) {
write(recordConsumer, record)
}
private fun write(
consumer: RecordConsumer,
- record: ResourceState,
+ record: Fragment,
) {
consumer.startMessage()
consumer.startField("id", 0)
- consumer.addBinary(Binary.fromCharSequence(record.id))
+ consumer.addInteger(record.id)
consumer.endField("id", 0)
- consumer.startField("timestamp", 1)
- consumer.addLong(record.timestamp.toEpochMilli())
- consumer.endField("timestamp", 1)
-
consumer.startField("duration", 2)
consumer.addLong(record.duration.toMillis())
consumer.endField("duration", 2)
- consumer.startField("cpu_count", 3)
- consumer.addInteger(record.cpuCount)
- consumer.endField("cpu_count", 3)
-
consumer.startField("cpu_usage", 4)
consumer.addDouble(record.cpuUsage)
consumer.endField("cpu_usage", 4)
@@ -101,9 +92,6 @@ internal class ResourceStateWriteSupport : WriteSupport<ResourceState>() {
.required(PrimitiveType.PrimitiveTypeName.INT64)
.named("duration"),
Types
- .required(PrimitiveType.PrimitiveTypeName.INT32)
- .named("cpu_count"),
- Types
.required(PrimitiveType.PrimitiveTypeName.DOUBLE)
.named("cpu_usage"),
)
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/Resource.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/Task.kt
index d727920a..f661d5a9 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/Resource.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/Task.kt
@@ -20,15 +20,16 @@
* SOFTWARE.
*/
-package org.opendc.trace.formats.opendc.parquet
+package org.opendc.trace.formats.workload.parquet
import java.time.Instant
/**
* A description of a resource in a trace.
*/
-internal data class Resource(
- val id: String,
+internal data class Task(
+ val id: Int,
+ val name: String,
val submissionTime: Instant,
val durationTime: Long,
val cpuCount: Int,
@@ -36,8 +37,8 @@ internal data class Resource(
val memCapacity: Double,
val gpuCount: Int = 0,
val gpuCapacity: Double = 0.0,
- val parents: Set<String> = emptySet(),
- val children: Set<String> = emptySet(),
+ val parents: Set<Int> = emptySet(),
+ val children: Set<Int> = emptySet(),
val nature: String? = null,
val deadline: Long = -1,
)
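A small sketch of constructing the renamed record, showing which fields default; the values and units are illustrative (capacity in MHz and duration in milliseconds are assumed OpenDC conventions):

import java.time.Instant

val task = Task(
    id = 1,
    name = "task-1",
    submissionTime = Instant.EPOCH,
    durationTime = 60_000L,   // assumed milliseconds
    cpuCount = 4,
    cpuCapacity = 4_000.0,    // assumed MHz
    memCapacity = 8.0e9,      // assumed bytes
)
// gpuCount/gpuCapacity, parents/children, nature, and deadline keep their
// defaults: 0, 0.0, empty sets, null, and -1.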
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskReadSupport.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskReadSupport.kt
new file mode 100644
index 00000000..4bbb18ac
--- /dev/null
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskReadSupport.kt
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2022 AtLarge Research
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package org.opendc.trace.formats.workload.parquet
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.parquet.hadoop.api.InitContext
+import org.apache.parquet.hadoop.api.ReadSupport
+import org.apache.parquet.io.api.RecordMaterializer
+import org.apache.parquet.schema.MessageType
+import org.apache.parquet.schema.Types
+import org.opendc.trace.TableColumn
+import org.opendc.trace.conv.TASK_CHILDREN
+import org.opendc.trace.conv.TASK_CPU_CAPACITY
+import org.opendc.trace.conv.TASK_CPU_COUNT
+import org.opendc.trace.conv.TASK_DEADLINE
+import org.opendc.trace.conv.TASK_DURATION
+import org.opendc.trace.conv.TASK_GPU_CAPACITY
+import org.opendc.trace.conv.TASK_GPU_COUNT
+import org.opendc.trace.conv.TASK_ID
+import org.opendc.trace.conv.TASK_MEM_CAPACITY
+import org.opendc.trace.conv.TASK_NAME
+import org.opendc.trace.conv.TASK_NATURE
+import org.opendc.trace.conv.TASK_PARENTS
+import org.opendc.trace.conv.TASK_SUBMISSION_TIME
+
+/**
+ * A [ReadSupport] instance for [Task] objects.
+ */
+internal class TaskReadSupport(private val projection: List<String>?) : ReadSupport<Task>() {
+ /**
+ * Mapping from field names to [TableColumn]s.
+ */
+ private val fieldMap =
+ mapOf(
+ "id" to TASK_ID,
+ "name" to TASK_NAME,
+ "submissionTime" to TASK_SUBMISSION_TIME,
+ "submission_time" to TASK_SUBMISSION_TIME,
+ "duration" to TASK_DURATION,
+ "maxCores" to TASK_CPU_COUNT,
+ "cpu_count" to TASK_CPU_COUNT,
+ "cpu_capacity" to TASK_CPU_CAPACITY,
+ "requiredMemory" to TASK_MEM_CAPACITY,
+ "mem_capacity" to TASK_MEM_CAPACITY,
+ "gpu_count" to TASK_GPU_COUNT,
+ "gpu_capacity" to TASK_GPU_CAPACITY,
+ "parents" to TASK_PARENTS,
+ "children" to TASK_CHILDREN,
+ "nature" to TASK_NATURE,
+ "deadline" to TASK_DEADLINE,
+ )
+
+ override fun init(context: InitContext): ReadContext {
+ val projectedSchema =
+ if (projection != null) {
+ Types.buildMessage()
+ .apply {
+ val projectionSet = projection.toSet()
+
+ for (field in TASK_SCHEMA.fields) {
+ val col = fieldMap[field.name] ?: continue
+ if (col in projectionSet) {
+ addField(field)
+ }
+ }
+ }
+ .named(TASK_SCHEMA.name)
+ } else {
+ TASK_SCHEMA
+ }
+
+ return ReadContext(projectedSchema)
+ }
+
+ override fun prepareForRead(
+ configuration: Configuration,
+ keyValueMetaData: Map<String, String>,
+ fileSchema: MessageType,
+ readContext: ReadContext,
+ ): RecordMaterializer<Task> = TaskRecordMaterializer(readContext.requestedSchema)
+}
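
The fieldMap above accepts both the legacy camelCase names ("submissionTime", "maxCores", "requiredMemory") and the new snake_case names, so a projection resolves against either naming generation. init() then prunes TASK_SCHEMA down to the projected columns, keeping fields in schema order regardless of the order of the projection list. A hedged sketch of the message that init() would build for TaskReadSupport(listOf(TASK_ID, TASK_CPU_COUNT)):

    import org.apache.parquet.schema.Types

    // Only the fields of TASK_SCHEMA whose mapped column is projected survive.
    val projected = Types.buildMessage()
        .addField(TASK_SCHEMA.fields.first { it.name == "id" })
        .addField(TASK_SCHEMA.fields.first { it.name == "cpu_count" })
        .named(TASK_SCHEMA.name)
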
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceRecordMaterializer.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskRecordMaterializer.kt
index f9493721..12dc54b7 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceRecordMaterializer.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskRecordMaterializer.kt
@@ -20,7 +20,7 @@
* SOFTWARE.
*/
-package org.opendc.trace.formats.opendc.parquet
+package org.opendc.trace.formats.workload.parquet
import org.apache.parquet.io.api.Binary
import org.apache.parquet.io.api.Converter
@@ -31,13 +31,14 @@ import org.apache.parquet.schema.MessageType
import java.time.Instant
/**
- * A [RecordMaterializer] for [Resource] records.
+ * A [RecordMaterializer] for [Task] records.
*/
-internal class ResourceRecordMaterializer(schema: MessageType) : RecordMaterializer<Resource>() {
+internal class TaskRecordMaterializer(schema: MessageType) : RecordMaterializer<Task>() {
/**
* State of current record being read.
*/
- private var localId = ""
+ private var localId = -99
+ private var localName = ""
private var localSubmissionTime = Instant.MIN
private var localDuration = 0L
private var localCpuCount = 0
@@ -45,8 +46,8 @@ internal class ResourceRecordMaterializer(schema: MessageType) : RecordMateriali
private var localMemCapacity = 0.0
private var localGpuCount = 0
private var localGpuCapacity = 0.0
- private var localParents = mutableSetOf<String>()
- private var localChildren = mutableSetOf<String>()
+ private var localParents = mutableSetOf<Int>()
+ private var localChildren = mutableSetOf<Int>()
private var localNature: String? = null
private var localDeadline = -1L
@@ -63,8 +64,14 @@ internal class ResourceRecordMaterializer(schema: MessageType) : RecordMateriali
when (type.name) {
"id" ->
object : PrimitiveConverter() {
+ override fun addInt(value: Int) {
+ localId = value
+ }
+ }
+ "name" ->
+ object : PrimitiveConverter() {
override fun addBinary(value: Binary) {
- localId = value.toStringUsingUTF8()
+ localName = value.toStringUsingUTF8()
}
}
"submission_time", "submissionTime" ->
@@ -132,7 +139,8 @@ internal class ResourceRecordMaterializer(schema: MessageType) : RecordMateriali
}
override fun start() {
- localId = ""
+ localId = -99
+ localName = ""
localSubmissionTime = Instant.MIN
localDuration = 0L
localCpuCount = 0
@@ -151,9 +159,10 @@ internal class ResourceRecordMaterializer(schema: MessageType) : RecordMateriali
override fun getConverter(fieldIndex: Int): Converter = converters[fieldIndex]
}
- override fun getCurrentRecord(): Resource =
- Resource(
+ override fun getCurrentRecord(): Task =
+ Task(
localId,
+ localName,
localSubmissionTime,
localDuration,
localCpuCount,
@@ -172,12 +181,11 @@ internal class ResourceRecordMaterializer(schema: MessageType) : RecordMateriali
/**
* Helper class to convert parent and child relations and add them to [relations].
*/
- private class RelationConverter(private val relations: MutableSet<String>) : GroupConverter() {
+ private class RelationConverter(private val relations: MutableSet<Int>) : GroupConverter() {
private val entryConverter =
object : PrimitiveConverter() {
- override fun addBinary(value: Binary) {
- val str = value.toStringUsingUTF8()
- relations.add(str)
+ override fun addInt(value: Int) {
+ relations.add(value)
}
}
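
With the materializer in place, tasks can be pulled from a file one record at a time; start() resets the accumulator state before each record, with -99 serving as the sentinel for a missing id. A hedged usage sketch using the LocalParquetReader helper that appears elsewhere in this diff (the trace path is hypothetical, and strictTyping is passed explicitly as at the reader's other call sites):

    import org.opendc.trace.util.parquet.LocalParquetReader
    import java.nio.file.Paths

    fun main() {
        // A null projection reads the full TASK_SCHEMA.
        val reader = LocalParquetReader(Paths.get("trace/tasks.parquet"), TaskReadSupport(null), strictTyping = false)
        try {
            var task = reader.read()
            while (task != null) {
                println("task ${task.id} (${task.name}) parents=${task.parents}")
                task = reader.read()
            }
        } finally {
            reader.close()
        }
    }
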
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskSchemas.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskSchemas.kt
new file mode 100644
index 00000000..f7f5e953
--- /dev/null
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskSchemas.kt
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2025 AtLarge Research
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in all
+ * copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+package org.opendc.trace.formats.workload.parquet
+
+import org.apache.parquet.schema.LogicalTypeAnnotation
+import org.apache.parquet.schema.MessageType
+import org.apache.parquet.schema.PrimitiveType
+import org.apache.parquet.schema.Type
+import org.apache.parquet.schema.Types
+
+private val TASK_SCHEMA_V1: MessageType =
+ Types.buildMessage()
+ .addFields(
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.BINARY)
+ .`as`(LogicalTypeAnnotation.stringType())
+ .named("id"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT64)
+ .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
+ .named("submission_time"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT64)
+ .named("duration"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT32)
+ .named("cpu_count"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.DOUBLE)
+ .named("cpu_capacity"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT64)
+ .named("mem_capacity"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.INT32)
+ .named("gpu_count"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
+ .named("gpu_capacity"),
+ Types
+ .buildGroup(Type.Repetition.OPTIONAL)
+ .addField(
+ Types.repeatedGroup()
+ .addField(
+ Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
+ .`as`(LogicalTypeAnnotation.stringType())
+ .named("element"),
+ )
+ .named("list"),
+ )
+ .`as`(LogicalTypeAnnotation.listType())
+ .named("parents"),
+ Types
+ .buildGroup(Type.Repetition.OPTIONAL)
+ .addField(
+ Types.repeatedGroup()
+ .addField(
+ Types.optional(PrimitiveType.PrimitiveTypeName.BINARY)
+ .`as`(LogicalTypeAnnotation.stringType())
+ .named("element"),
+ )
+ .named("list"),
+ )
+ .`as`(LogicalTypeAnnotation.listType())
+ .named("children"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.BINARY)
+ .`as`(LogicalTypeAnnotation.stringType())
+ .named("nature"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.INT64)
+ .named("deadline"),
+ )
+ .named("resource")
+
+private val TASK_SCHEMA_V2: MessageType =
+ Types.buildMessage()
+ .addFields(
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT32)
+ .named("id"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.BINARY)
+ .`as`(LogicalTypeAnnotation.stringType())
+ .named("name"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT64)
+ .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
+ .named("submission_time"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT64)
+ .named("duration"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT32)
+ .named("cpu_count"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.DOUBLE)
+ .named("cpu_capacity"),
+ Types
+ .required(PrimitiveType.PrimitiveTypeName.INT64)
+ .named("mem_capacity"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.INT32)
+ .named("gpu_count"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
+ .named("gpu_capacity"),
+ Types
+ .buildGroup(Type.Repetition.OPTIONAL)
+ .addField(
+ Types.repeatedGroup()
+ .addField(
+ Types.optional(
+ PrimitiveType.PrimitiveTypeName.INT32,
+ )
+ .named("element"),
+ )
+ .named("list"),
+ )
+ .`as`(LogicalTypeAnnotation.listType())
+ .named("parents"),
+ Types
+ .buildGroup(Type.Repetition.OPTIONAL)
+ .addField(
+ Types.repeatedGroup()
+ .addField(
+ Types.optional(
+ PrimitiveType.PrimitiveTypeName.INT32,
+ )
+ .named("element"),
+ )
+ .named("list"),
+ )
+ .`as`(LogicalTypeAnnotation.listType())
+ .named("children"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.BINARY)
+ .`as`(LogicalTypeAnnotation.stringType())
+ .named("nature"),
+ Types
+ .optional(PrimitiveType.PrimitiveTypeName.INT64)
+ .named("deadline"),
+ )
+ .named("resource")
+
+public val TASK_SCHEMA: MessageType = TASK_SCHEMA_V2
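
TASK_SCHEMA_V1 preserves the old layout (string id, string parent/child list elements, no name column) while TASK_SCHEMA_V2 switches to int32 ids and adds the optional name field; note that both keep the legacy message name "resource", so the primitive type of the id column is the practical discriminator between generations. A hedged sketch of telling files apart (fileSchema would come from a Parquet file footer):

    import org.apache.parquet.schema.MessageType
    import org.apache.parquet.schema.PrimitiveType

    fun schemaVersion(fileSchema: MessageType): Int {
        val idType = fileSchema.getType("id").asPrimitiveType().primitiveTypeName
        return if (idType == PrimitiveType.PrimitiveTypeName.INT32) 2 else 1
    }
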
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceWriteSupport.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskWriteSupport.kt
index c3e984fb..a7ce62b8 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/opendc/parquet/ResourceWriteSupport.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/workload/parquet/TaskWriteSupport.kt
@@ -20,47 +20,43 @@
* SOFTWARE.
*/
-package org.opendc.trace.formats.opendc.parquet
+package org.opendc.trace.formats.workload.parquet
import org.apache.hadoop.conf.Configuration
import org.apache.parquet.hadoop.api.WriteSupport
import org.apache.parquet.io.api.Binary
import org.apache.parquet.io.api.RecordConsumer
-import org.apache.parquet.schema.LogicalTypeAnnotation
-import org.apache.parquet.schema.MessageType
-import org.apache.parquet.schema.PrimitiveType
-import org.apache.parquet.schema.Types
import kotlin.math.roundToLong
/**
- * Support for writing [Resource] instances to Parquet format.
+ * Support for writing [Task] instances to Parquet format.
*/
-internal class ResourceWriteSupport : WriteSupport<Resource>() {
+internal class TaskWriteSupport : WriteSupport<Task>() {
/**
* The current active record consumer.
*/
private lateinit var recordConsumer: RecordConsumer
override fun init(configuration: Configuration): WriteContext {
- return WriteContext(WRITE_SCHEMA, emptyMap())
+ return WriteContext(TASK_SCHEMA, emptyMap())
}
override fun prepareForWrite(recordConsumer: RecordConsumer) {
this.recordConsumer = recordConsumer
}
- override fun write(record: Resource) {
+ override fun write(record: Task) {
write(recordConsumer, record)
}
private fun write(
consumer: RecordConsumer,
- record: Resource,
+ record: Task,
) {
consumer.startMessage()
consumer.startField("id", 0)
- consumer.addBinary(Binary.fromCharSequence(record.id))
+ consumer.addInteger(record.id)
consumer.endField("id", 0)
consumer.startField("submission_time", 1)
@@ -97,43 +93,4 @@ internal class ResourceWriteSupport : WriteSupport<Resource>() {
consumer.endMessage()
}
-
- companion object {
- /**
- * Parquet schema for the "resources" table in the trace.
- */
- @JvmStatic
- val WRITE_SCHEMA: MessageType =
- Types.buildMessage()
- .addFields(
- Types
- .required(PrimitiveType.PrimitiveTypeName.BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
- .named("id"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
- .named("submission_time"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .named("duration"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT32)
- .named("cpu_count"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.DOUBLE)
- .named("cpu_capacity"),
- Types
- .required(PrimitiveType.PrimitiveTypeName.INT64)
- .named("mem_capacity"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.BINARY)
- .`as`(LogicalTypeAnnotation.stringType())
- .named("nature"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .named("deadline"),
- )
- .named("resource")
- }
}
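
TaskWriteSupport now draws its schema from the shared TASK_SCHEMA instead of a private WRITE_SCHEMA companion, so readers and writers can no longer drift apart. Wiring it into a ParquetWriter takes a small builder; a hedged sketch assuming the standard parquet-hadoop Builder contract (OpenDC may already ship its own wrapper for this):

    import org.apache.hadoop.conf.Configuration
    import org.apache.parquet.hadoop.ParquetWriter
    import org.apache.parquet.hadoop.api.WriteSupport
    import org.apache.parquet.io.OutputFile

    class TaskWriterBuilder(file: OutputFile) :
        ParquetWriter.Builder<Task, TaskWriterBuilder>(file) {
        override fun self(): TaskWriterBuilder = this
        override fun getWriteSupport(conf: Configuration): WriteSupport<Task> = TaskWriteSupport()
    }
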
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/WtfTaskTableReader.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/WtfTaskTableReader.kt
deleted file mode 100644
index 95582388..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/WtfTaskTableReader.kt
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.wtf
-
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.TASK_CHILDREN
-import org.opendc.trace.conv.TASK_GROUP_ID
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_REQ_NCPUS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_SUBMIT_TIME
-import org.opendc.trace.conv.TASK_USER_ID
-import org.opendc.trace.conv.TASK_WAIT_TIME
-import org.opendc.trace.conv.TASK_WORKFLOW_ID
-import org.opendc.trace.util.convertTo
-import org.opendc.trace.util.parquet.LocalParquetReader
-import org.opendc.trace.wtf.parquet.Task
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-
-/**
- * A [TableReader] implementation for the WTF format.
- */
-internal class WtfTaskTableReader(private val reader: LocalParquetReader<Task>) : TableReader {
- /**
- * The current record.
- */
- private var record: Task? = null
-
- override fun nextRow(): Boolean {
- try {
- val record = reader.read()
- this.record = record
-
- return record != null
- } catch (e: Throwable) {
- this.record = null
- throw e
- }
- }
-
- private val colID = 0
- private val colWorkflowID = 1
- private val colSubmitTime = 2
- private val colWaitTime = 3
- private val colRuntime = 4
- private val colReqNcpus = 5
- private val colParents = 6
- private val colChildren = 7
- private val colGroupID = 8
- private val colUserID = 9
-
- private val typeParents = TableColumnType.Set(TableColumnType.String)
- private val typeChildren = TableColumnType.Set(TableColumnType.String)
-
- override fun resolve(name: String): Int {
- return when (name) {
- TASK_ID -> colID
- TASK_WORKFLOW_ID -> colWorkflowID
- TASK_SUBMIT_TIME -> colSubmitTime
- TASK_WAIT_TIME -> colWaitTime
- TASK_RUNTIME -> colRuntime
- TASK_REQ_NCPUS -> colReqNcpus
- TASK_PARENTS -> colParents
- TASK_CHILDREN -> colChildren
- TASK_GROUP_ID -> colGroupID
- TASK_USER_ID -> colUserID
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean {
- require(index in colID..colUserID) { "Invalid column index" }
- return false
- }
-
- override fun getBoolean(index: Int): Boolean {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInt(index: Int): Int {
- val record = checkNotNull(record) { "Reader in invalid state" }
-
- return when (index) {
- colReqNcpus -> record.requestedCpus
- colGroupID -> record.groupId
- colUserID -> record.userId
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getLong(index: Int): Long {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getFloat(index: Int): Float {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getDouble(index: Int): Double {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getString(index: Int): String {
- val record = checkNotNull(record) { "Reader in invalid state" }
- return when (index) {
- colID -> record.id
- colWorkflowID -> record.workflowId
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getUUID(index: Int): UUID? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun getInstant(index: Int): Instant {
- val record = checkNotNull(record) { "Reader in invalid state" }
- return when (index) {
- colSubmitTime -> record.submitTime
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun getDuration(index: Int): Duration {
- val record = checkNotNull(record) { "Reader in invalid state" }
- return when (index) {
- colWaitTime -> record.waitTime
- colRuntime -> record.runtime
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- val record = checkNotNull(record) { "Reader in invalid state" }
- return when (index) {
- colParents -> typeParents.convertTo(record.parents, elementType)
- colChildren -> typeChildren.convertTo(record.children, elementType)
- else -> throw IllegalArgumentException("Invalid column")
- }
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- throw IllegalArgumentException("Invalid column")
- }
-
- override fun close() {
- reader.close()
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/WtfTraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/WtfTraceFormat.kt
deleted file mode 100644
index 1386d2ef..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/WtfTraceFormat.kt
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.wtf
-
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.TABLE_TASKS
-import org.opendc.trace.conv.TASK_CHILDREN
-import org.opendc.trace.conv.TASK_GROUP_ID
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_REQ_NCPUS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_SUBMIT_TIME
-import org.opendc.trace.conv.TASK_USER_ID
-import org.opendc.trace.conv.TASK_WAIT_TIME
-import org.opendc.trace.conv.TASK_WORKFLOW_ID
-import org.opendc.trace.spi.TableDetails
-import org.opendc.trace.spi.TraceFormat
-import org.opendc.trace.util.parquet.LocalParquetReader
-import org.opendc.trace.wtf.parquet.TaskReadSupport
-import java.nio.file.Path
-
-/**
- * A [TraceFormat] implementation for the Workflow Trace Format (WTF).
- */
-public class WtfTraceFormat : TraceFormat {
- override val name: String = "wtf"
-
- override fun create(path: Path) {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-
- override fun getTables(path: Path): List<String> = listOf(TABLE_TASKS)
-
- override fun getDetails(
- path: Path,
- table: String,
- ): TableDetails {
- return when (table) {
- TABLE_TASKS ->
- TableDetails(
- listOf(
- TableColumn(TASK_ID, TableColumnType.String),
- TableColumn(TASK_WORKFLOW_ID, TableColumnType.String),
- TableColumn(TASK_SUBMIT_TIME, TableColumnType.Instant),
- TableColumn(TASK_WAIT_TIME, TableColumnType.Duration),
- TableColumn(TASK_RUNTIME, TableColumnType.Duration),
- TableColumn(TASK_REQ_NCPUS, TableColumnType.Int),
- TableColumn(TASK_PARENTS, TableColumnType.Set(TableColumnType.String)),
- TableColumn(TASK_CHILDREN, TableColumnType.Set(TableColumnType.String)),
- TableColumn(TASK_GROUP_ID, TableColumnType.Int),
- TableColumn(TASK_USER_ID, TableColumnType.Int),
- ),
- )
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newReader(
- path: Path,
- table: String,
- projection: List<String>?,
- ): TableReader {
- return when (table) {
- TABLE_TASKS -> {
- val reader = LocalParquetReader(path.resolve("tasks/schema-1.0"), TaskReadSupport(projection), strictTyping = false)
- WtfTaskTableReader(reader)
- }
- else -> throw IllegalArgumentException("Table $table not supported")
- }
- }
-
- override fun newWriter(
- path: Path,
- table: String,
- ): TableWriter {
- throw UnsupportedOperationException("Writing not supported for this format")
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/Task.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/Task.kt
deleted file mode 100644
index a1db0cab..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/Task.kt
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.wtf.parquet
-
-import java.time.Duration
-import java.time.Instant
-
-/**
- * A task in the Workflow Trace Format.
- */
-internal data class Task(
- val id: String,
- val workflowId: String,
- val submitTime: Instant,
- val waitTime: Duration,
- val runtime: Duration,
- val requestedCpus: Int,
- val groupId: Int,
- val userId: Int,
- val parents: Set<String>,
- val children: Set<String>,
-)
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/TaskReadSupport.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/TaskReadSupport.kt
deleted file mode 100644
index 1f9c506d..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/TaskReadSupport.kt
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.wtf.parquet
-
-import org.apache.hadoop.conf.Configuration
-import org.apache.parquet.hadoop.api.InitContext
-import org.apache.parquet.hadoop.api.ReadSupport
-import org.apache.parquet.io.api.RecordMaterializer
-import org.apache.parquet.schema.LogicalTypeAnnotation
-import org.apache.parquet.schema.MessageType
-import org.apache.parquet.schema.PrimitiveType
-import org.apache.parquet.schema.Type
-import org.apache.parquet.schema.Types
-import org.opendc.trace.conv.TASK_CHILDREN
-import org.opendc.trace.conv.TASK_GROUP_ID
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_REQ_NCPUS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_SUBMIT_TIME
-import org.opendc.trace.conv.TASK_USER_ID
-import org.opendc.trace.conv.TASK_WAIT_TIME
-import org.opendc.trace.conv.TASK_WORKFLOW_ID
-
-/**
- * A [ReadSupport] instance for [Task] objects.
- *
- * @param projection The projection of the table to read.
- */
-internal class TaskReadSupport(private val projection: List<String>?) : ReadSupport<Task>() {
- /**
- * Mapping of table columns to their Parquet column names.
- */
- private val colMap =
- mapOf(
- TASK_ID to "id",
- TASK_WORKFLOW_ID to "workflow_id",
- TASK_SUBMIT_TIME to "ts_submit",
- TASK_WAIT_TIME to "wait_time",
- TASK_RUNTIME to "runtime",
- TASK_REQ_NCPUS to "resource_amount_requested",
- TASK_PARENTS to "parents",
- TASK_CHILDREN to "children",
- TASK_GROUP_ID to "group_id",
- TASK_USER_ID to "user_id",
- )
-
- override fun init(context: InitContext): ReadContext {
- val projectedSchema =
- if (projection != null) {
- Types.buildMessage()
- .apply {
- val fieldByName = READ_SCHEMA.fields.associateBy { it.name }
-
- for (col in projection) {
- val fieldName = colMap[col] ?: continue
- addField(fieldByName.getValue(fieldName))
- }
- }
- .named(READ_SCHEMA.name)
- } else {
- READ_SCHEMA
- }
- return ReadContext(projectedSchema)
- }
-
- override fun prepareForRead(
- configuration: Configuration,
- keyValueMetaData: Map<String, String>,
- fileSchema: MessageType,
- readContext: ReadContext,
- ): RecordMaterializer<Task> = TaskRecordMaterializer(readContext.requestedSchema)
-
- companion object {
- /**
- * Parquet read schema for the "tasks" table in the trace.
- */
- @JvmStatic
- val READ_SCHEMA: MessageType =
- Types.buildMessage()
- .addFields(
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .named("id"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .named("workflow_id"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .`as`(LogicalTypeAnnotation.timestampType(true, LogicalTypeAnnotation.TimeUnit.MILLIS))
- .named("ts_submit"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .named("wait_time"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT64)
- .named("runtime"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.DOUBLE)
- .named("resource_amount_requested"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT32)
- .named("user_id"),
- Types
- .optional(PrimitiveType.PrimitiveTypeName.INT32)
- .named("group_id"),
- Types
- .buildGroup(Type.Repetition.OPTIONAL)
- .addField(
- Types.repeatedGroup()
- .addField(Types.optional(PrimitiveType.PrimitiveTypeName.INT64).named("item"))
- .named("list"),
- )
- .`as`(LogicalTypeAnnotation.listType())
- .named("children"),
- Types
- .buildGroup(Type.Repetition.OPTIONAL)
- .addField(
- Types.repeatedGroup()
- .addField(Types.optional(PrimitiveType.PrimitiveTypeName.INT64).named("item"))
- .named("list"),
- )
- .`as`(LogicalTypeAnnotation.listType())
- .named("parents"),
- )
- .named("task")
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/TaskRecordMaterializer.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/TaskRecordMaterializer.kt
deleted file mode 100644
index 412a4f8b..00000000
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/formats/wtf/parquet/TaskRecordMaterializer.kt
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.wtf.parquet
-
-import org.apache.parquet.io.api.Converter
-import org.apache.parquet.io.api.GroupConverter
-import org.apache.parquet.io.api.PrimitiveConverter
-import org.apache.parquet.io.api.RecordMaterializer
-import org.apache.parquet.schema.MessageType
-import java.time.Duration
-import java.time.Instant
-import kotlin.math.roundToInt
-import kotlin.math.roundToLong
-
-/**
- * A [RecordMaterializer] for [Task] records.
- */
-internal class TaskRecordMaterializer(schema: MessageType) : RecordMaterializer<Task>() {
- /**
- * State of current record being read.
- */
- private var localID = ""
- private var localWorkflowID = ""
- private var localSubmitTime = Instant.MIN
- private var localWaitTime = Duration.ZERO
- private var localRuntime = Duration.ZERO
- private var localRequestedCpus = 0
- private var localGroupId = 0
- private var localUserId = 0
- private var localParents = mutableSetOf<String>()
- private var localChildren = mutableSetOf<String>()
-
- /**
- * Root converter for the record.
- */
- private val root =
- object : GroupConverter() {
- /**
- * The converters for the columns of the schema.
- */
- private val converters =
- schema.fields.map { type ->
- when (type.name) {
- "id" ->
- object : PrimitiveConverter() {
- override fun addLong(value: Long) {
- localID = value.toString()
- }
- }
- "workflow_id" ->
- object : PrimitiveConverter() {
- override fun addLong(value: Long) {
- localWorkflowID = value.toString()
- }
- }
- "ts_submit" ->
- object : PrimitiveConverter() {
- override fun addLong(value: Long) {
- localSubmitTime = Instant.ofEpochMilli(value)
- }
- }
- "wait_time" ->
- object : PrimitiveConverter() {
- override fun addLong(value: Long) {
- localWaitTime = Duration.ofMillis(value)
- }
- }
- "runtime" ->
- object : PrimitiveConverter() {
- override fun addLong(value: Long) {
- localRuntime = Duration.ofMillis(value)
- }
- }
- "resource_amount_requested" ->
- object : PrimitiveConverter() {
- override fun addDouble(value: Double) {
- localRequestedCpus = value.roundToInt()
- }
- }
- "group_id" ->
- object : PrimitiveConverter() {
- override fun addInt(value: Int) {
- localGroupId = value
- }
- }
- "user_id" ->
- object : PrimitiveConverter() {
- override fun addInt(value: Int) {
- localUserId = value
- }
- }
- "children" -> RelationConverter(localChildren)
- "parents" -> RelationConverter(localParents)
- else -> error("Unknown column $type")
- }
- }
-
- override fun start() {
- localID = ""
- localWorkflowID = ""
- localSubmitTime = Instant.MIN
- localWaitTime = Duration.ZERO
- localRuntime = Duration.ZERO
- localRequestedCpus = 0
- localGroupId = 0
- localUserId = 0
- localParents.clear()
- localChildren.clear()
- }
-
- override fun end() {}
-
- override fun getConverter(fieldIndex: Int): Converter = converters[fieldIndex]
- }
-
- override fun getCurrentRecord(): Task =
- Task(
- localID,
- localWorkflowID,
- localSubmitTime,
- localWaitTime,
- localRuntime,
- localRequestedCpus,
- localGroupId,
- localUserId,
- localParents.toSet(),
- localChildren.toSet(),
- )
-
- override fun getRootConverter(): GroupConverter = root
-
- /**
- * Helper class to convert parent and child relations and add them to [relations].
- */
- private class RelationConverter(private val relations: MutableSet<String>) : GroupConverter() {
- private val entryConverter =
- object : PrimitiveConverter() {
- override fun addLong(value: Long) {
- relations.add(value.toString())
- }
-
- override fun addDouble(value: Double) {
- relations.add(value.roundToLong().toString())
- }
- }
-
- private val listConverter =
- object : GroupConverter() {
- override fun getConverter(fieldIndex: Int): Converter {
- require(fieldIndex == 0)
- return entryConverter
- }
-
- override fun start() {}
-
- override fun end() {}
- }
-
- override fun getConverter(fieldIndex: Int): Converter {
- require(fieldIndex == 0)
- return listConverter
- }
-
- override fun start() {}
-
- override fun end() {}
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/spi/TraceFormat.kt b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/spi/TraceFormat.kt
index e586f90a..945d8f2f 100644
--- a/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/spi/TraceFormat.kt
+++ b/opendc-trace/opendc-trace-api/src/main/kotlin/org/opendc/trace/spi/TraceFormat.kt
@@ -24,15 +24,9 @@ package org.opendc.trace.spi
import org.opendc.trace.TableReader
import org.opendc.trace.TableWriter
-import org.opendc.trace.azure.AzureTraceFormat
-import org.opendc.trace.bitbrains.BitbrainsTraceFormat
import org.opendc.trace.formats.carbon.CarbonTraceFormat
import org.opendc.trace.formats.failure.FailureTraceFormat
-import org.opendc.trace.formats.opendc.OdcVmTraceFormat
-import org.opendc.trace.gwf.GwfTraceFormat
-import org.opendc.trace.swf.SwfTraceFormat
-import org.opendc.trace.wfformat.WfFormatTraceFormat
-import org.opendc.trace.wtf.WtfTraceFormat
+import org.opendc.trace.formats.workload.WorkloadTraceFormat
import java.nio.file.Path
import java.util.ServiceLoader
@@ -122,15 +116,9 @@ public interface TraceFormat {
@JvmStatic
public fun byName(name: String): TraceFormat? {
return when (name) {
- "azure" -> AzureTraceFormat()
- "bitbrains" -> BitbrainsTraceFormat()
"carbon" -> CarbonTraceFormat()
"failure" -> FailureTraceFormat()
- "gwf" -> GwfTraceFormat()
- "opendc-vm" -> OdcVmTraceFormat()
- "swf" -> SwfTraceFormat()
- "wfformat" -> WfFormatTraceFormat()
- "wtf" -> WtfTraceFormat()
+ "workload" -> WorkloadTraceFormat()
else -> null
}
}
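
After this change, byName resolves only "carbon", "failure", and "workload"; the seven removed names (azure, bitbrains, gwf, opendc-vm, swf, wfformat, wtf) now fall through to null. A hedged usage sketch:

    import org.opendc.trace.spi.TraceFormat

    val format = TraceFormat.byName("workload")
        ?: error("unsupported trace format")
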
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/azure/AzureTraceFormatTest.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/azure/AzureTraceFormatTest.kt
deleted file mode 100644
index 40df36c6..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/azure/AzureTraceFormatTest.kt
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package formats.azure
-
-import formats.wtf.TableReaderTestKit
-import org.junit.jupiter.api.Assertions.assertAll
-import org.junit.jupiter.api.Assertions.assertDoesNotThrow
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.DisplayName
-import org.junit.jupiter.api.Nested
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertThrows
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableReader
-import org.opendc.trace.azure.AzureTraceFormat
-import org.opendc.trace.conv.TABLE_RESOURCES
-import org.opendc.trace.conv.TABLE_RESOURCE_STATES
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceStateCpuUsagePct
-import org.opendc.trace.conv.resourceStateTimestamp
-import java.nio.file.Paths
-
-/**
- * Test suite for the [AzureTraceFormat] class.
- */
-@DisplayName("Azure VM TraceFormat")
-class AzureTraceFormatTest {
- private val format = AzureTraceFormat()
-
- @Test
- fun testTables() {
- val path = Paths.get("src/test/resources/azure/trace")
-
- assertEquals(listOf(TABLE_RESOURCES, TABLE_RESOURCE_STATES), format.getTables(path))
- }
-
- @Test
- fun testTableExists() {
- val path = Paths.get("src/test/resources/azure/trace")
-
- assertDoesNotThrow { format.getDetails(path, TABLE_RESOURCE_STATES) }
- }
-
- @Test
- fun testTableDoesNotExist() {
- val path = Paths.get("src/test/resources/azure/trace")
- assertThrows<IllegalArgumentException> { format.getDetails(path, "test") }
- }
-
- @Test
- fun testResources() {
- val path = Paths.get("src/test/resources/azure/trace")
- val reader = format.newReader(path, TABLE_RESOURCES, null)
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("x/XsOfHO4ocsV99i4NluqKDuxctW2MMVmwqOPAlg4wp8mqbBOe3wxBlQo0+Qx+uf", reader.getString(resourceID)) },
- { assertEquals(1, reader.getInt(resourceCpuCount)) },
- { assertEquals(1750000.0, reader.getDouble(resourceMemCapacity)) },
- )
-
- reader.close()
- }
-
- @Test
- fun testSmoke() {
- val path = Paths.get("src/test/resources/azure/trace")
- val reader = format.newReader(path, TABLE_RESOURCE_STATES, null)
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("+ZcrOp5/c/fJ6mVgP5qMZlOAGDwyjaaDNM0WoWOt2IDb47gT0UwK9lFwkPQv3C7Q", reader.getString(resourceID)) },
- { assertEquals(0, reader.getInstant(resourceStateTimestamp)?.epochSecond) },
- { assertEquals(0.0286979, reader.getDouble(resourceStateCpuUsagePct), 0.01) },
- )
-
- reader.close()
- }
-
- @DisplayName("TableReader for Resources")
- @Nested
- inner class ResourcesTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/azure/trace")
-
- columns = format.getDetails(path, TABLE_RESOURCES).columns
- reader = format.newReader(path, TABLE_RESOURCES, null)
- }
- }
-
- @DisplayName("TableReader for Resource States")
- @Nested
- inner class ResourceStatesTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/azure/trace")
-
- columns = format.getDetails(path, TABLE_RESOURCE_STATES).columns
- reader = format.newReader(path, TABLE_RESOURCE_STATES, null)
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/bitbrains/BitbrainsExTraceFormatTest.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/bitbrains/BitbrainsExTraceFormatTest.kt
deleted file mode 100644
index 0b604c18..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/bitbrains/BitbrainsExTraceFormatTest.kt
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package formats.bitbrains
-
-import formats.wtf.TableReaderTestKit
-import org.junit.jupiter.api.Assertions.assertAll
-import org.junit.jupiter.api.Assertions.assertDoesNotThrow
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.DisplayName
-import org.junit.jupiter.api.Nested
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertThrows
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableReader
-import org.opendc.trace.bitbrains.BitbrainsExTraceFormat
-import org.opendc.trace.conv.TABLE_RESOURCE_STATES
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateTimestamp
-import java.nio.file.Paths
-
-/**
- * Test suite for the [BitbrainsExTraceFormat] class.
- */
-internal class BitbrainsExTraceFormatTest {
- private val format = BitbrainsExTraceFormat()
-
- @Test
- fun testTables() {
- val path = Paths.get("src/test/resources/bitbrains/vm.txt")
-
- assertEquals(listOf(TABLE_RESOURCE_STATES), format.getTables(path))
- }
-
- @Test
- fun testTableExists() {
- val path = Paths.get("src/test/resources/bitbrains/vm.txt")
-
- assertDoesNotThrow { format.getDetails(path, TABLE_RESOURCE_STATES) }
- }
-
- @Test
- fun testTableDoesNotExist() {
- val path = Paths.get("src/test/resources/bitbrains/vm.txt")
- assertThrows<IllegalArgumentException> { format.getDetails(path, "test") }
- }
-
- @Test
- fun testSmoke() {
- val path = Paths.get("src/test/resources/bitbrains/vm.txt")
- val reader = format.newReader(path, TABLE_RESOURCE_STATES, null)
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals(1631911500, reader.getInstant(resourceStateTimestamp)?.epochSecond) },
- { assertEquals(21.2, reader.getDouble(resourceStateCpuUsage), 0.01) },
- )
-
- reader.close()
- }
-
- @DisplayName("TableReader for Resource States")
- @Nested
- inner class ResourceStatesTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/bitbrains/vm.txt")
-
- columns = format.getDetails(path, TABLE_RESOURCE_STATES).columns
- reader = format.newReader(path, TABLE_RESOURCE_STATES, null)
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/bitbrains/BitbrainsTraceFormatTest.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/bitbrains/BitbrainsTraceFormatTest.kt
deleted file mode 100644
index d8ffb335..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/bitbrains/BitbrainsTraceFormatTest.kt
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package formats.bitbrains
-
-import formats.wtf.TableReaderTestKit
-import org.junit.jupiter.api.Assertions.assertAll
-import org.junit.jupiter.api.Assertions.assertDoesNotThrow
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertFalse
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.DisplayName
-import org.junit.jupiter.api.Nested
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertThrows
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableReader
-import org.opendc.trace.bitbrains.BitbrainsTraceFormat
-import org.opendc.trace.conv.TABLE_RESOURCES
-import org.opendc.trace.conv.TABLE_RESOURCE_STATES
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateTimestamp
-import java.nio.file.Paths
-
-/**
- * Test suite for the [BitbrainsTraceFormat] class.
- */
-class BitbrainsTraceFormatTest {
- private val format = BitbrainsTraceFormat()
-
- @Test
- fun testTables() {
- val path = Paths.get("src/test/resources/bitbrains/bitbrains.csv")
-
- assertEquals(listOf(TABLE_RESOURCES, TABLE_RESOURCE_STATES), format.getTables(path))
- }
-
- @Test
- fun testTableExists() {
- val path = Paths.get("src/test/resources/bitbrains/bitbrains.csv")
-
- assertDoesNotThrow { format.getDetails(path, TABLE_RESOURCE_STATES) }
- }
-
- @Test
- fun testTableDoesNotExist() {
- val path = Paths.get("src/test/resources/bitbrains/bitbrains.csv")
- assertThrows<IllegalArgumentException> { format.getDetails(path, "test") }
- }
-
- @Test
- fun testResources() {
- val path = Paths.get("src/test/resources/bitbrains/bitbrains.csv")
- val reader = format.newReader(path, TABLE_RESOURCES, null)
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("bitbrains", reader.getString(resourceID)) },
- { assertFalse(reader.nextRow()) },
- )
-
- reader.close()
- }
-
- @Test
- fun testSmoke() {
- val path = Paths.get("src/test/resources/bitbrains/bitbrains.csv")
- val reader = format.newReader(path, TABLE_RESOURCE_STATES, null)
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals(1376314846, reader.getInstant(resourceStateTimestamp)?.epochSecond) },
- { assertEquals(19.066, reader.getDouble(resourceStateCpuUsage), 0.01) },
- )
-
- reader.close()
- }
-
- @DisplayName("TableReader for Resources")
- @Nested
- inner class ResourcesTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/bitbrains/bitbrains.csv")
-
- columns = format.getDetails(path, TABLE_RESOURCES).columns
- reader = format.newReader(path, TABLE_RESOURCES, null)
- }
- }
-
- @DisplayName("TableReader for Resource States")
- @Nested
- inner class ResourceStatesTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/bitbrains/bitbrains.csv")
-
- columns = format.getDetails(path, TABLE_RESOURCE_STATES).columns
- reader = format.newReader(path, TABLE_RESOURCE_STATES, null)
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/gwf/GwfTraceFormatTest.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/gwf/GwfTraceFormatTest.kt
deleted file mode 100644
index cf098556..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/gwf/GwfTraceFormatTest.kt
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package formats.gwf
-
-import formats.wtf.TableReaderTestKit
-import org.junit.jupiter.api.Assertions.assertAll
-import org.junit.jupiter.api.Assertions.assertDoesNotThrow
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.DisplayName
-import org.junit.jupiter.api.Nested
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertThrows
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.TABLE_TASKS
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_SUBMIT_TIME
-import org.opendc.trace.conv.TASK_WORKFLOW_ID
-import org.opendc.trace.gwf.GwfTraceFormat
-import java.nio.file.Paths
-import java.time.Duration
-import java.time.Instant
-
-/**
- * Test suite for the [GwfTraceFormat] class.
- */
-@DisplayName("GWF TraceFormat")
-internal class GwfTraceFormatTest {
- private val format = GwfTraceFormat()
-
- @Test
- fun testTables() {
- val path = Paths.get("src/test/resources/gwf/trace.gwf")
-
- assertEquals(listOf(TABLE_TASKS), format.getTables(path))
- }
-
- @Test
- fun testTableExists() {
- val path = Paths.get("src/test/resources/gwf/trace.gwf")
- assertDoesNotThrow { format.getDetails(path, TABLE_TASKS) }
- }
-
- @Test
- fun testTableDoesNotExist() {
- val path = Paths.get("src/test/resources/gwf/trace.gwf")
-
- assertThrows<IllegalArgumentException> { format.getDetails(path, "test") }
- }
-
- @Test
- fun testTableReader() {
- val path = Paths.get("src/test/resources/gwf/trace.gwf")
- val reader = format.newReader(path, TABLE_TASKS, null)
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("0", reader.getString(TASK_WORKFLOW_ID)) },
- { assertEquals("1", reader.getString(TASK_ID)) },
- { assertEquals(Instant.ofEpochSecond(16), reader.getInstant(TASK_SUBMIT_TIME)) },
- { assertEquals(Duration.ofSeconds(11), reader.getDuration(TASK_RUNTIME)) },
- { assertEquals(emptySet<String>(), reader.getSet(TASK_PARENTS, String::class.java)) },
- )
- }
-
- @Test
- fun testReadingRowWithDependencies() {
- val path = Paths.get("src/test/resources/gwf/trace.gwf")
- val reader = format.newReader(path, TABLE_TASKS, null)
-
- // Move to row 7
- for (x in 1..6)
- reader.nextRow()
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("0", reader.getString(TASK_WORKFLOW_ID)) },
- { assertEquals("7", reader.getString(TASK_ID)) },
- { assertEquals(Instant.ofEpochSecond(87), reader.getInstant(TASK_SUBMIT_TIME)) },
- { assertEquals(Duration.ofSeconds(11), reader.getDuration(TASK_RUNTIME)) },
- { assertEquals(setOf("4", "5", "6"), reader.getSet(TASK_PARENTS, String::class.java)) },
- )
- }
-
- @DisplayName("TableReader for Tasks")
- @Nested
- inner class TasksTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/gwf/trace.gwf")
-
- columns = format.getDetails(path, TABLE_TASKS).columns
- reader = format.newReader(path, TABLE_TASKS, null)
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/opendc/OdcVmTraceFormatTest.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/opendc/OdcVmTraceFormatTest.kt
deleted file mode 100644
index f801a6ac..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/opendc/OdcVmTraceFormatTest.kt
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package formats.opendc
-
-import formats.wtf.TableReaderTestKit
-import formats.wtf.TableWriterTestKit
-import org.junit.jupiter.api.Assertions.assertAll
-import org.junit.jupiter.api.Assertions.assertDoesNotThrow
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertFalse
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.DisplayName
-import org.junit.jupiter.api.Nested
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertThrows
-import org.junit.jupiter.params.ParameterizedTest
-import org.junit.jupiter.params.provider.ValueSource
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableReader
-import org.opendc.trace.TableWriter
-import org.opendc.trace.conv.INTERFERENCE_GROUP_MEMBERS
-import org.opendc.trace.conv.INTERFERENCE_GROUP_SCORE
-import org.opendc.trace.conv.INTERFERENCE_GROUP_TARGET
-import org.opendc.trace.conv.TABLE_INTERFERENCE_GROUPS
-import org.opendc.trace.conv.TABLE_RESOURCES
-import org.opendc.trace.conv.TABLE_RESOURCE_STATES
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceDuration
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateTimestamp
-import org.opendc.trace.conv.resourceSubmissionTime
-import org.opendc.trace.formats.opendc.OdcVmTraceFormat
-import java.nio.file.Files
-import java.nio.file.Paths
-import java.time.Instant
-
-/**
- * Test suite for the [OdcVmTraceFormat] implementation.
- */
-@DisplayName("OdcVmTraceFormat")
-internal class OdcVmTraceFormatTest {
- private val format = OdcVmTraceFormat()
-
- @Test
- fun testTables() {
- val path = Paths.get("src/test/resources/opendc/trace-v2.2")
-
- assertEquals(listOf(TABLE_RESOURCES, TABLE_RESOURCE_STATES, TABLE_INTERFERENCE_GROUPS), format.getTables(path))
- }
-
- @Test
- fun testTableExists() {
- val path = Paths.get("src/test/resources/opendc/trace-v2.2")
-
- assertDoesNotThrow { format.getDetails(path, TABLE_RESOURCE_STATES) }
- }
-
- @Test
- fun testTableDoesNotExist() {
- val path = Paths.get("src/test/resources/opendc/trace-v2.2")
- assertThrows<IllegalArgumentException> { format.getDetails(path, "test") }
- }
-
- @ParameterizedTest
- @ValueSource(strings = ["trace-v2.0"])
- fun testResources(name: String) {
- val path = Paths.get("src/test/resources/opendc/$name")
- val reader = format.newReader(path, TABLE_RESOURCES, listOf(resourceID, resourceSubmissionTime))
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("1019", reader.getString(resourceID)) },
- { assertTrue(reader.nextRow()) },
- { assertEquals("1023", reader.getString(resourceID)) },
- { assertTrue(reader.nextRow()) },
- { assertEquals("1052", reader.getString(resourceID)) },
- { assertTrue(reader.nextRow()) },
- { assertEquals("1073", reader.getString(resourceID)) },
- { assertFalse(reader.nextRow()) },
- )
-
- reader.close()
- }
-
- @Test
- fun testResourcesWrite() {
- val path = Files.createTempDirectory("opendc")
- val writer = format.newWriter(path, TABLE_RESOURCES)
-
- writer.startRow()
- writer.setString(resourceID, "1019")
- writer.setInstant(resourceSubmissionTime, Instant.EPOCH)
- writer.setInstant(resourceDuration, Instant.EPOCH)
- writer.setInt(resourceCpuCount, 1)
- writer.setDouble(resourceCpuCapacity, 1024.0)
- writer.setDouble(resourceMemCapacity, 1024.0)
- writer.endRow()
- writer.close()
-
- val reader = format.newReader(path, TABLE_RESOURCES, null)
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("1019", reader.getString(resourceID)) },
- { assertEquals(Instant.EPOCH, reader.getInstant(resourceSubmissionTime)) },
- { assertEquals(Instant.EPOCH, reader.getInstant(resourceDuration)) },
- { assertEquals(1, reader.getInt(resourceCpuCount)) },
- { assertEquals(1024.0, reader.getDouble(resourceCpuCapacity)) },
- { assertEquals(1024.0, reader.getDouble(resourceMemCapacity)) },
- { assertFalse(reader.nextRow()) },
- )
-
- reader.close()
- }
-
- @ParameterizedTest
- @ValueSource(strings = ["trace-v2.0", "trace-v2.1"])
- fun testSmoke(name: String) {
- val path = Paths.get("src/test/resources/opendc/$name")
- val reader =
- format.newReader(
- path,
- TABLE_RESOURCE_STATES,
- listOf(resourceID, resourceStateTimestamp, resourceStateCpuUsage),
- )
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("1019", reader.getString(resourceID)) },
- { assertEquals(0.0, reader.getDouble(resourceStateCpuUsage), 0.01) },
- )
-
- reader.close()
- }
-
- @Test
- fun testResourceStatesWrite() {
- val path = Files.createTempDirectory("opendc")
- val writer = format.newWriter(path, TABLE_RESOURCE_STATES)
-
- writer.startRow()
- writer.setString(resourceID, "1019")
- writer.setInstant(resourceStateTimestamp, Instant.EPOCH)
- writer.setDouble(resourceStateCpuUsage, 23.0)
- writer.setInt(resourceCpuCount, 1)
- writer.endRow()
- writer.close()
-
- val reader = format.newReader(path, TABLE_RESOURCE_STATES, null)
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("1019", reader.getString(resourceID)) },
- { assertEquals(Instant.EPOCH, reader.getInstant(resourceStateTimestamp)) },
- { assertEquals(1, reader.getInt(resourceCpuCount)) },
- { assertEquals(23.0, reader.getDouble(resourceStateCpuUsage)) },
- { assertFalse(reader.nextRow()) },
- )
-
- reader.close()
- }
-
- @Test
- fun testInterferenceGroups() {
- val path = Paths.get("src/test/resources/opendc/trace-v2.1")
- val reader =
- format.newReader(
- path,
- TABLE_INTERFERENCE_GROUPS,
- listOf(INTERFERENCE_GROUP_MEMBERS, INTERFERENCE_GROUP_TARGET, INTERFERENCE_GROUP_SCORE),
- )
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals(setOf("1019", "1023", "1052"), reader.getSet(INTERFERENCE_GROUP_MEMBERS, String::class.java)) },
- { assertEquals(0.0, reader.getDouble(INTERFERENCE_GROUP_TARGET)) },
- { assertEquals(0.8830158730158756, reader.getDouble(INTERFERENCE_GROUP_SCORE)) },
- { assertTrue(reader.nextRow()) },
- { assertEquals(setOf("1023", "1052", "1073"), reader.getSet(INTERFERENCE_GROUP_MEMBERS, String::class.java)) },
- { assertEquals(0.0, reader.getDouble(INTERFERENCE_GROUP_TARGET)) },
- { assertEquals(0.7133055555552751, reader.getDouble(INTERFERENCE_GROUP_SCORE)) },
- { assertFalse(reader.nextRow()) },
- )
-
- reader.close()
- }
-
- @Test
- fun testInterferenceGroupsEmpty() {
- val path = Paths.get("src/test/resources/opendc/trace-v2.0")
- val reader = format.newReader(path, TABLE_INTERFERENCE_GROUPS, listOf(INTERFERENCE_GROUP_MEMBERS))
-
- assertFalse(reader.nextRow())
- reader.close()
- }
-
- @Test
- fun testInterferenceGroupsWrite() {
- val path = Files.createTempDirectory("opendc")
- val writer = format.newWriter(path, TABLE_INTERFERENCE_GROUPS)
-
- writer.startRow()
- writer.setSet(INTERFERENCE_GROUP_MEMBERS, setOf("a", "b", "c"))
- writer.setDouble(INTERFERENCE_GROUP_TARGET, 0.5)
- writer.setDouble(INTERFERENCE_GROUP_SCORE, 0.8)
- writer.endRow()
- writer.flush()
-
- writer.startRow()
- writer.setSet(INTERFERENCE_GROUP_MEMBERS, setOf("a", "b", "d"))
- writer.setDouble(INTERFERENCE_GROUP_TARGET, 0.5)
- writer.setDouble(INTERFERENCE_GROUP_SCORE, 0.9)
- writer.endRow()
- writer.close()
-
- val reader = format.newReader(path, TABLE_INTERFERENCE_GROUPS, null)
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals(setOf("a", "b", "c"), reader.getSet(INTERFERENCE_GROUP_MEMBERS, String::class.java)) },
- { assertEquals(0.5, reader.getDouble(INTERFERENCE_GROUP_TARGET)) },
- { assertEquals(0.8, reader.getDouble(INTERFERENCE_GROUP_SCORE)) },
- { assertTrue(reader.nextRow()) },
- { assertEquals(setOf("a", "b", "d"), reader.getSet(INTERFERENCE_GROUP_MEMBERS, String::class.java)) },
- { assertEquals(0.5, reader.getDouble(INTERFERENCE_GROUP_TARGET)) },
- { assertEquals(0.9, reader.getDouble(INTERFERENCE_GROUP_SCORE)) },
- { assertFalse(reader.nextRow()) },
- )
-
- reader.close()
- }
-
- @DisplayName("TableReader for Resources")
- @Nested
- inner class ResourcesTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/opendc/trace-v2.1")
-
- columns = format.getDetails(path, TABLE_RESOURCES).columns
- reader = format.newReader(path, TABLE_RESOURCES, null)
- }
- }
-
- @DisplayName("TableWriter for Resources")
- @Nested
- inner class ResourcesTableWriterTest : TableWriterTestKit() {
- override lateinit var writer: TableWriter
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Files.createTempDirectory("opendc")
-
- columns = format.getDetails(Paths.get("src/test/resources/opendc/trace-v2.1"), TABLE_RESOURCES).columns
- writer = format.newWriter(path, TABLE_RESOURCES)
- }
- }
-
- @DisplayName("TableReader for Resource States")
- @Nested
- inner class ResourceStatesTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/opendc/trace-v2.1")
-
- columns = format.getDetails(path, TABLE_RESOURCE_STATES).columns
- reader = format.newReader(path, TABLE_RESOURCE_STATES, null)
- }
- }
-
- @DisplayName("TableWriter for Resource States")
- @Nested
- inner class ResourceStatesTableWriterTest : TableWriterTestKit() {
- override lateinit var writer: TableWriter
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Files.createTempDirectory("opendc")
-
- columns = format.getDetails(Paths.get("src/test/resources/opendc/trace-v2.1"), TABLE_RESOURCE_STATES).columns
- writer = format.newWriter(path, TABLE_RESOURCE_STATES)
- }
- }
-
- @DisplayName("TableReader for Interference Groups")
- @Nested
- inner class InterferenceGroupsTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/opendc/trace-v2.1")
-
- columns = format.getDetails(path, TABLE_INTERFERENCE_GROUPS).columns
- reader = format.newReader(path, TABLE_INTERFERENCE_GROUPS, null)
- }
- }
-
- @DisplayName("TableWriter for Interference Groups")
- @Nested
- inner class InterferenceGroupsTableWriterTest : TableWriterTestKit() {
- override lateinit var writer: TableWriter
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Files.createTempDirectory("opendc")
-
- columns = format.getDetails(Paths.get("src/test/resources/opendc/trace-v2.1"), TABLE_INTERFERENCE_GROUPS).columns
- writer = format.newWriter(path, TABLE_INTERFERENCE_GROUPS)
- }
- }
-}
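
The write-path tests above all follow the same write-then-read round trip. Condensed to its essentials as a sketch, using only calls that appear in the deleted file:

    val format = OdcVmTraceFormat()
    val dir = Files.createTempDirectory("opendc")

    val writer = format.newWriter(dir, TABLE_RESOURCES)
    writer.startRow()
    writer.setString(resourceID, "1019")
    writer.setInstant(resourceSubmissionTime, Instant.EPOCH)
    writer.setInstant(resourceDuration, Instant.EPOCH) // as in the deleted test, which stored an Instant in the duration column
    writer.setInt(resourceCpuCount, 1)
    writer.setDouble(resourceCpuCapacity, 1024.0)
    writer.setDouble(resourceMemCapacity, 1024.0)
    writer.endRow()
    writer.close()

    val reader = format.newReader(dir, TABLE_RESOURCES, null)
    check(reader.nextRow() && reader.getString(resourceID) == "1019") { "round trip failed" }
    reader.close()
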
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/swf/SwfTraceFormatTest.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/swf/SwfTraceFormatTest.kt
deleted file mode 100644
index c4c4e24a..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/swf/SwfTraceFormatTest.kt
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package formats.swf
-
-import formats.wtf.TableReaderTestKit
-import org.junit.jupiter.api.Assertions.assertAll
-import org.junit.jupiter.api.Assertions.assertDoesNotThrow
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.DisplayName
-import org.junit.jupiter.api.Nested
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertThrows
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.TABLE_TASKS
-import org.opendc.trace.conv.TASK_ALLOC_NCPUS
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.swf.SwfTraceFormat
-import java.nio.file.Paths
-
-/**
- * Test suite for the [SwfTraceFormat] class.
- */
-@DisplayName("SWF TraceFormat")
-internal class SwfTraceFormatTest {
- private val format = SwfTraceFormat()
-
- @Test
- fun testTables() {
- val path = Paths.get("src/test/resources/swf/trace.swf")
-
- assertEquals(listOf(TABLE_TASKS), format.getTables(path))
- }
-
- @Test
- fun testTableExists() {
- val path = Paths.get("src/test/resources/swf/trace.swf")
- assertDoesNotThrow { format.getDetails(path, TABLE_TASKS) }
- }
-
- @Test
- fun testTableDoesNotExist() {
- val path = Paths.get("src/test/resources/swf/trace.swf")
-
- assertThrows<IllegalArgumentException> { format.getDetails(path, "test") }
- }
-
- @Test
- fun testReader() {
- val path = Paths.get("src/test/resources/swf/trace.swf")
- val reader = format.newReader(path, TABLE_TASKS, null)
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("1", reader.getString(TASK_ID)) },
- { assertEquals(306, reader.getInt(TASK_ALLOC_NCPUS)) },
- { assertTrue(reader.nextRow()) },
- { assertEquals("2", reader.getString(TASK_ID)) },
- { assertEquals(17, reader.getInt(TASK_ALLOC_NCPUS)) },
- )
-
- reader.close()
- }
-
- @DisplayName("TableReader for Tasks")
- @Nested
- inner class TasksTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/swf/trace.swf")
-
- columns = format.getDetails(path, TABLE_TASKS).columns
- reader = format.newReader(path, TABLE_TASKS, null)
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wfformat/WfFormatTaskTableReaderTest.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wfformat/WfFormatTaskTableReaderTest.kt
deleted file mode 100644
index 1701e566..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wfformat/WfFormatTaskTableReaderTest.kt
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package formats.wfformat
-
-import com.fasterxml.jackson.core.JsonFactory
-import com.fasterxml.jackson.core.JsonParseException
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertFalse
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertDoesNotThrow
-import org.junit.jupiter.api.assertThrows
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.wfformat.WfFormatTaskTableReader
-
-/**
- * Test suite for the [WfFormatTaskTableReader] class.
- */
-internal class WfFormatTaskTableReaderTest {
- /**
- * The [JsonFactory] used to construct the parser.
- */
- private val factory = JsonFactory()
-
- @Test
- fun testEmptyInput() {
- val content = ""
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertFalse(reader.nextRow())
- reader.close()
- }
-
- @Test
- fun testTopLevelArrayInput() {
- val content = "[]"
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertThrows<JsonParseException> {
- while (reader.nextRow()) {
- continue
- }
- }
-
- reader.close()
- }
-
- @Test
- fun testNoWorkflow() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon"
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertDoesNotThrow {
- while (reader.nextRow()) {
- continue
- }
- }
-
- reader.close()
- }
-
- @Test
- fun testWorkflowArrayType() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": []
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertThrows<JsonParseException> {
- while (reader.nextRow()) {
- continue
- }
- }
-
- reader.close()
- }
-
- @Test
- fun testWorkflowNullType() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": null
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertThrows<JsonParseException> {
- while (reader.nextRow()) {
- continue
- }
- }
-
- reader.close()
- }
-
- @Test
- fun testNoJobs() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": {
-
- }
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertDoesNotThrow { reader.nextRow() }
-
- reader.close()
- }
-
- @Test
- fun testJobsObjectType() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": { "jobs": {} }
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertThrows<JsonParseException> { reader.nextRow() }
-
- reader.close()
- }
-
- @Test
- fun testJobsNullType() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": { "jobs": null }
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertThrows<JsonParseException> { reader.nextRow() }
-
- reader.close()
- }
-
- @Test
- fun testJobsInvalidChildType() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": {
- "jobs": [1]
- }
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertThrows<JsonParseException> { reader.nextRow() }
-
- reader.close()
- }
-
- @Test
- fun testJobsValidChildType() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": {
- "jobs": [
- {
- "name": "test"
- }
- ]
- }
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertTrue(reader.nextRow())
- assertEquals("test", reader.getString(TASK_ID))
- assertFalse(reader.nextRow())
-
- reader.close()
- }
-
- @Test
- fun testJobsInvalidParents() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": {
- "jobs": [
- {
- "name": "test",
- "parents": 1,
- }
- ]
- }
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertThrows<JsonParseException> { reader.nextRow() }
-
- reader.close()
- }
-
- @Test
- fun testJobsInvalidParentsItem() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": {
- "jobs": [
- {
- "name": "test",
- "parents": [1],
- }
- ]
- }
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertThrows<JsonParseException> { reader.nextRow() }
-
- reader.close()
- }
-
- @Test
- fun testJobsValidParents() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": {
- "jobs": [
- {
- "name": "test",
- "parents": ["1"]
- }
- ]
- }
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertTrue(reader.nextRow())
- assertEquals(setOf("1"), reader.getSet(TASK_PARENTS, String::class.java))
- assertFalse(reader.nextRow())
-
- reader.close()
- }
-
- @Test
- fun testJobsInvalidSecondEntry() {
- val content =
- """
- {
- "workflow": {
- "jobs": [
- {
- "name": "test",
- "parents": ["1"]
- },
- "test"
- ]
- }
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertDoesNotThrow { reader.nextRow() }
- assertThrows<JsonParseException> { reader.nextRow() }
-
- reader.close()
- }
-
- @Test
- fun testDuplicateJobsArray() {
- val content =
- """
- {
- "name": "eager-nextflow-chameleon",
- "workflow": {
- "jobs": [
- {
- "name": "test",
- "parents": ["1"]
- }
- ],
- "jobs": [
- {
- "name": "test2",
- "parents": ["test"]
- }
- ]
- }
- }
- """.trimIndent()
- val parser = factory.createParser(content)
- val reader = WfFormatTaskTableReader(parser)
-
- assertTrue(reader.nextRow())
- assertTrue(reader.nextRow())
- assertEquals("test2", reader.getString(TASK_ID))
- assertFalse(reader.nextRow())
-
- reader.close()
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wfformat/WfFormatTraceFormatTest.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wfformat/WfFormatTraceFormatTest.kt
deleted file mode 100644
index 94ed30d7..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wfformat/WfFormatTraceFormatTest.kt
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package formats.wfformat
-
-import org.junit.jupiter.api.Assertions.assertAll
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.DisplayName
-import org.junit.jupiter.api.Nested
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertDoesNotThrow
-import org.junit.jupiter.api.assertThrows
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.TABLE_TASKS
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_WORKFLOW_ID
-import org.opendc.trace.testkit.TableReaderTestKit
-import org.opendc.trace.wfformat.WfFormatTraceFormat
-import java.nio.file.Paths
-
-/**
- * Test suite for the [WfFormatTraceFormat] class.
- */
-@DisplayName("WfFormat TraceFormat")
-class WfFormatTraceFormatTest {
- private val format = WfFormatTraceFormat()
-
- @Test
- fun testTables() {
- val path = Paths.get("src/test/resources/wfformat/trace.json")
-
- assertEquals(listOf(TABLE_TASKS), format.getTables(path))
- }
-
- @Test
- fun testTableExists() {
- val path = Paths.get("src/test/resources/wfformat/trace.json")
- assertDoesNotThrow { format.getDetails(path, TABLE_TASKS) }
- }
-
- @Test
- fun testTableDoesNotExist() {
- val path = Paths.get("src/test/resources/wfformat/trace.json")
-
- assertThrows<IllegalArgumentException> { format.getDetails(path, "test") }
- }
-
- /**
- * Smoke test for parsing WfCommons traces.
- */
- @Test
- fun testTableReader() {
- val path = Paths.get("src/test/resources/wfformat/trace.json")
- val reader = format.newReader(path, TABLE_TASKS, null)
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("makebwaindex_mammoth_mt_krause.fasta", reader.getString(TASK_ID)) },
- { assertEquals("eager-nextflow-chameleon", reader.getString(TASK_WORKFLOW_ID)) },
- { assertEquals(172000, reader.getDuration(TASK_RUNTIME)?.toMillis()) },
- { assertEquals(emptySet<String>(), reader.getSet(TASK_PARENTS, String::class.java)) },
- )
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("makeseqdict_mammoth_mt_krause.fasta", reader.getString(TASK_ID)) },
- { assertEquals("eager-nextflow-chameleon", reader.getString(TASK_WORKFLOW_ID)) },
- { assertEquals(175000, reader.getDuration(TASK_RUNTIME)?.toMillis()) },
- { assertEquals(setOf("makebwaindex_mammoth_mt_krause.fasta"), reader.getSet(TASK_PARENTS, String::class.java)) },
- )
-
- reader.close()
- }
-
- /**
- * Test full iteration of the table.
- */
- @Test
- fun testTableReaderFull() {
- val path = Paths.get("src/test/resources/wfformat/trace.json")
- val reader = format.newReader(path, TABLE_TASKS, null)
-
- assertDoesNotThrow {
- while (reader.nextRow()) {
- // reader.get(TASK_ID)
- }
- reader.close()
- }
- }
-
- @DisplayName("TableReader for Tasks")
- @Nested
- inner class TasksTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/wfformat/trace.json")
-
- columns = format.getDetails(path, TABLE_TASKS).columns
- reader = format.newReader(path, TABLE_TASKS, null)
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/TableReaderTestKit.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/TableReaderTestKit.kt
deleted file mode 100644
index cb6db43f..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/TableReaderTestKit.kt
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package formats.wtf
-
-import org.junit.jupiter.api.AfterEach
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertFalse
-import org.junit.jupiter.api.Assertions.assertNotEquals
-import org.junit.jupiter.api.Assumptions.assumeTrue
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertAll
-import org.junit.jupiter.api.assertDoesNotThrow
-import org.junit.jupiter.api.assertThrows
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-
-/**
- * A test suite for implementations of the [TableReader] interface.
- */
-public abstract class TableReaderTestKit {
- /**
- * The [TableReader] instance to test.
- */
- public abstract val reader: TableReader
-
- /**
- * The columns of the table.
- */
- public abstract val columns: List<TableColumn>
-
- @AfterEach
- public fun tearDown() {
- reader.close()
- }
-
- /**
- * Test that we can resolve the columns of a table successfully.
- */
- @Test
- public fun testResolve() {
- assertAll(columns.map { column -> { assertNotEquals(-1, reader.resolve(column.name)) } })
- }
-
- /**
- * Test that resolving an empty column name fails.
- */
- @Test
- public fun testResolveEmpty() {
- assertEquals(-1, reader.resolve(""))
- }
-
- /**
- * Test that reading non-existent columns fails.
- */
- @Test
- public fun testReadNonExistentColumns() {
- assumeTrue(reader.nextRow())
- assertAll(
- { assertThrows<IllegalArgumentException> { reader.isNull(-1) } },
- { assertThrows<IllegalArgumentException> { reader.getBoolean(-1) } },
- { assertThrows<IllegalArgumentException> { reader.getInt(-1) } },
- { assertThrows<IllegalArgumentException> { reader.getLong(-1) } },
- { assertThrows<IllegalArgumentException> { reader.getFloat(-1) } },
- { assertThrows<IllegalArgumentException> { reader.getDouble(-1) } },
- { assertThrows<IllegalArgumentException> { reader.getString(-1) } },
- { assertThrows<IllegalArgumentException> { reader.getUUID(-1) } },
- { assertThrows<IllegalArgumentException> { reader.getInstant(-1) } },
- { assertThrows<IllegalArgumentException> { reader.getDuration(-1) } },
- { assertThrows<IllegalArgumentException> { reader.getList(-1, Any::class.java) } },
- { assertThrows<IllegalArgumentException> { reader.getSet(-1, Any::class.java) } },
- { assertThrows<IllegalArgumentException> { reader.getMap(-1, Any::class.java, Any::class.java) } },
- )
- }
-
- /**
- * Test that ensures [TableReader.isNull] reports the correct value.
- */
- @Test
- public fun testVerifyNullColumns() {
- while (reader.nextRow()) {
- assertAll(
- columns.map { column ->
- {
- when (column.type) {
- is TableColumnType.Boolean -> assertFalse(reader.isNull(column.name) && reader.getBoolean(column.name))
- is TableColumnType.Int -> assertFalse(reader.isNull(column.name) && reader.getInt(column.name) != 0)
- is TableColumnType.Long -> assertFalse(reader.isNull(column.name) && reader.getLong(column.name) != 0L)
- is TableColumnType.Float -> assertFalse(reader.isNull(column.name) && reader.getFloat(column.name) != 0f)
- is TableColumnType.Double -> assertFalse(reader.isNull(column.name) && reader.getDouble(column.name) != 0.0)
- is TableColumnType.String -> assertFalse(reader.isNull(column.name) && reader.getString(column.name) != null)
- is TableColumnType.UUID -> assertFalse(reader.isNull(column.name) && reader.getUUID(column.name) != null)
- is TableColumnType.Instant -> assertFalse(reader.isNull(column.name) && reader.getInstant(column.name) != null)
- is TableColumnType.Duration ->
- assertFalse(
- reader.isNull(column.name) && reader.getDuration(column.name) != null,
- )
- is TableColumnType.List ->
- assertFalse(
- reader.isNull(column.name) && reader.getList(column.name, Any::class.java) != null,
- )
- is TableColumnType.Set ->
- assertFalse(
- reader.isNull(column.name) && reader.getSet(column.name, Any::class.java) != null,
- )
- is TableColumnType.Map ->
- assertFalse(
- reader.isNull(column.name) && reader.getMap(column.name, Any::class.java, Any::class.java) != null,
- )
- }
- }
- },
- )
- }
- }
-
- /**
- * Test that we can read the entire table without any issue.
- */
- @Test
- public fun testReadFully() {
- assertDoesNotThrow {
- while (reader.nextRow()) {
- assertAll(columns.map { column -> { assertDoesNotThrow { reader.get(column) } } })
- }
- reader.close()
- }
-
- assertFalse(reader.nextRow()) { "Reader should not reset after being fully read" }
- }
-
- /**
- * Test that the reader throws an exception when the columns are read without a call to [TableReader.nextRow].
- */
- @Test
- public fun testReadWithoutNextRow() {
- assertAll(columns.map { column -> { assertThrows<IllegalStateException> { reader.get(column) } } })
- }
-
- /**
- * Test that the reader throws an exception when the columns are read after the [TableReader] is finished.
- */
- @Test
- public fun testReadAfterFinish() {
- @Suppress("ControlFlowWithEmptyBody")
- while (reader.nextRow()) {}
-
- testReadWithoutNextRow()
- }
-
- /**
- * Helper method to map a [TableColumn] to a read.
- */
- private fun TableReader.get(column: TableColumn): Any? {
- return when (column.type) {
- is TableColumnType.Boolean -> getBoolean(column.name)
- is TableColumnType.Int -> getInt(column.name)
- is TableColumnType.Long -> getLong(column.name)
- is TableColumnType.Float -> getFloat(column.name)
- is TableColumnType.Double -> getDouble(column.name)
- is TableColumnType.String -> getString(column.name)
- is TableColumnType.UUID -> getUUID(column.name)
- is TableColumnType.Instant -> getInstant(column.name)
- is TableColumnType.Duration -> getDuration(column.name)
- is TableColumnType.List -> getList(column.name, Any::class.java)
- is TableColumnType.Set -> getSet(column.name, Any::class.java)
- is TableColumnType.Map -> getMap(column.name, Any::class.java, Any::class.java)
- }
- }
-}
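
A format module hooks into this kit by overriding the two abstract properties, exactly as the @Nested classes in the format tests above do. A sketch for a hypothetical new format, where `format` is the enclosing suite's trace format and the fixture path is illustrative:

    @DisplayName("TableReader for Tasks")
    @Nested
    inner class TasksTableReaderTest : TableReaderTestKit() {
        override lateinit var reader: TableReader
        override lateinit var columns: List<TableColumn>

        @BeforeEach
        fun setUp() {
            // Hypothetical fixture; real suites point at src/test/resources/<format>/...
            val path = Paths.get("src/test/resources/myformat/trace")

            columns = format.getDetails(path, TABLE_TASKS).columns
            reader = format.newReader(path, TABLE_TASKS, null)
        }
    }
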
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/TableWriterTestKit.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/TableWriterTestKit.kt
deleted file mode 100644
index a7d6879c..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/TableWriterTestKit.kt
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package formats.wtf
-
-import org.junit.jupiter.api.AfterEach
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertNotEquals
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertAll
-import org.junit.jupiter.api.assertThrows
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableWriter
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-
-/**
- * A test suite for implementations of the [TableWriter] interface.
- */
-public abstract class TableWriterTestKit {
- /**
- * The [TableWriter] instance to test.
- */
- public abstract val writer: TableWriter
-
- /**
- * The columns of the table.
- */
- public abstract val columns: List<TableColumn>
-
- @AfterEach
- public fun tearDown() {
- writer.close()
- }
-
- /**
- * Test that we can resolve the columns of a table successfully.
- */
- @Test
- public fun testResolve() {
- assertAll(columns.map { column -> { assertNotEquals(-1, writer.resolve(column.name)) } })
- }
-
- /**
- * Test that resolving an empty column name fails.
- */
- @Test
- public fun testResolveEmpty() {
- assertEquals(-1, writer.resolve(""))
- }
-
- /**
- * Test that writing non-existent columns fails.
- */
- @Test
- public fun testWriteNonExistentColumns() {
- writer.startRow()
- assertAll(
- { assertThrows<IllegalArgumentException> { writer.setBoolean(-1, false) } },
- { assertThrows<IllegalArgumentException> { writer.setInt(-1, 1) } },
- { assertThrows<IllegalArgumentException> { writer.setLong(-1, 1) } },
- { assertThrows<IllegalArgumentException> { writer.setFloat(-1, 1f) } },
- { assertThrows<IllegalArgumentException> { writer.setDouble(-1, 1.0) } },
- { assertThrows<IllegalArgumentException> { writer.setString(-1, "test") } },
- { assertThrows<IllegalArgumentException> { writer.setUUID(-1, UUID.randomUUID()) } },
- { assertThrows<IllegalArgumentException> { writer.setInstant(-1, Instant.now()) } },
- { assertThrows<IllegalArgumentException> { writer.setDuration(-1, Duration.ofMinutes(5)) } },
- { assertThrows<IllegalArgumentException> { writer.setList(-1, listOf("test")) } },
- { assertThrows<IllegalArgumentException> { writer.setSet(-1, setOf("test")) } },
- { assertThrows<IllegalArgumentException> { writer.setMap(-1, mapOf("test" to "test")) } },
- )
- }
-
- /**
- * Test that writing columns without a row fails.
- */
- @Test
- public fun testWriteWithoutRow() {
- assertAll(
- columns.map { column ->
- {
- assertThrows<IllegalStateException> {
- when (column.type) {
- is TableColumnType.Boolean -> writer.setBoolean(column.name, true)
- is TableColumnType.Int -> writer.setInt(column.name, 21)
- is TableColumnType.Long -> writer.setLong(column.name, 21)
- is TableColumnType.Float -> writer.setFloat(column.name, 42f)
- is TableColumnType.Double -> writer.setDouble(column.name, 42.0)
- is TableColumnType.String -> writer.setString(column.name, "test")
- is TableColumnType.UUID -> writer.setUUID(column.name, UUID.randomUUID())
- is TableColumnType.Instant -> writer.setInstant(column.name, Instant.now())
- is TableColumnType.Duration -> writer.setDuration(column.name, Duration.ofMinutes(5))
- is TableColumnType.List -> writer.setList(column.name, emptyList<String>())
- is TableColumnType.Set -> writer.setSet(column.name, emptySet<String>())
- is TableColumnType.Map -> writer.setMap(column.name, emptyMap<String, String>())
- }
- }
- }
- },
- )
- }
-
- /**
- * Test to verify we cannot end a row without starting it.
- */
- @Test
- public fun testEndRowWithoutStart() {
- assertThrows<IllegalStateException> { writer.endRow() }
- }
-}
diff --git a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/WtfTraceFormatTest.kt b/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/WtfTraceFormatTest.kt
deleted file mode 100644
index d218fbf3..00000000
--- a/opendc-trace/opendc-trace-api/src/test/kotlin/formats/wtf/WtfTraceFormatTest.kt
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.wtf
-
-import formats.wtf.TableReaderTestKit
-import org.junit.jupiter.api.Assertions.assertAll
-import org.junit.jupiter.api.Assertions.assertDoesNotThrow
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.BeforeEach
-import org.junit.jupiter.api.DisplayName
-import org.junit.jupiter.api.Nested
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertThrows
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableReader
-import org.opendc.trace.conv.TABLE_TASKS
-import org.opendc.trace.conv.TASK_ID
-import org.opendc.trace.conv.TASK_PARENTS
-import org.opendc.trace.conv.TASK_RUNTIME
-import org.opendc.trace.conv.TASK_SUBMIT_TIME
-import org.opendc.trace.conv.TASK_WORKFLOW_ID
-import java.nio.file.Paths
-import java.time.Duration
-import java.time.Instant
-
-/**
- * Test suite for the [WtfTraceFormat] class.
- */
-@DisplayName("WTF TraceFormat")
-class WtfTraceFormatTest {
- private val format = WtfTraceFormat()
-
- @Test
- fun testTables() {
- val path = Paths.get("src/test/resources/wtf/schema-1.0")
- assertEquals(listOf(TABLE_TASKS), format.getTables(path))
- }
-
- @Test
- fun testTableExists() {
- val path = Paths.get("src/test/resources/wtf/wtf-trace")
- assertDoesNotThrow { format.getDetails(path, TABLE_TASKS) }
- }
-
- @Test
- fun testTableDoesNotExist() {
- val path = Paths.get("src/test/resources/wtf/wtf-trace")
-
- assertThrows<IllegalArgumentException> { format.getDetails(path, "test") }
- }
-
- /**
- * Smoke test for parsing WTF traces.
- */
- @Test
- fun testTableReader() {
- val path = Paths.get("src/test/resources/wtf/wtf-trace")
- val reader = format.newReader(path, TABLE_TASKS, listOf(TASK_ID, TASK_WORKFLOW_ID, TASK_SUBMIT_TIME, TASK_RUNTIME, TASK_PARENTS))
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("362334516345962206", reader.getString(TASK_ID)) },
- { assertEquals("1078341553348591493", reader.getString(TASK_WORKFLOW_ID)) },
- { assertEquals(Instant.ofEpochMilli(245604), reader.getInstant(TASK_SUBMIT_TIME)) },
- { assertEquals(Duration.ofMillis(8163), reader.getDuration(TASK_RUNTIME)) },
- {
- assertEquals(
- setOf("584055316413447529", "133113685133695608", "1008582348422865408"),
- reader.getSet(TASK_PARENTS, String::class.java),
- )
- },
- )
-
- assertAll(
- { assertTrue(reader.nextRow()) },
- { assertEquals("502010169100446658", reader.getString(TASK_ID)) },
- { assertEquals("1078341553348591493", reader.getString(TASK_WORKFLOW_ID)) },
- { assertEquals(Instant.ofEpochMilli(251325), reader.getInstant(TASK_SUBMIT_TIME)) },
- { assertEquals(Duration.ofMillis(8216), reader.getDuration(TASK_RUNTIME)) },
- {
- assertEquals(
- setOf("584055316413447529", "133113685133695608", "1008582348422865408"),
- reader.getSet(TASK_PARENTS, String::class.java),
- )
- },
- )
-
- reader.close()
- }
-
- @DisplayName("TableReader for Tasks")
- @Nested
- inner class TasksTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/wtf/wtf-trace")
-
- columns = format.getDetails(path, TABLE_TASKS).columns
- reader = format.newReader(path, TABLE_TASKS, null)
- }
- }
-
- @DisplayName("TableReader for Tasks (Shell trace)")
- @Nested
- inner class ShellTasksTableReaderTest : TableReaderTestKit() {
- override lateinit var reader: TableReader
- override lateinit var columns: List<TableColumn>
-
- @BeforeEach
- fun setUp() {
- val path = Paths.get("src/test/resources/wtf/shell")
-
- columns = format.getDetails(path, TABLE_TASKS).columns
- reader = format.newReader(path, TABLE_TASKS, null)
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-calcite/build.gradle.kts b/opendc-trace/opendc-trace-calcite/build.gradle.kts
deleted file mode 100644
index 433c2a35..00000000
--- a/opendc-trace/opendc-trace-calcite/build.gradle.kts
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-description = "Apache Calcite (SQL) integration for the OpenDC trace library"
-
-// Build configuration
-plugins {
- `kotlin-library-conventions`
-}
-
-dependencies {
- api(projects.opendcTrace.opendcTraceApi)
-
- api(libs.calcite.core)
-
- testRuntimeOnly(libs.slf4j.simple)
-}
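
Consumers depended on this module through the Gradle typesafe project accessors used throughout the build; as a sketch (the consuming module is arbitrary):

    dependencies {
        implementation(projects.opendcTrace.opendcTraceCalcite)
    }
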
diff --git a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/InsertableTable.kt b/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/InsertableTable.kt
deleted file mode 100644
index 9c7b69a2..00000000
--- a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/InsertableTable.kt
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.calcite
-
-import org.apache.calcite.linq4j.Enumerable
-import org.apache.calcite.schema.Table
-
-/**
- * A Calcite [Table] to which rows can be inserted.
- */
-internal interface InsertableTable : Table {
- /**
- * Insert [rows] into this table.
- *
- * @param rows The rows to insert into the table.
- * @return The number of rows inserted.
- */
- fun insert(rows: Enumerable<Array<Any?>>): Long
-}
diff --git a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceReaderEnumerator.kt b/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceReaderEnumerator.kt
deleted file mode 100644
index eed52ab3..00000000
--- a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceReaderEnumerator.kt
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.calcite
-
-import org.apache.calcite.linq4j.Enumerator
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import java.nio.ByteBuffer
-import java.nio.ByteOrder
-import java.util.concurrent.atomic.AtomicBoolean
-
-/**
- * An [Enumerator] for a [TableReader].
- */
-internal class TraceReaderEnumerator<E>(
- private val reader: TableReader,
- private val columns: List<TableColumn>,
- private val cancelFlag: AtomicBoolean,
-) : Enumerator<E> {
- private val columnIndices = columns.map { reader.resolve(it.name) }.toIntArray()
- private var current: E? = null
-
- override fun moveNext(): Boolean {
- if (cancelFlag.get()) {
- return false
- }
-
- val reader = reader
- val res = reader.nextRow()
-
- if (res) {
- @Suppress("UNCHECKED_CAST")
- current = convertRow(reader) as E
- } else {
- current = null
- }
-
- return res
- }
-
- override fun current(): E = checkNotNull(current)
-
- override fun reset() {
- throw UnsupportedOperationException()
- }
-
- override fun close() {
- reader.close()
- }
-
- private fun convertRow(reader: TableReader): Array<Any?> {
- val res = arrayOfNulls<Any?>(columns.size)
- val columnIndices = columnIndices
-
- for ((index, column) in columns.withIndex()) {
- val columnIndex = columnIndices[index]
- res[index] = convertColumn(reader, column, columnIndex)
- }
- return res
- }
-
- private fun convertColumn(
- reader: TableReader,
- column: TableColumn,
- columnIndex: Int,
- ): Any? {
- return when (column.type) {
- is TableColumnType.Boolean -> reader.getBoolean(columnIndex)
- is TableColumnType.Int -> reader.getInt(columnIndex)
- is TableColumnType.Long -> reader.getLong(columnIndex)
- is TableColumnType.Float -> reader.getFloat(columnIndex)
- is TableColumnType.Double -> reader.getDouble(columnIndex)
- is TableColumnType.String -> reader.getString(columnIndex)
- is TableColumnType.UUID -> {
- val uuid = reader.getUUID(columnIndex)
-
- if (uuid != null) {
- val uuidBytes = ByteArray(16)
-
- ByteBuffer.wrap(uuidBytes)
- .order(ByteOrder.BIG_ENDIAN)
- .putLong(uuid.mostSignificantBits)
- .putLong(uuid.leastSignificantBits)
-
- uuidBytes
- } else {
- null
- }
- }
- is TableColumnType.Instant -> reader.getInstant(columnIndex)?.toEpochMilli()
- is TableColumnType.Duration -> reader.getDuration(columnIndex)?.toMillis() ?: 0
- is TableColumnType.List -> reader.getList(columnIndex, Any::class.java)?.toTypedArray()
- is TableColumnType.Set -> reader.getSet(columnIndex, Any::class.java)?.toTypedArray()
- is TableColumnType.Map -> reader.getMap(columnIndex, Any::class.java, Any::class.java)
- }
- }
-}
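
The enumerator follows Calcite's linq4j contract (moveNext/current/close). A sketch of how the surrounding adapter would drive it, assuming a TableReader and its column list are already in hand:

    val cancelFlag = AtomicBoolean(false)
    val enumerator = TraceReaderEnumerator<Array<Any?>>(reader, columns, cancelFlag)
    try {
        while (enumerator.moveNext()) {
            val row: Array<Any?> = enumerator.current()
            // Each array element holds the converted value of the corresponding column.
        }
    } finally {
        enumerator.close() // also closes the underlying TableReader
    }
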
diff --git a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceSchema.kt b/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceSchema.kt
deleted file mode 100644
index 3249546d..00000000
--- a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceSchema.kt
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.calcite
-
-import org.apache.calcite.schema.Schema
-import org.apache.calcite.schema.Table
-import org.apache.calcite.schema.impl.AbstractSchema
-import org.opendc.trace.Trace
-
-/**
- * A Calcite [Schema] that exposes an OpenDC [Trace] into multiple SQL tables.
- *
- * @param trace The [Trace] to create a schema for.
- */
-public class TraceSchema(private val trace: Trace) : AbstractSchema() {
- /**
- * The [Table]s that belong to this schema.
- */
- private val tables: Map<String, TraceTable> by lazy {
- trace.tables.associateWith {
- val table = checkNotNull(trace.getTable(it)) { "Unexpected null table" }
- TraceTable(table)
- }
- }
-
- override fun getTableMap(): Map<String, Table> = tables
-}
diff --git a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceSchemaFactory.kt b/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceSchemaFactory.kt
deleted file mode 100644
index cbf7ec43..00000000
--- a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceSchemaFactory.kt
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.calcite
-
-import org.apache.calcite.model.ModelHandler
-import org.apache.calcite.schema.Schema
-import org.apache.calcite.schema.SchemaFactory
-import org.apache.calcite.schema.SchemaPlus
-import org.opendc.trace.Trace
-import java.io.File
-import java.nio.file.Paths
-
-/**
- * Factory that creates a [TraceSchema].
- *
- * This factory allows users to include a schema that references a trace in a `model.json` file.
- */
-public class TraceSchemaFactory : SchemaFactory {
- override fun create(
- parentSchema: SchemaPlus,
- name: String,
- operand: Map<String, Any>,
- ): Schema {
- val base = operand[ModelHandler.ExtraOperand.BASE_DIRECTORY.camelName] as File?
- val pathParam = requireNotNull(operand["path"]) { "Trace path not specified" } as String
- val path = if (base != null) File(base, pathParam).toPath() else Paths.get(pathParam)
-
- val format = requireNotNull(operand["format"]) { "Trace format not specified" } as String
- val create = operand.getOrDefault("create", false) as Boolean
-
- val trace = if (create) Trace.create(path, format) else Trace.open(path, format)
- return TraceSchema(trace)
- }
-}
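As a usage sketch (the connection strings mirror the tests removed further below; paths are placeholders), the factory could be reached either through a model file or directly via JDBC properties, where each schema.* property ends up in the factory's operand map:

    import java.sql.DriverManager

    // Via a model.json that names the factory (see the removed test resource below).
    val viaModel = DriverManager.getConnection("jdbc:calcite:model=path/to/model.json")

    // Or inline, without a model file.
    val inline =
        DriverManager.getConnection(
            "jdbc:calcite:schemaFactory=org.opendc.trace.calcite.TraceSchemaFactory; " +
                "schema.path=path/to/trace; schema.format=opendc-vm",
        )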
diff --git a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTable.kt b/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTable.kt
deleted file mode 100644
index e74d2ee8..00000000
--- a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTable.kt
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.calcite
-
-import org.apache.calcite.DataContext
-import org.apache.calcite.adapter.java.AbstractQueryableTable
-import org.apache.calcite.adapter.java.JavaTypeFactory
-import org.apache.calcite.linq4j.AbstractEnumerable
-import org.apache.calcite.linq4j.Enumerable
-import org.apache.calcite.linq4j.Enumerator
-import org.apache.calcite.linq4j.QueryProvider
-import org.apache.calcite.linq4j.Queryable
-import org.apache.calcite.plan.RelOptCluster
-import org.apache.calcite.plan.RelOptTable
-import org.apache.calcite.prepare.Prepare.CatalogReader
-import org.apache.calcite.rel.RelNode
-import org.apache.calcite.rel.core.TableModify
-import org.apache.calcite.rel.logical.LogicalTableModify
-import org.apache.calcite.rel.type.RelDataType
-import org.apache.calcite.rel.type.RelDataTypeFactory
-import org.apache.calcite.rex.RexNode
-import org.apache.calcite.schema.ModifiableTable
-import org.apache.calcite.schema.ProjectableFilterableTable
-import org.apache.calcite.schema.SchemaPlus
-import org.apache.calcite.schema.impl.AbstractTableQueryable
-import org.apache.calcite.sql.type.SqlTypeName
-import org.opendc.trace.TableColumnType
-import java.nio.ByteBuffer
-import java.time.Duration
-import java.time.Instant
-import java.util.UUID
-import java.util.concurrent.atomic.AtomicBoolean
-
-/**
- * A Calcite [Table] that exposes an OpenDC [org.opendc.trace.Table] as a SQL table.
- */
-internal class TraceTable(private val table: org.opendc.trace.Table) :
- AbstractQueryableTable(Array<Any?>::class.java),
- ProjectableFilterableTable,
- ModifiableTable,
- InsertableTable {
- private var rowType: RelDataType? = null
-
- override fun getRowType(typeFactory: RelDataTypeFactory): RelDataType {
- var rowType = rowType
- if (rowType == null) {
- rowType = deduceRowType(typeFactory as JavaTypeFactory)
- this.rowType = rowType
- }
-
- return rowType
- }
-
- override fun scan(
- root: DataContext,
- filters: MutableList<RexNode>,
- projects: IntArray?,
- ): Enumerable<Array<Any?>> {
-        // Filters are currently not supported by the OpenDC trace API. By leaving the filters in the list,
-        // we signal to Calcite that they were declined, and Calcite applies them itself.
-
- val projection = projects?.map { table.columns[it] }
- val cancelFlag = DataContext.Variable.CANCEL_FLAG.get<AtomicBoolean>(root)
- return object : AbstractEnumerable<Array<Any?>>() {
- override fun enumerator(): Enumerator<Array<Any?>> =
- TraceReaderEnumerator(table.newReader(projection?.map { it.name }), projection ?: table.columns, cancelFlag)
- }
- }
-
- override fun insert(rows: Enumerable<Array<Any?>>): Long {
- val table = table
- val columns = table.columns
- val writer = table.newWriter()
- val columnIndices = columns.map { writer.resolve(it.name) }.toIntArray()
- var rowCount = 0L
-
- try {
- for (row in rows) {
- writer.startRow()
-
- for ((index, value) in row.withIndex()) {
- if (value == null) {
- continue
- }
- val columnType = columns[index].type
- val columnIndex = columnIndices[index]
- when (columnType) {
- is TableColumnType.Boolean -> writer.setBoolean(columnIndex, value as Boolean)
- is TableColumnType.Int -> writer.setInt(columnIndex, value as Int)
- is TableColumnType.Long -> writer.setLong(columnIndex, value as Long)
- is TableColumnType.Float -> writer.setFloat(columnIndex, value as Float)
- is TableColumnType.Double -> writer.setDouble(columnIndex, value as Double)
- is TableColumnType.String -> writer.setString(columnIndex, value as String)
- is TableColumnType.UUID -> {
- val bb = ByteBuffer.wrap(value as ByteArray)
- writer.setUUID(columnIndex, UUID(bb.getLong(), bb.getLong()))
- }
- is TableColumnType.Instant -> writer.setInstant(columnIndex, Instant.ofEpochMilli(value as Long))
- is TableColumnType.Duration -> writer.setDuration(columnIndex, Duration.ofMillis(value as Long))
- is TableColumnType.List -> writer.setList(columnIndex, value as List<*>)
- is TableColumnType.Set -> writer.setSet(columnIndex, (value as List<*>).toSet())
- is TableColumnType.Map -> writer.setMap(columnIndex, value as Map<*, *>)
- }
- }
-
- writer.endRow()
-
- rowCount++
- }
- } finally {
- writer.close()
- }
-
- return rowCount
- }
-
- override fun <T> asQueryable(
- queryProvider: QueryProvider,
- schema: SchemaPlus,
- tableName: String,
- ): Queryable<T> {
- return object : AbstractTableQueryable<T>(queryProvider, schema, this@TraceTable, tableName) {
- override fun enumerator(): Enumerator<T> {
- val cancelFlag = AtomicBoolean(false)
- return TraceReaderEnumerator(
- this@TraceTable.table.newReader(),
- this@TraceTable.table.columns,
- cancelFlag,
- )
- }
-
- override fun toString(): String = "TraceTableQueryable[table=$tableName]"
- }
- }
-
- override fun getModifiableCollection(): MutableCollection<Any?>? = null
-
- override fun toModificationRel(
- cluster: RelOptCluster,
- table: RelOptTable,
- catalogReader: CatalogReader,
- child: RelNode,
- operation: TableModify.Operation,
- updateColumnList: MutableList<String>?,
- sourceExpressionList: MutableList<RexNode>?,
- flattened: Boolean,
- ): TableModify {
- cluster.planner.addRule(TraceTableModifyRule.DEFAULT.toRule())
-
- return LogicalTableModify.create(
- table,
- catalogReader,
- child,
- operation,
- updateColumnList,
- sourceExpressionList,
- flattened,
- )
- }
-
- override fun toString(): String = "TraceTable"
-
- private fun deduceRowType(typeFactory: JavaTypeFactory): RelDataType {
- val types = mutableListOf<RelDataType>()
- val names = mutableListOf<String>()
-
- for (column in table.columns) {
- names.add(column.name)
- types.add(mapType(typeFactory, column.type))
- }
-
- return typeFactory.createStructType(types, names)
- }
-
- private fun mapType(
- typeFactory: JavaTypeFactory,
- type: TableColumnType,
- ): RelDataType {
- return when (type) {
- is TableColumnType.Boolean -> typeFactory.createSqlType(SqlTypeName.BOOLEAN)
- is TableColumnType.Int -> typeFactory.createSqlType(SqlTypeName.INTEGER)
- is TableColumnType.Long -> typeFactory.createSqlType(SqlTypeName.BIGINT)
- is TableColumnType.Float -> typeFactory.createSqlType(SqlTypeName.FLOAT)
- is TableColumnType.Double -> typeFactory.createSqlType(SqlTypeName.DOUBLE)
- is TableColumnType.String -> typeFactory.createSqlType(SqlTypeName.VARCHAR)
- is TableColumnType.UUID -> typeFactory.createSqlType(SqlTypeName.BINARY, 16)
- is TableColumnType.Instant -> typeFactory.createSqlType(SqlTypeName.TIMESTAMP)
- is TableColumnType.Duration -> typeFactory.createSqlType(SqlTypeName.BIGINT)
- is TableColumnType.List -> typeFactory.createArrayType(mapType(typeFactory, type.elementType), -1)
- is TableColumnType.Set -> typeFactory.createMultisetType(mapType(typeFactory, type.elementType), -1)
- is TableColumnType.Map -> typeFactory.createMapType(mapType(typeFactory, type.keyType), mapType(typeFactory, type.valueType))
- }
- }
-}
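Because the table implements both ProjectableFilterableTable and InsertableTable, SQL flows through it in both directions: scans surface trace rows (with filtering left to Calcite), and INSERT statements are routed into a trace writer via insert(). A sketch of the write path, mirroring the testInsert case removed below (assumes a Statement on a connection with the schema registered as in the earlier sketch):

    val count =
        stmt.executeUpdate(
            """
            INSERT INTO trace.resources (id, start_time, stop_time, cpu_count, cpu_capacity, mem_capacity)
            VALUES (1234, '2013-08-12 13:35:46.0', '2013-09-11 13:39:58.0', 1, 2926.0, 1024.0)
            """.trimIndent(),
        )
    // insert() returns the number of rows written, which Calcite reports as the update count.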
diff --git a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTableModify.kt b/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTableModify.kt
deleted file mode 100644
index eedff00d..00000000
--- a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTableModify.kt
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.calcite
-
-import org.apache.calcite.adapter.enumerable.EnumerableRel
-import org.apache.calcite.adapter.enumerable.EnumerableRel.Prefer
-import org.apache.calcite.adapter.enumerable.EnumerableRelImplementor
-import org.apache.calcite.adapter.enumerable.EnumerableTableScan
-import org.apache.calcite.adapter.enumerable.JavaRowFormat
-import org.apache.calcite.adapter.enumerable.PhysTypeImpl
-import org.apache.calcite.adapter.java.JavaTypeFactory
-import org.apache.calcite.linq4j.Enumerable
-import org.apache.calcite.linq4j.tree.BlockBuilder
-import org.apache.calcite.linq4j.tree.Expressions
-import org.apache.calcite.linq4j.tree.Types
-import org.apache.calcite.plan.RelOptCluster
-import org.apache.calcite.plan.RelOptCost
-import org.apache.calcite.plan.RelOptPlanner
-import org.apache.calcite.plan.RelOptTable
-import org.apache.calcite.plan.RelTraitSet
-import org.apache.calcite.prepare.Prepare
-import org.apache.calcite.rel.RelNode
-import org.apache.calcite.rel.core.TableModify
-import org.apache.calcite.rel.metadata.RelMetadataQuery
-import org.apache.calcite.rex.RexNode
-import org.apache.calcite.schema.ModifiableTable
-import org.apache.calcite.util.BuiltInMethod
-import java.lang.reflect.Method
-
-/**
- * A [TableModify] expression that modifies a workload trace.
- */
-internal class TraceTableModify(
- cluster: RelOptCluster,
- traitSet: RelTraitSet,
- table: RelOptTable,
- schema: Prepare.CatalogReader,
- input: RelNode,
- operation: Operation,
- updateColumnList: List<String>?,
- sourceExpressionList: List<RexNode>?,
- flattened: Boolean,
-) : TableModify(cluster, traitSet, table, schema, input, operation, updateColumnList, sourceExpressionList, flattened),
- EnumerableRel {
- init {
- // Make sure the table is modifiable
- table.unwrap(ModifiableTable::class.java) ?: throw AssertionError() // TODO: user error in validator
- }
-
- override fun copy(
- traitSet: RelTraitSet,
- inputs: List<RelNode>?,
- ): RelNode {
- return TraceTableModify(
- cluster,
- traitSet,
- table,
- getCatalogReader(),
- sole(inputs),
- operation,
- updateColumnList,
- sourceExpressionList,
- isFlattened,
- )
- }
-
- override fun computeSelfCost(
- planner: RelOptPlanner,
- mq: RelMetadataQuery?,
- ): RelOptCost {
- // Prefer this plan compared to the standard EnumerableTableModify.
- return super.computeSelfCost(planner, mq)!!.multiplyBy(.1)
- }
-
- override fun implement(
- implementor: EnumerableRelImplementor,
- pref: Prefer,
- ): EnumerableRel.Result {
- val builder = BlockBuilder()
- val result = implementor.visitChild(this, 0, getInput() as EnumerableRel, pref)
- val childExp = builder.append("child", result.block)
- val convertedChildExpr =
- if (getInput().rowType != rowType) {
- val typeFactory = cluster.typeFactory as JavaTypeFactory
- val format = EnumerableTableScan.deduceFormat(table)
- val physType = PhysTypeImpl.of(typeFactory, table.rowType, format)
- val childPhysType = result.physType
- val o = Expressions.parameter(childPhysType.javaRowType, "o")
- val expressionList =
- List(childPhysType.rowType.fieldCount) { i ->
- childPhysType.fieldReference(o, i, physType.getJavaFieldType(i))
- }
-
- builder.append(
- "convertedChild",
- Expressions.call(
- childExp,
- BuiltInMethod.SELECT.method,
- Expressions.lambda<org.apache.calcite.linq4j.function.Function<*>>(physType.record(expressionList), o),
- ),
- )
- } else {
- childExp
- }
-
- if (!isInsert) {
- throw UnsupportedOperationException("Deletion and update not supported")
- }
-
- val expression = table.getExpression(InsertableTable::class.java)
- builder.add(
- Expressions.return_(
- null,
- Expressions.call(
- BuiltInMethod.SINGLETON_ENUMERABLE.method,
- Expressions.call(
- Long::class.java,
- expression,
- INSERT_METHOD,
- convertedChildExpr,
- ),
- ),
- ),
- )
-
- val rowFormat = if (pref === Prefer.ARRAY) JavaRowFormat.ARRAY else JavaRowFormat.SCALAR
- val physType = PhysTypeImpl.of(implementor.typeFactory, getRowType(), rowFormat)
- return implementor.result(physType, builder.toBlock())
- }
-
- private companion object {
- /**
- * Reference to [InsertableTable.insert] method.
- */
- val INSERT_METHOD: Method = Types.lookupMethod(InsertableTable::class.java, "insert", Enumerable::class.java)
- }
-}
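At runtime, the block built by implement() computes the child enumerable, hands it to InsertableTable.insert, and wraps the returned row count in a one-element enumerable. Roughly, as a conceptual sketch rather than the actual generated source (the variable names are illustrative; Linq4j.singletonEnumerable is what BuiltInMethod.SINGLETON_ENUMERABLE resolves to):

    // Conceptual equivalent of the generated expression tree above.
    val rowCount: Long = insertableTable.insert(convertedChild)
    val result: Enumerable<Long> = Linq4j.singletonEnumerable(rowCount)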
diff --git a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTableModifyRule.kt b/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTableModifyRule.kt
deleted file mode 100644
index 9c560984..00000000
--- a/opendc-trace/opendc-trace-calcite/src/main/kotlin/org/opendc/trace/calcite/TraceTableModifyRule.kt
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.calcite
-
-import org.apache.calcite.adapter.enumerable.EnumerableConvention
-import org.apache.calcite.plan.Convention
-import org.apache.calcite.rel.RelNode
-import org.apache.calcite.rel.convert.ConverterRule
-import org.apache.calcite.rel.core.TableModify
-import org.apache.calcite.rel.logical.LogicalTableModify
-import org.apache.calcite.schema.ModifiableTable
-
-/**
- * A [ConverterRule] from a [LogicalTableModify] to a [TraceTableModify].
- */
-internal class TraceTableModifyRule(config: Config) : ConverterRule(config) {
- override fun convert(rel: RelNode): RelNode? {
- val modify = rel as TableModify
- val table = modify.table!!
-
- // Make sure that the table is modifiable
- if (table.unwrap(ModifiableTable::class.java) == null) {
- return null
- }
-
- val traitSet = modify.traitSet.replace(EnumerableConvention.INSTANCE)
- return TraceTableModify(
- modify.cluster, traitSet,
- table,
- modify.catalogReader,
- convert(modify.input, traitSet),
- modify.operation,
- modify.updateColumnList,
- modify.sourceExpressionList,
- modify.isFlattened,
- )
- }
-
- companion object {
- /** Default configuration. */
- val DEFAULT: Config =
- Config.INSTANCE
- .withConversion(
- LogicalTableModify::class.java,
- Convention.NONE,
- EnumerableConvention.INSTANCE,
- "TraceTableModificationRule",
- )
- .withRuleFactory { config: Config -> TraceTableModifyRule(config) }
- }
-}
diff --git a/opendc-trace/opendc-trace-calcite/src/test/kotlin/org/opendc/trace/calcite/CalciteTest.kt b/opendc-trace/opendc-trace-calcite/src/test/kotlin/org/opendc/trace/calcite/CalciteTest.kt
deleted file mode 100644
index 6a945580..00000000
--- a/opendc-trace/opendc-trace-calcite/src/test/kotlin/org/opendc/trace/calcite/CalciteTest.kt
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.calcite
-
-import io.mockk.every
-import io.mockk.mockk
-import org.apache.calcite.jdbc.CalciteConnection
-import org.junit.jupiter.api.Assertions.assertAll
-import org.junit.jupiter.api.Assertions.assertArrayEquals
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertFalse
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.Test
-import org.opendc.trace.TableColumn
-import org.opendc.trace.TableColumnType
-import org.opendc.trace.TableReader
-import org.opendc.trace.Trace
-import org.opendc.trace.conv.TABLE_RESOURCES
-import java.nio.file.Files
-import java.nio.file.Paths
-import java.sql.DriverManager
-import java.sql.ResultSet
-import java.sql.Statement
-import java.sql.Timestamp
-import java.time.Duration
-import java.time.Instant
-import java.util.Properties
-import java.util.UUID
-
-/**
- * Smoke test for Apache Calcite integration.
- */
-class CalciteTest {
- /**
- * The trace to experiment with.
- */
- private val odcTrace = Trace.open(Paths.get("src/test/resources/trace"), format = "opendc-vm")
-
-// @Test
- fun testResources() {
- runQuery(odcTrace, "SELECT * FROM trace.resources") { rs ->
- assertAll(
- { assertTrue(rs.next()) },
- { assertEquals("1019", rs.getString("id")) },
- { assertEquals(1, rs.getInt("cpu_count")) },
- { assertEquals(Timestamp.valueOf("2013-08-12 13:40:46.0"), rs.getTimestamp("start_time")) },
- { assertEquals(181352.0, rs.getDouble("mem_capacity")) },
- { assertTrue(rs.next()) },
- { assertEquals("1023", rs.getString("id")) },
- { assertTrue(rs.next()) },
- { assertEquals("1052", rs.getString("id")) },
- { assertTrue(rs.next()) },
- { assertEquals("1073", rs.getString("id")) },
- { assertFalse(rs.next()) },
- )
- }
- }
-
- @Test
- fun testResourceStates() {
- runQuery(odcTrace, "SELECT * FROM trace.resource_states") { rs ->
- assertAll(
- { assertTrue(rs.next()) },
- { assertEquals("1019", rs.getString("id")) },
- { assertEquals(Timestamp.valueOf("2013-08-12 13:40:46.0"), rs.getTimestamp("timestamp")) },
- { assertEquals(300000, rs.getLong("duration")) },
- { assertEquals(0.0, rs.getDouble("cpu_usage")) },
- { assertTrue(rs.next()) },
- { assertEquals("1019", rs.getString("id")) },
- )
- }
- }
-
- @Test
- fun testInterferenceGroups() {
- runQuery(odcTrace, "SELECT * FROM trace.interference_groups") { rs ->
- assertAll(
- { assertTrue(rs.next()) },
- { assertArrayEquals(arrayOf("1019", "1023", "1052"), rs.getArray("members").array as Array<*>) },
- { assertEquals(0.0, rs.getDouble("target")) },
- { assertEquals(0.8830158730158756, rs.getDouble("score")) },
- )
- }
- }
-
- @Test
- fun testComplexQuery() {
- runQuery(odcTrace, "SELECT max(cpu_usage) as max_cpu_usage, avg(cpu_usage) as avg_cpu_usage FROM trace.resource_states") { rs ->
- assertAll(
- { assertTrue(rs.next()) },
- { assertEquals(249.59993808, rs.getDouble("max_cpu_usage")) },
- { assertEquals(5.387240309118493, rs.getDouble("avg_cpu_usage")) },
- )
- }
- }
-
-// @Test
- fun testInsert() {
- val tmp = Files.createTempDirectory("opendc")
- val newTrace = Trace.create(tmp, "opendc-vm")
-
- runStatement(newTrace) { stmt ->
- val count =
- stmt.executeUpdate(
- """
- INSERT INTO trace.resources (id, start_time, stop_time, cpu_count, cpu_capacity, mem_capacity)
- VALUES (1234, '2013-08-12 13:35:46.0', '2013-09-11 13:39:58.0', 1, 2926.0, 1024.0)
- """.trimIndent(),
- )
- assertEquals(1, count)
- }
-
- runQuery(newTrace, "SELECT * FROM trace.resources") { rs ->
- assertAll(
- { assertTrue(rs.next()) },
- { assertEquals("1234", rs.getString("id")) },
- { assertEquals(1, rs.getInt("cpu_count")) },
- { assertEquals(Timestamp.valueOf("2013-08-12 13:35:46.0"), rs.getTimestamp("start_time")) },
- { assertEquals(2926.0, rs.getDouble("cpu_capacity")) },
- { assertEquals(1024.0, rs.getDouble("mem_capacity")) },
- )
- }
- }
-
- @Test
- fun testUUID() {
- val trace = mockk<Trace>()
- every { trace.tables } returns listOf(TABLE_RESOURCES)
- every { trace.getTable(TABLE_RESOURCES)!!.columns } returns
- listOf(
- TableColumn("id", TableColumnType.UUID),
- )
- every { trace.getTable(TABLE_RESOURCES)!!.newReader() } answers {
- object : TableReader {
- override fun nextRow(): Boolean = true
-
- override fun resolve(name: String): Int {
- return when (name) {
- "id" -> 0
- else -> -1
- }
- }
-
- override fun isNull(index: Int): Boolean = false
-
- override fun getBoolean(index: Int): Boolean {
- TODO("not implemented")
- }
-
- override fun getInt(index: Int): Int {
- TODO("not implemented")
- }
-
- override fun getLong(index: Int): Long {
- TODO("not implemented")
- }
-
- override fun getFloat(index: Int): Float {
- TODO("not implemented")
- }
-
- override fun getDouble(index: Int): Double {
- TODO("not implemented")
- }
-
- override fun getString(index: Int): String? {
- TODO("not implemented")
- }
-
- override fun getUUID(index: Int): UUID = UUID(1, 2)
-
- override fun getInstant(index: Int): Instant? {
- TODO("not implemented")
- }
-
- override fun getDuration(index: Int): Duration? {
- TODO("not implemented")
- }
-
- override fun <T> getList(
- index: Int,
- elementType: Class<T>,
- ): List<T>? {
- TODO("not implemented")
- }
-
- override fun <T> getSet(
- index: Int,
- elementType: Class<T>,
- ): Set<T>? {
- TODO("not implemented")
- }
-
- override fun <K, V> getMap(
- index: Int,
- keyType: Class<K>,
- valueType: Class<V>,
- ): Map<K, V>? {
- TODO("not implemented")
- }
-
- override fun close() {}
- }
- }
-
- runQuery(trace, "SELECT id FROM trace.resources") { rs ->
- assertAll(
- { assertTrue(rs.next()) },
- { assertArrayEquals(byteArrayOf(0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2), rs.getBytes("id")) },
- )
- }
- }
-
- /**
-     * Helper function to run a query against the specified trace.
- */
- private fun runQuery(
- trace: Trace,
- query: String,
- block: (ResultSet) -> Unit,
- ) {
- runStatement(trace) { stmt ->
- val rs = stmt.executeQuery(query)
- rs.use { block(rs) }
- }
- }
-
- /**
-     * Helper function to run a statement against the specified trace.
- */
- private fun runStatement(
- trace: Trace,
- block: (Statement) -> Unit,
- ) {
- val info = Properties()
- info.setProperty("lex", "JAVA")
- val connection = DriverManager.getConnection("jdbc:calcite:", info).unwrap(CalciteConnection::class.java)
- connection.rootSchema.add("trace", TraceSchema(trace))
-
- val stmt = connection.createStatement()
- try {
- block(stmt)
- } finally {
- stmt.close()
- connection.close()
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-calcite/src/test/kotlin/org/opendc/trace/calcite/TraceSchemaFactoryTest.kt b/opendc-trace/opendc-trace-calcite/src/test/kotlin/org/opendc/trace/calcite/TraceSchemaFactoryTest.kt
deleted file mode 100644
index ddf325e8..00000000
--- a/opendc-trace/opendc-trace-calcite/src/test/kotlin/org/opendc/trace/calcite/TraceSchemaFactoryTest.kt
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.calcite
-
-import org.junit.jupiter.api.Assertions.assertAll
-import org.junit.jupiter.api.Assertions.assertEquals
-import org.junit.jupiter.api.Assertions.assertTrue
-import org.junit.jupiter.api.Test
-import org.junit.jupiter.api.assertThrows
-import java.sql.DriverManager
-import java.sql.Timestamp
-import java.util.Properties
-
-/**
- * Test suite for [TraceSchemaFactory].
- */
-class TraceSchemaFactoryTest {
-// @Test
- fun testSmoke() {
- val info = Properties()
- info.setProperty("lex", "JAVA")
- val connection = DriverManager.getConnection("jdbc:calcite:model=src/test/resources/model.json", info)
- val stmt = connection.createStatement()
- val rs = stmt.executeQuery("SELECT * FROM trace.resources")
- try {
- assertAll(
- { assertTrue(rs.next()) },
- { assertEquals("1019", rs.getString("id")) },
- { assertEquals(1, rs.getInt("cpu_count")) },
- { assertEquals(Timestamp.valueOf("2013-08-12 13:40:46.0"), rs.getTimestamp("start_time")) },
- { assertEquals(181352.0, rs.getDouble("mem_capacity")) },
- )
- } finally {
- rs.close()
- stmt.close()
- connection.close()
- }
- }
-
- @Test
- fun testWithoutParams() {
- assertThrows<java.lang.RuntimeException> {
- DriverManager.getConnection("jdbc:calcite:schemaFactory=org.opendc.trace.calcite.TraceSchemaFactory")
- }
- }
-
- @Test
- fun testWithoutPath() {
- assertThrows<java.lang.RuntimeException> {
- DriverManager.getConnection("jdbc:calcite:schemaFactory=org.opendc.trace.calcite.TraceSchemaFactory; schema.format=opendc-vm")
- }
- }
-
- @Test
- fun testWithoutFormat() {
- assertThrows<java.lang.RuntimeException> {
- DriverManager.getConnection("jdbc:calcite:schemaFactory=org.opendc.trace.calcite.TraceSchemaFactory; schema.path=trace")
- }
- }
-}
diff --git a/opendc-trace/opendc-trace-calcite/src/test/resources/model.json b/opendc-trace/opendc-trace-calcite/src/test/resources/model.json
deleted file mode 100644
index 91e2657f..00000000
--- a/opendc-trace/opendc-trace-calcite/src/test/resources/model.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "version": "1.0",
- "defaultSchema": "trace",
- "schemas": [
- {
- "name": "trace",
- "type": "custom",
- "factory": "org.opendc.trace.calcite.TraceSchemaFactory",
- "operand": {
- "path": "trace",
- "format": "opendc-vm"
- }
- }
- ]
-}
diff --git a/opendc-trace/opendc-trace-calcite/src/test/resources/trace/fragments.parquet b/opendc-trace/opendc-trace-calcite/src/test/resources/trace/fragments.parquet
deleted file mode 100644
index 00ab5835..00000000
--- a/opendc-trace/opendc-trace-calcite/src/test/resources/trace/fragments.parquet
+++ /dev/null
Binary files differ
diff --git a/opendc-trace/opendc-trace-calcite/src/test/resources/trace/interference-model.json b/opendc-trace/opendc-trace-calcite/src/test/resources/trace/interference-model.json
deleted file mode 100644
index 6a0616d9..00000000
--- a/opendc-trace/opendc-trace-calcite/src/test/resources/trace/interference-model.json
+++ /dev/null
@@ -1,20 +0,0 @@
-[
- {
- "vms": [
- "1019",
- "1023",
- "1052"
- ],
- "minServerLoad": 0.0,
- "performanceScore": 0.8830158730158756
- },
- {
- "vms": [
- "1023",
- "1052",
- "1073"
- ],
- "minServerLoad": 0.0,
- "performanceScore": 0.7133055555552751
- }
-]
diff --git a/opendc-trace/opendc-trace-calcite/src/test/resources/trace/tasks.parquet b/opendc-trace/opendc-trace-calcite/src/test/resources/trace/tasks.parquet
deleted file mode 100644
index d8184945..00000000
--- a/opendc-trace/opendc-trace-calcite/src/test/resources/trace/tasks.parquet
+++ /dev/null
Binary files differ
diff --git a/opendc-trace/opendc-trace-tools/build.gradle.kts b/opendc-trace/opendc-trace-tools/build.gradle.kts
deleted file mode 100644
index 654d37f7..00000000
--- a/opendc-trace/opendc-trace-tools/build.gradle.kts
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-description = "Tools for working with workload traces"
-
-// Build configuration
-plugins {
- `kotlin-conventions`
- application
-}
-
-application {
- mainClass.set("org.opendc.trace.tools.TraceTools")
-}
-
-dependencies {
- implementation(projects.opendcTrace.opendcTraceApi)
- implementation(projects.opendcTrace.opendcTraceCalcite)
- implementation(libs.kotlin.logging)
- implementation(libs.clikt)
- implementation(libs.jline)
-
- runtimeOnly(libs.log4j.core)
- runtimeOnly(libs.log4j.slf4j)
-}
diff --git a/opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/ConvertCommand.kt b/opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/ConvertCommand.kt
deleted file mode 100644
index aa7b09d5..00000000
--- a/opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/ConvertCommand.kt
+++ /dev/null
@@ -1,522 +0,0 @@
-/*
- * Copyright (c) 2021 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.tools
-
-import com.github.ajalt.clikt.core.CliktCommand
-import com.github.ajalt.clikt.parameters.arguments.argument
-import com.github.ajalt.clikt.parameters.groups.OptionGroup
-import com.github.ajalt.clikt.parameters.groups.cooccurring
-import com.github.ajalt.clikt.parameters.groups.defaultByName
-import com.github.ajalt.clikt.parameters.groups.groupChoice
-import com.github.ajalt.clikt.parameters.options.default
-import com.github.ajalt.clikt.parameters.options.defaultLazy
-import com.github.ajalt.clikt.parameters.options.option
-import com.github.ajalt.clikt.parameters.options.required
-import com.github.ajalt.clikt.parameters.types.double
-import com.github.ajalt.clikt.parameters.types.file
-import com.github.ajalt.clikt.parameters.types.long
-import com.github.ajalt.clikt.parameters.types.restrictTo
-import mu.KotlinLogging
-import org.opendc.trace.TableWriter
-import org.opendc.trace.Trace
-import org.opendc.trace.conv.TABLE_RESOURCES
-import org.opendc.trace.conv.TABLE_RESOURCE_STATES
-import org.opendc.trace.conv.resourceCpuCapacity
-import org.opendc.trace.conv.resourceCpuCount
-import org.opendc.trace.conv.resourceDuration
-import org.opendc.trace.conv.resourceID
-import org.opendc.trace.conv.resourceMemCapacity
-import org.opendc.trace.conv.resourceStateCpuUsage
-import org.opendc.trace.conv.resourceStateCpuUsagePct
-import org.opendc.trace.conv.resourceStateDuration
-import org.opendc.trace.conv.resourceStateMemUsage
-import org.opendc.trace.conv.resourceStateTimestamp
-import org.opendc.trace.conv.resourceSubmissionTime
-import java.io.File
-import java.time.Duration
-import java.time.Instant
-import java.util.Random
-import kotlin.math.abs
-import kotlin.math.max
-import kotlin.math.min
-
-/**
- * A [CliktCommand] that can convert between workload trace formats.
- */
-internal class ConvertCommand : CliktCommand(name = "convert", help = "Convert between workload trace formats") {
- /**
- * The logger instance for the converter.
- */
- private val logger = KotlinLogging.logger {}
-
- /**
- * The directory where the trace should be stored.
- */
- private val output by option("-O", "--output", help = "path to store the trace")
- .file(canBeFile = false, mustExist = false)
- .defaultLazy { File("output") }
-
- /**
- * The directory where the input trace is located.
- */
- private val input by argument("input", help = "path to the input trace")
- .file(canBeFile = false)
-
- /**
- * The input format of the trace.
- */
- private val inputFormat by option("-f", "--input-format", help = "format of input trace")
- .required()
-
- /**
- * The format of the output trace.
- */
- private val outputFormat by option("--output-format", help = "format of output trace")
- .default("opendc-vm")
-
- /**
- * The sampling options.
- */
- private val samplingOptions by SamplingOptions().cooccurring()
-
- /**
- * The converter strategy to use.
- */
- private val converter by option("-c", "--converter", help = "converter strategy to use").groupChoice(
- "default" to DefaultTraceConverter(),
- "azure" to AzureTraceConverter(),
- ).defaultByName("default")
-
- override fun run() {
- val metaParquet = File(output, "meta.parquet")
- val traceParquet = File(output, "trace.parquet")
-
- if (metaParquet.exists()) {
- metaParquet.delete()
- }
- if (traceParquet.exists()) {
- traceParquet.delete()
- }
-
- val inputTrace = Trace.open(input, format = inputFormat)
- val outputTrace = Trace.create(output, format = outputFormat)
-
- logger.info { "Building resources table" }
-
- val metaWriter = outputTrace.getTable(TABLE_RESOURCES)!!.newWriter()
-
- val selectedVms = metaWriter.use { converter.convertResources(inputTrace, it, samplingOptions) }
-
- if (selectedVms.isEmpty()) {
- logger.warn { "No VMs selected" }
- return
- }
-
- logger.info { "Wrote ${selectedVms.size} rows" }
- logger.info { "Building resource states table" }
-
- val writer = outputTrace.getTable(TABLE_RESOURCE_STATES)!!.newWriter()
-
- val statesCount = writer.use { converter.convertResourceStates(inputTrace, it, selectedVms) }
- logger.info { "Wrote $statesCount rows" }
- }
-
- /**
- * Options for sampling the workload trace.
- */
- private class SamplingOptions : OptionGroup() {
- /**
-         * The fraction of VMs to sample.
- */
- val fraction by option("--sampling-fraction", help = "fraction of the workload to sample")
- .double()
- .restrictTo(0.0001, 1.0)
- .required()
-
- /**
- * The seed for sampling the trace.
- */
- val seed by option("--sampling-seed", help = "seed for sampling the workload")
- .long()
- .default(0)
- }
-
- /**
- * A trace conversion strategy.
- */
- private sealed class TraceConverter(name: String) : OptionGroup(name) {
- /**
- * Convert the resources table for the trace.
- *
- * @param trace The trace to convert.
- * @param writer The table writer for the target format.
- * @param samplingOptions The sampling options to use.
- * @return The map of resources that have been selected.
- */
- abstract fun convertResources(
- trace: Trace,
- writer: TableWriter,
- samplingOptions: SamplingOptions?,
- ): Map<String, Resource>
-
- /**
- * Convert the resource states table for the trace.
- *
- * @param trace The trace to convert.
- * @param writer The table writer for the target format.
- * @param selected The set of virtual machines that have been selected.
- * @return The number of rows written.
- */
- abstract fun convertResourceStates(
- trace: Trace,
- writer: TableWriter,
- selected: Map<String, Resource>,
- ): Int
-
- /**
- * A resource in the resource table.
- */
- data class Resource(
- val id: String,
- val startTime: Instant,
- val stopTime: Instant,
- val cpuCount: Int,
- val cpuCapacity: Double,
- val memCapacity: Double,
- )
- }
-
- /**
- * Default implementation of [TraceConverter].
- */
- private class DefaultTraceConverter : TraceConverter("default") {
- /**
- * The logger instance for the converter.
- */
- private val logger = KotlinLogging.logger {}
-
- /**
-         * The interval at which the samples were taken.
- */
- private val sampleInterval = Duration.ofMinutes(5)
-
- /**
- * The difference in CPU usage for the algorithm to cascade samples.
- */
- private val sampleCascadeDiff = 0.1
-
- override fun convertResources(
- trace: Trace,
- writer: TableWriter,
- samplingOptions: SamplingOptions?,
- ): Map<String, Resource> {
- val random = samplingOptions?.let { Random(it.seed) }
- val samplingFraction = samplingOptions?.fraction ?: 1.0
- val reader = checkNotNull(trace.getTable(TABLE_RESOURCE_STATES)).newReader()
-
- var hasNextRow = reader.nextRow()
- val selectedVms = mutableMapOf<String, Resource>()
-
- val idCol = reader.resolve(resourceID)
- val timestampCol = reader.resolve(resourceStateTimestamp)
- val cpuCountCol = reader.resolve(resourceCpuCount)
- val cpuCapacityCol = reader.resolve(resourceCpuCapacity)
- val memCapacityCol = reader.resolve(resourceMemCapacity)
- val memUsageCol = reader.resolve(resourceStateMemUsage)
-
- while (hasNextRow) {
- var id: String
- var cpuCount = 0
- var cpuCapacity = 0.0
- var memCapacity = 0.0
- var memUsage = 0.0
- var startTime = Long.MAX_VALUE
- var stopTime = Long.MIN_VALUE
-
- do {
- id = reader.getString(idCol)!!
-
- val timestamp = reader.getInstant(timestampCol)!!.toEpochMilli()
- startTime = min(startTime, timestamp)
- stopTime = max(stopTime, timestamp)
-
- cpuCount = max(cpuCount, reader.getInt(cpuCountCol))
- cpuCapacity = max(cpuCapacity, reader.getDouble(cpuCapacityCol))
- memCapacity = max(memCapacity, reader.getDouble(memCapacityCol))
-                    if (memUsageCol >= 0) {
- memUsage = max(memUsage, reader.getDouble(memUsageCol))
- }
-
- hasNextRow = reader.nextRow()
-                } while (hasNextRow && id == reader.getString(idCol))
-
- // Sample only a fraction of the VMs
- if (random != null && random.nextDouble() > samplingFraction) {
- continue
- }
-
- logger.info { "Selecting VM $id" }
-
- val startInstant = Instant.ofEpochMilli(startTime) - sampleInterval // Offset by sample interval
- val stopInstant = Instant.ofEpochMilli(stopTime)
-
- selectedVms.computeIfAbsent(id) {
- Resource(it, startInstant, stopInstant, cpuCount, cpuCapacity, max(memCapacity, memUsage))
- }
-
- writer.startRow()
- writer.setString(resourceID, id)
- writer.setInstant(resourceSubmissionTime, startInstant)
- writer.setInstant(resourceDuration, stopInstant)
- writer.setInt(resourceCpuCount, cpuCount)
- writer.setDouble(resourceCpuCapacity, cpuCapacity)
- writer.setDouble(resourceMemCapacity, max(memCapacity, memUsage))
- writer.endRow()
- }
-
- return selectedVms
- }
-
- override fun convertResourceStates(
- trace: Trace,
- writer: TableWriter,
- selected: Map<String, Resource>,
- ): Int {
- val reader = checkNotNull(trace.getTable(TABLE_RESOURCE_STATES)).newReader()
- val sampleInterval = sampleInterval.toMillis()
-
- val idCol = reader.resolve(resourceID)
- val timestampCol = reader.resolve(resourceStateTimestamp)
- val cpuCountCol = reader.resolve(resourceCpuCount)
- val cpuUsageCol = reader.resolve(resourceStateCpuUsage)
-
- var hasNextRow = reader.nextRow()
- var count = 0
-
- while (hasNextRow) {
- val id = reader.getString(idCol)!!
- val resource = selected[id]
- if (resource == null) {
- hasNextRow = reader.nextRow()
- continue
- }
-
- val cpuCount = reader.getInt(cpuCountCol)
- val cpuUsage = reader.getDouble(cpuUsageCol)
-
- val startTimestamp = reader.getInstant(timestampCol)!!.toEpochMilli()
- var timestamp: Long = startTimestamp
- var duration: Long = sampleInterval
-
- // Attempt to cascade further samples into one if they share the same CPU usage
- while (reader.nextRow().also { hasNextRow = it }) {
- val shouldCascade =
- id == reader.getString(idCol) &&
- abs(cpuUsage - reader.getDouble(cpuUsageCol)) < sampleCascadeDiff &&
- cpuCount == reader.getInt(cpuCountCol)
-
- // Check whether the next sample can be cascaded with the current sample:
- // (1) The VM identifier of both samples matches
-                    // (2) The CPU usage is almost identical (lower than `SAMPLE_CASCADE_DIFF`)
- // (3) The CPU count of both samples is identical
- if (!shouldCascade) {
- break
- }
-
- val nextTimestamp = reader.getInstant(timestampCol)!!.toEpochMilli()
-
- // Check whether the interval between both samples is not higher than `SAMPLE_INTERVAL`
- if ((nextTimestamp - timestamp) > sampleInterval) {
- break
- }
-
- duration += nextTimestamp - timestamp
- timestamp = nextTimestamp
- }
-
- writer.startRow()
- writer.setString(resourceID, id)
- writer.setInstant(resourceStateTimestamp, Instant.ofEpochMilli(timestamp))
- writer.setDuration(resourceStateDuration, Duration.ofMillis(duration))
- writer.setInt(resourceCpuCount, cpuCount)
- writer.setDouble(resourceStateCpuUsage, cpuUsage)
- writer.endRow()
-
- count++
- }
-
- return count
- }
- }
-
- /**
- * Implementation of [TraceConverter] for the Azure trace format.
- */
-    private class AzureTraceConverter : TraceConverter("azure") {
- /**
- * The logger instance for the converter.
- */
- private val logger = KotlinLogging.logger {}
-
- /**
- * CPU capacity of the machines used by Azure.
- */
- private val cpuCapacity = 2500.0
-
- /**
-         * The interval at which the samples were taken.
- */
- private val sampleInterval = Duration.ofMinutes(5)
-
- /**
- * The difference in CPU usage for the algorithm to cascade samples.
- */
- private val sampleCascadeDiff = 0.1
-
- override fun convertResources(
- trace: Trace,
- writer: TableWriter,
- samplingOptions: SamplingOptions?,
- ): Map<String, Resource> {
- val random = samplingOptions?.let { Random(it.seed) }
- val samplingFraction = samplingOptions?.fraction ?: 1.0
- val reader = checkNotNull(trace.getTable(TABLE_RESOURCES)).newReader()
-
- val idCol = reader.resolve(resourceID)
- val startTimeCol = reader.resolve(resourceSubmissionTime)
- val stopTimeCol = reader.resolve(resourceDuration)
- val cpuCountCol = reader.resolve(resourceCpuCount)
- val memCapacityCol = reader.resolve(resourceMemCapacity)
-
- val selectedVms = mutableMapOf<String, Resource>()
-
- while (reader.nextRow()) {
- // Sample only a fraction of the VMs
- if (random != null && random.nextDouble() > samplingFraction) {
- continue
- }
-
- val id = reader.getString(idCol)!!
- val startTime = reader.getInstant(startTimeCol)!!.toEpochMilli()
- val stopTime = reader.getInstant(stopTimeCol)!!.toEpochMilli()
- val cpuCount = reader.getInt(cpuCountCol)
- val memCapacity = reader.getDouble(memCapacityCol)
-
- logger.info { "Selecting VM $id" }
-
- val startInstant = Instant.ofEpochMilli(startTime)
- val stopInstant = Instant.ofEpochMilli(stopTime)
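-                // Total CPU capacity in MHz: core count times the per-core constant above (the local shadows it).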
- val cpuCapacity = cpuCount * cpuCapacity
-
- selectedVms.computeIfAbsent(id) {
- Resource(it, startInstant, stopInstant, cpuCount, cpuCapacity, memCapacity)
- }
-
- writer.startRow()
- writer.setString(resourceID, id)
- writer.setInstant(resourceSubmissionTime, startInstant)
- writer.setInstant(resourceDuration, stopInstant)
- writer.setInt(resourceCpuCount, cpuCount)
- writer.setDouble(resourceCpuCapacity, cpuCapacity)
- writer.setDouble(resourceMemCapacity, memCapacity)
- writer.endRow()
- }
-
- return selectedVms
- }
-
- override fun convertResourceStates(
- trace: Trace,
- writer: TableWriter,
- selected: Map<String, Resource>,
- ): Int {
- val reader = checkNotNull(trace.getTable(TABLE_RESOURCE_STATES)).newReader()
- val states = HashMap<String, State>()
- val sampleInterval = sampleInterval.toMillis()
-
- val idCol = reader.resolve(resourceID)
- val timestampCol = reader.resolve(resourceStateTimestamp)
- val cpuUsageCol = reader.resolve(resourceStateCpuUsagePct)
-
- var count = 0
-
- while (reader.nextRow()) {
- val id = reader.getString(idCol)!!
- val resource = selected[id] ?: continue
-
- val cpuUsage = reader.getDouble(cpuUsageCol) * resource.cpuCapacity // MHz
- val state = states.computeIfAbsent(id) { State(resource, cpuUsage, sampleInterval) }
- val timestamp = reader.getInstant(timestampCol)!!.toEpochMilli()
- val delta = (timestamp - state.time)
-
- // Check whether the next sample can be cascaded with the current sample:
- // (1) The CPU usage is almost identical (lower than `SAMPLE_CASCADE_DIFF`)
- // (2) The interval between both samples is not higher than `SAMPLE_INTERVAL`
- if (abs(cpuUsage - state.cpuUsage) <= sampleCascadeDiff && delta <= sampleInterval) {
- state.time = timestamp
- state.duration += delta
- continue
- }
-
- state.write(writer)
-            // Reset the state fields for the new sample
-            state.time = timestamp
-            state.duration = sampleInterval
-            state.cpuUsage = cpuUsage
- // Count write
- count++
- }
-
- for ((_, state) in states) {
- state.write(writer)
- count++
- }
-
- return count
- }
-
- private class State(
- @JvmField val resource: Resource,
- @JvmField var cpuUsage: Double,
- @JvmField var duration: Long,
- ) {
- @JvmField var time: Long = resource.startTime.toEpochMilli()
- private var lastWrite: Long = Long.MIN_VALUE
-
- fun write(writer: TableWriter) {
- // Check whether this timestamp was already written
- if (lastWrite == time) {
- return
- }
- lastWrite = time
-
- writer.startRow()
- writer.setString(resourceID, resource.id)
- writer.setInstant(resourceStateTimestamp, Instant.ofEpochMilli(time))
- writer.setDuration(resourceStateDuration, Duration.ofMillis(duration))
- writer.setDouble(resourceStateCpuUsage, cpuUsage)
- writer.setInt(resourceCpuCount, resource.cpuCount)
- writer.endRow()
- }
- }
- }
-}
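For reference, a typical invocation of this removed command (a sketch based on the options above; paths are placeholders) would have been `convert -f azure --output-format opendc-vm -O converted-trace --sampling-fraction 0.1 --sampling-seed 42 -c azure original-trace`, which converts an Azure-format trace to the opendc-vm format while sampling roughly 10% of the VMs.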
diff --git a/opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/QueryCommand.kt b/opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/QueryCommand.kt
deleted file mode 100644
index 7b7a2a64..00000000
--- a/opendc-trace/opendc-trace-tools/src/main/kotlin/org/opendc/trace/tools/QueryCommand.kt
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Copyright (c) 2022 AtLarge Research
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in all
- * copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-package org.opendc.trace.tools
-
-import com.github.ajalt.clikt.core.CliktCommand
-import com.github.ajalt.clikt.parameters.arguments.argument
-import com.github.ajalt.clikt.parameters.options.option
-import com.github.ajalt.clikt.parameters.options.required
-import com.github.ajalt.clikt.parameters.types.file
-import org.apache.calcite.jdbc.CalciteConnection
-import org.jline.builtins.Styles
-import org.jline.console.Printer
-import org.jline.console.impl.DefaultPrinter
-import org.jline.terminal.Terminal
-import org.jline.terminal.TerminalBuilder
-import org.jline.utils.AttributedStringBuilder
-import org.opendc.trace.Trace
-import org.opendc.trace.calcite.TraceSchema
-import java.nio.charset.StandardCharsets
-import java.sql.DriverManager
-import java.sql.ResultSet
-import java.sql.ResultSetMetaData
-import java.util.Properties
-
-/**
- * A [CliktCommand] that allows users to query workload traces using SQL.
- */
-internal class QueryCommand : CliktCommand(name = "query", help = "Query workload traces") {
- /**
- * The trace to open.
- */
- private val input by option("-i", "--input")
- .file(mustExist = true)
- .required()
-
- /**
- * The input format of the trace.
- */
- private val inputFormat by option("-f", "--format", help = "format of the trace")
- .required()
-
- /**
- * The query to execute.
- */
- private val query by argument()
-
- /**
- * Access to the terminal.
- */
- private val terminal =
- TerminalBuilder.builder()
- .system(false)
- .streams(System.`in`, System.out)
- .encoding(StandardCharsets.UTF_8)
- .build()
-
- /**
- * Helper class to print results to console.
- */
- private val printer = QueryPrinter(terminal)
-
- override fun run() {
- val inputTrace = Trace.open(input, format = inputFormat)
- val info = Properties().apply { this["lex"] = "JAVA" }
- val connection = DriverManager.getConnection("jdbc:calcite:", info).unwrap(CalciteConnection::class.java)
- connection.rootSchema.add("trace", TraceSchema(inputTrace))
- connection.schema = "trace"
-
- val stmt = connection.createStatement()
-
- val start = System.currentTimeMillis()
- val hasResults = stmt.execute(query)
-
- try {
- if (hasResults) {
- do {
- stmt.resultSet.use { rs ->
- val count: Int = printResults(rs)
- val duration = (System.currentTimeMillis() - start) / 1000.0
- printer.println("$count rows selected (${"%.3f".format(duration)} seconds)")
- }
- } while (stmt.moreResults)
- } else {
- val count: Int = stmt.updateCount
- val duration = (System.currentTimeMillis() - start) / 1000.0
-
-                printer.println("$count rows affected (${"%.3f".format(duration)} seconds)")
- }
- } finally {
- stmt.close()
- connection.close()
- }
- }
-
- /**
- * Helper function to print the results to console.
- */
- private fun printResults(rs: ResultSet): Int {
- var count = 0
- val meta: ResultSetMetaData = rs.metaData
-
- val options =
- mapOf(
- Printer.COLUMNS to List(meta.columnCount) { meta.getColumnName(it + 1) },
- Printer.BORDER to "|",
- )
- val data = mutableListOf<Map<String, Any>>()
-
- while (rs.next()) {
- val row = mutableMapOf<String, Any>()
- for (i in 1..meta.columnCount) {
- row[meta.getColumnName(i)] = rs.getObject(i)
- }
- data.add(row)
-
- count++
- }
-
- printer.println(options, data)
-
- return count
- }
-
- /**
- * Helper class to print the results of the query.
- */
- private class QueryPrinter(private val terminal: Terminal) : DefaultPrinter(null) {
- override fun terminal(): Terminal = terminal
-
- override fun highlightAndPrint(
- options: MutableMap<String, Any>,
- exception: Throwable,
- ) {
- if (options.getOrDefault("exception", "stack") == "stack") {
- exception.printStackTrace()
- } else {
- val asb = AttributedStringBuilder()
- asb.append(exception.message, Styles.prntStyle().resolve(".em"))
- asb.toAttributedString().println(terminal())
- }
- }
- }
-}
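Likewise, the removed query command amounted to pointing Calcite at a trace and executing a single statement, e.g. `query -i path/to/trace -f opendc-vm "SELECT max(cpu_usage) FROM resource_states"` (a sketch; the path is a placeholder). Because the default schema is set to trace, table names can be used unqualified.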
diff --git a/opendc-trace/opendc-trace-tools/src/main/resources/log4j2.xml b/opendc-trace/opendc-trace-tools/src/main/resources/log4j2.xml
deleted file mode 100644
index 32d81416..00000000
--- a/opendc-trace/opendc-trace-tools/src/main/resources/log4j2.xml
+++ /dev/null
@@ -1,38 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
- ~ Copyright (c) 2021 AtLarge Research
- ~
- ~ Permission is hereby granted, free of charge, to any person obtaining a copy
- ~ of this software and associated documentation files (the "Software"), to deal
- ~ in the Software without restriction, including without limitation the rights
- ~ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- ~ copies of the Software, and to permit persons to whom the Software is
- ~ furnished to do so, subject to the following conditions:
- ~
- ~ The above copyright notice and this permission notice shall be included in all
- ~ copies or substantial portions of the Software.
- ~
- ~ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- ~ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- ~ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- ~ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- ~ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- ~ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- ~ SOFTWARE.
- -->
-
-<Configuration status="WARN">
- <Appenders>
- <Console name="Console" target="SYSTEM_OUT">
- <PatternLayout pattern="%d{HH:mm:ss.SSS} [%highlight{%-5level}] %logger{36} - %msg%n" disableAnsi="false"/>
- </Console>
- </Appenders>
- <Loggers>
- <Logger name="org.opendc" level="info" additivity="false">
- <AppenderRef ref="Console"/>
- </Logger>
- <Root level="error">
- <AppenderRef ref="Console"/>
- </Root>
- </Loggers>
-</Configuration>