| author | Fabian Mastenbroek <mail.fabianm@gmail.com> | 2022-05-06 17:47:44 +0200 |
|---|---|---|
| committer | Fabian Mastenbroek <mail.fabianm@gmail.com> | 2022-05-06 17:47:44 +0200 |
| commit | a9657e4fa3b15e2c1c11884b5a250b0861bcc21d (patch) | |
| tree | 6b25de3d7a1def150ab4977a45723c52167e7211 /opendc-compute/opendc-compute-workload | |
| parent | 48da4538707cd074969287724ca6f02823f2ff5a (diff) | |
| parent | 8e3905273c7a3f2df4df5d5840e4088d99b0dffb (diff) | |
merge: Expose metrics directly to user (#80)
This pull request adds the ability to access the metrics of resources modeled
by the OpenDC Compute, Workflow, FaaS, and TensorFlow services directly from
their corresponding interfaces. Previously, users would have to interact with
OpenTelemetry to obtain these values, which is complex and introduces
significant overhead.
With this pull request, users can access the metrics of all cloud resources
modeled by OpenDC via methods such as `getSchedulerStats()`, etc.
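As a rough sketch of the new usage (only `getSchedulerStats()` and `hosts` are named by this change; the surrounding function and output are illustrative, not part of the PR):

```kotlin
import org.opendc.compute.service.ComputeService

// Illustrative only: read metrics directly from the service,
// without going through an OpenTelemetry exporter.
fun reportSchedulerMetrics(service: ComputeService) {
    val stats = service.getSchedulerStats() // exposed directly by this change
    println("Scheduler stats: $stats")
    println("Hosts managed by the service: ${service.hosts.size}")
}
```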
**Breaking Changes**
- `ComputeService.hostCount` removed in favour of `ComputeService.hosts.size`
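For callers hitting this breaking change, the replacement is mechanical; a minimal sketch, assuming `service` is any `ComputeService` instance:

```kotlin
import org.opendc.compute.service.ComputeService

// Before (removed by this PR): service.hostCount
// After: derive the count from the exposed host collection.
fun hostCount(service: ComputeService): Int = service.hosts.size
```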
Diffstat (limited to 'opendc-compute/opendc-compute-workload')
| -rw-r--r-- | opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeServiceHelper.kt | 21 |
| -rw-r--r-- | opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMonitor.kt (renamed from opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMetricExporter.kt) | 8 |
2 files changed, 19 insertions, 10 deletions
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeServiceHelper.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeServiceHelper.kt
index 4b0b343f..21cfdad2 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeServiceHelper.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/ComputeServiceHelper.kt
@@ -26,6 +26,7 @@ import kotlinx.coroutines.coroutineScope
 import kotlinx.coroutines.delay
 import kotlinx.coroutines.launch
 import kotlinx.coroutines.yield
+import org.opendc.compute.api.Server
 import org.opendc.compute.service.ComputeService
 import org.opendc.compute.service.scheduler.ComputeScheduler
 import org.opendc.compute.simulator.SimHost
@@ -81,9 +82,19 @@ public class ComputeServiceHelper(
     }

     /**
-     * Converge a simulation of the [ComputeService] by replaying the workload trace given by [trace].
+     * Run a simulation of the [ComputeService] by replaying the workload trace given by [trace].
+     *
+     * @param trace The trace to simulate.
+     * @param seed The seed for the simulation.
+     * @param servers A list to which the created servers is added.
+     * @param submitImmediately A flag to indicate that the servers are scheduled immediately (so not at their start time).
      */
-    public suspend fun run(trace: List<VirtualMachine>, seed: Long, submitImmediately: Boolean = false) {
+    public suspend fun run(
+        trace: List<VirtualMachine>,
+        seed: Long,
+        servers: MutableList<Server>? = null,
+        submitImmediately: Boolean = false
+    ) {
         val random = Random(seed)
         val injector = failureModel?.createInjector(context, clock, service, random)
         val client = service.newClient()
@@ -129,12 +140,14 @@ public class ComputeServiceHelper(
                     meta = mapOf("workload" to workload)
                 )

+                servers?.add(server)
+
                 // Wait for the server reach its end time
                 val endTime = entry.stopTime.toEpochMilli()
                 delay(endTime + workloadOffset - clock.millis() + 5 * 60 * 1000)

-                // Delete the server after reaching the end-time of the virtual machine
-                server.delete()
+                // Stop the server after reaching the end-time of the virtual machine
+                server.stop()
             }
         }
     }
diff --git a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMetricExporter.kt b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMonitor.kt
index a46885f4..6c515118 100644
--- a/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMetricExporter.kt
+++ b/opendc-compute/opendc-compute-workload/src/main/kotlin/org/opendc/compute/workload/export/parquet/ParquetComputeMonitor.kt
@@ -22,8 +22,6 @@

 package org.opendc.compute.workload.export.parquet

-import io.opentelemetry.sdk.common.CompletableResultCode
-import org.opendc.telemetry.compute.ComputeMetricExporter
 import org.opendc.telemetry.compute.ComputeMonitor
 import org.opendc.telemetry.compute.table.HostTableReader
 import org.opendc.telemetry.compute.table.ServerTableReader
@@ -33,7 +31,7 @@ import java.io.File
 /**
  * A [ComputeMonitor] that logs the events to a Parquet file.
  */
-public class ParquetComputeMetricExporter(base: File, partition: String, bufferSize: Int) : ComputeMetricExporter() {
+public class ParquetComputeMonitor(base: File, partition: String, bufferSize: Int) : ComputeMonitor, AutoCloseable {
     private val serverWriter = ParquetServerDataWriter(
         File(base, "server/$partition/data.parquet").also { it.parentFile.mkdirs() },
         bufferSize
@@ -61,11 +59,9 @@ public class ParquetComputeMetricExporter(base: File, partition: String, bufferS
         serviceWriter.write(reader)
     }

-    override fun shutdown(): CompletableResultCode {
+    override fun close() {
         hostWriter.close()
         serviceWriter.close()
         serverWriter.close()
-
-        return CompletableResultCode.ofSuccess()
     }
 }
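Taken together, a caller of the changed `run` signature and the renamed monitor might look roughly like the sketch below; the trace construction, the `VirtualMachine` import path, and the monitor wiring are assumptions, not part of this diff:

```kotlin
import org.opendc.compute.api.Server
import org.opendc.compute.workload.ComputeServiceHelper
import org.opendc.compute.workload.VirtualMachine // assumed import path
import org.opendc.compute.workload.export.parquet.ParquetComputeMonitor
import java.io.File

// Sketch: replay a trace and collect the created servers via the new
// optional `servers` parameter, then close the renamed Parquet monitor.
suspend fun replay(helper: ComputeServiceHelper, trace: List<VirtualMachine>) {
    val servers = mutableListOf<Server>()
    helper.run(trace, seed = 0L, servers = servers)
    println("Replayed ${servers.size} servers")

    // ParquetComputeMonitor is now AutoCloseable, so it works with `use { }`.
    ParquetComputeMonitor(File("output"), "seed=0", bufferSize = 4096).use {
        // ... register it as the ComputeMonitor for the telemetry pipeline (not shown here) ...
    }
}
```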
