From 3363df4c72a064e590ca98f8e01832cfa4e15a3f Mon Sep 17 00:00:00 2001
From: Dante Niewenhuis
Date: Tue, 27 Aug 2024 13:48:46 +0200
Subject: Renamed input files and internally server is changed to task (#246)

* Updated SimTrace to use a single ArrayDeque instead of three separate lists for deadline, cpuUsage, and coreCount

* Renamed input files to tasks.parquet and fragments.parquet. Renamed server to task. OpenDC now exports task.parquet instead of server.parquet
---
 .../compute/telemetry/ComputeMetricReader.kt       |  92 ++++++-------
 .../org/opendc/compute/telemetry/ComputeMonitor.kt |   4 +-
 .../export/parquet/ComputeExportConfig.kt          |  32 ++---
 .../export/parquet/DfltServerExportColumns.kt      | 153 ---------------------
 .../export/parquet/DfltServiceExportColumns.kt     |  12 +-
 .../export/parquet/DfltTaskExportColumns.kt        | 153 +++++++++++++++++++++
 .../export/parquet/ParquetComputeMonitor.kt        |  22 +--
 .../compute/telemetry/export/parquet/README.md     |  16 +--
 .../opendc/compute/telemetry/table/ServerInfo.kt   |  37 -----
 .../compute/telemetry/table/ServerTableReader.kt   | 109 ---------------
 .../opendc/compute/telemetry/table/ServiceData.kt  |  12 +-
 .../compute/telemetry/table/ServiceTableReader.kt  |  12 +-
 .../org/opendc/compute/telemetry/table/TaskInfo.kt |  37 +++++
 .../compute/telemetry/table/TaskTableReader.kt     | 109 +++++++++++++++
 14 files changed, 400 insertions(+), 400 deletions(-)
 delete mode 100644 opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltServerExportColumns.kt
 create mode 100644 opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltTaskExportColumns.kt
 delete mode 100644 opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServerInfo.kt
 delete mode 100644 opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServerTableReader.kt
 create mode 100644 opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/TaskInfo.kt
 create mode 100644 opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/TaskTableReader.kt

diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/ComputeMetricReader.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/ComputeMetricReader.kt
index 0b11b57d..5bd237fd 100644
--- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/ComputeMetricReader.kt
+++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/ComputeMetricReader.kt
@@ -29,15 +29,15 @@ import kotlinx.coroutines.launch
 import mu.KotlinLogging
 import org.opendc.common.Dispatcher
 import org.opendc.common.asCoroutineDispatcher
-import org.opendc.compute.api.Server
+import org.opendc.compute.api.Task
 import org.opendc.compute.carbon.CarbonTrace
 import org.opendc.compute.service.ComputeService
 import org.opendc.compute.service.driver.Host
 import org.opendc.compute.telemetry.table.HostInfo
 import org.opendc.compute.telemetry.table.HostTableReader
-import org.opendc.compute.telemetry.table.ServerInfo
-import org.opendc.compute.telemetry.table.ServerTableReader
 import org.opendc.compute.telemetry.table.ServiceTableReader
+import org.opendc.compute.telemetry.table.TaskInfo
+import org.opendc.compute.telemetry.table.TaskTableReader
 import java.time.Duration
 import
java.time.Instant @@ -73,9 +73,9 @@ public class ComputeMetricReader( private val hostTableReaders = mutableMapOf() /** - * Mapping from [Server] instances to [ServerTableReaderImpl] + * Mapping from [Task] instances to [TaskTableReaderImpl] */ - private val serverTableReaders = mutableMapOf() + private val taskTableReaders = mutableMapOf() /** * The background job that is responsible for collecting the metrics every cycle. @@ -109,8 +109,8 @@ public class ComputeMetricReader( reader.reset() } - for (server in this.service.servers) { - val reader = this.serverTableReaders.computeIfAbsent(server) { ServerTableReaderImpl(service, it, startTime) } + for (task in this.service.tasks) { + val reader = this.taskTableReaders.computeIfAbsent(task) { TaskTableReaderImpl(service, it, startTime) } reader.record(now) this.monitor.record(reader.copy()) reader.reset() @@ -147,9 +147,9 @@ public class ComputeMetricReader( _hostsUp = table.hostsUp _hostsDown = table.hostsDown - _serversTotal = table.serversTotal - _serversPending = table.serversPending - _serversActive = table.serversActive + _tasksTotal = table.tasksTotal + _tasksPending = table.tasksPending + _tasksActive = table.tasksActive _attemptsSuccess = table.attemptsSuccess _attemptsFailure = table.attemptsFailure _attemptsError = table.attemptsError @@ -171,17 +171,17 @@ public class ComputeMetricReader( get() = _hostsDown private var _hostsDown = 0 - override val serversTotal: Int - get() = _serversTotal - private var _serversTotal = 0 + override val tasksTotal: Int + get() = _tasksTotal + private var _tasksTotal = 0 - override val serversPending: Int - get() = _serversPending - private var _serversPending = 0 + override val tasksPending: Int + get() = _tasksPending + private var _tasksPending = 0 - override val serversActive: Int - get() = _serversActive - private var _serversActive = 0 + override val tasksActive: Int + get() = _tasksActive + private var _tasksActive = 0 override val attemptsSuccess: Int get() = _attemptsSuccess @@ -205,9 +205,9 @@ public class ComputeMetricReader( val stats = service.getSchedulerStats() _hostsUp = stats.hostsAvailable _hostsDown = stats.hostsUnavailable - _serversTotal = stats.serversTotal - _serversPending = stats.serversPending - _serversActive = stats.serversActive + _tasksTotal = stats.tasksTotal + _tasksPending = stats.tasksPending + _tasksActive = stats.tasksActive _attemptsSuccess = stats.attemptsSuccess.toInt() _attemptsFailure = stats.attemptsFailure.toInt() _attemptsError = stats.attemptsError.toInt() @@ -418,21 +418,21 @@ public class ComputeMetricReader( } /** - * An aggregator for server metrics before they are reported. + * An aggregator for task metrics before they are reported. 
*/ - private class ServerTableReaderImpl( + private class TaskTableReaderImpl( private val service: ComputeService, - server: Server, + task: Task, private val startTime: Duration = Duration.ofMillis(0), - ) : ServerTableReader { - override fun copy(): ServerTableReader { - val newServerTable = ServerTableReaderImpl(service, _server) - newServerTable.setValues(this) + ) : TaskTableReader { + override fun copy(): TaskTableReader { + val newTaskTable = TaskTableReaderImpl(service, _task) + newTaskTable.setValues(this) - return newServerTable + return newTaskTable } - override fun setValues(table: ServerTableReader) { + override fun setValues(table: TaskTableReader) { host = table.host _timestamp = table.timestamp @@ -450,25 +450,25 @@ public class ComputeMetricReader( _bootTimeAbsolute = table.bootTimeAbsolute } - private val _server = server + private val _task = task /** - * The static information about this server. + * The static information about this task. */ - override val server = - ServerInfo( - server.uid.toString(), - server.name, + override val task = + TaskInfo( + task.uid.toString(), + task.name, "vm", "x86", - server.image.uid.toString(), - server.image.name, - server.flavor.coreCount, - server.flavor.memorySize, + task.image.uid.toString(), + task.image.name, + task.flavor.coreCount, + task.flavor.memorySize, ) /** - * The [HostInfo] of the host on which the server is hosted. + * The [HostInfo] of the host on which the task is hosted. */ override var host: HostInfo? = null private var _host: Host? = null @@ -531,14 +531,14 @@ public class ComputeMetricReader( * Record the next cycle. */ fun record(now: Instant) { - val newHost = service.lookupHost(_server) + val newHost = service.lookupHost(_task) if (newHost != null && newHost.uid != _host?.uid) { _host = newHost host = HostInfo(newHost.uid.toString(), newHost.name, "x86", newHost.model.cpuCount, newHost.model.memoryCapacity) } - val cpuStats = _host?.getCpuStats(_server) - val sysStats = _host?.getSystemStats(_server) + val cpuStats = _host?.getCpuStats(_task) + val sysStats = _host?.getSystemStats(_task) _timestamp = now _timestampAbsolute = now + startTime @@ -550,7 +550,7 @@ public class ComputeMetricReader( _cpuLostTime = cpuStats?.lostTime ?: 0 _uptime = sysStats?.uptime?.toMillis() ?: 0 _downtime = sysStats?.downtime?.toMillis() ?: 0 - _provisionTime = _server.launchedAt + _provisionTime = _task.launchedAt _bootTime = sysStats?.bootTime if (sysStats != null) { diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/ComputeMonitor.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/ComputeMonitor.kt index b236a7df..1df058fb 100644 --- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/ComputeMonitor.kt +++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/ComputeMonitor.kt @@ -23,8 +23,8 @@ package org.opendc.compute.telemetry import org.opendc.compute.telemetry.table.HostTableReader -import org.opendc.compute.telemetry.table.ServerTableReader import org.opendc.compute.telemetry.table.ServiceTableReader +import org.opendc.compute.telemetry.table.TaskTableReader /** * A monitor that tracks the metrics and events of the OpenDC Compute service. @@ -33,7 +33,7 @@ public interface ComputeMonitor { /** * Record an entry with the specified [reader]. 
*/ - public fun record(reader: ServerTableReader) {} + public fun record(reader: TaskTableReader) {} /** * Record an entry with the specified [reader]. diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/ComputeExportConfig.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/ComputeExportConfig.kt index 02e3e0bb..161c0936 100644 --- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/ComputeExportConfig.kt +++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/ComputeExportConfig.kt @@ -36,8 +36,8 @@ import kotlinx.serialization.json.JsonElement import kotlinx.serialization.json.jsonObject import org.opendc.common.logger.logger import org.opendc.compute.telemetry.table.HostTableReader -import org.opendc.compute.telemetry.table.ServerTableReader import org.opendc.compute.telemetry.table.ServiceTableReader +import org.opendc.compute.telemetry.table.TaskTableReader import org.opendc.trace.util.parquet.exporter.ColListSerializer import org.opendc.trace.util.parquet.exporter.ExportColumn import org.opendc.trace.util.parquet.exporter.Exportable @@ -48,22 +48,22 @@ import org.opendc.trace.util.parquet.exporter.columnSerializer * parquet files for compute workloads. * * @param[hostExportColumns] the columns that will be included in the `host.parquet` raw output file. - * @param[serverExportColumns] the columns that will be included in the `server.parquet` raw output file. + * @param[taskExportColumns] the columns that will be included in the `task.parquet` raw output file. * @param[serviceExportColumns] the columns that will be included in the `service.parquet` raw output file. */ @Serializable(with = ComputeExportConfig.Companion.ComputeExportConfigSerializer::class) public data class ComputeExportConfig( public val hostExportColumns: Set>, - public val serverExportColumns: Set>, + public val taskExportColumns: Set>, public val serviceExportColumns: Set>, ) { public constructor( hostExportColumns: Collection>, - serverExportColumns: Collection>, + taskExportColumns: Collection>, serviceExportColumns: Collection>, ) : this( hostExportColumns.toSet() + DfltHostExportColumns.BASE_EXPORT_COLUMNS, - serverExportColumns.toSet() + DfltServerExportColumns.BASE_EXPORT_COLUMNS, + taskExportColumns.toSet() + DfltTaskExportColumns.BASE_EXPORT_COLUMNS, serviceExportColumns.toSet() + DfltServiceExportColumns.BASE_EXPORT_COLUMNS, ) @@ -74,7 +74,7 @@ public data class ComputeExportConfig( """ | === Compute Export Config === | Host columns : ${hostExportColumns.map { it.name }.toString().trim('[', ']')} - | Server columns : ${serverExportColumns.map { it.name }.toString().trim('[', ']')} + | Task columns : ${taskExportColumns.map { it.name }.toString().trim('[', ']')} | Service columns : ${serviceExportColumns.map { it.name }.toString().trim('[', ']')} """.trimIndent() @@ -87,20 +87,20 @@ public data class ComputeExportConfig( */ public fun loadDfltColumns() { DfltHostExportColumns - DfltServerExportColumns + DfltTaskExportColumns DfltServiceExportColumns } /** * Config that includes all columns defined in [DfltHostExportColumns], - * [DfltServerExportColumns], [DfltServiceExportColumns] among all other loaded - * columns for [HostTableReader], [ServerTableReader] and [ServiceTableReader]. 
+ * [DfltTaskExportColumns], [DfltServiceExportColumns] among all other loaded + * columns for [HostTableReader], [TaskTableReader] and [ServiceTableReader]. */ public val ALL_COLUMNS: ComputeExportConfig by lazy { loadDfltColumns() ComputeExportConfig( hostExportColumns = ExportColumn.getAllLoadedColumns(), - serverExportColumns = ExportColumn.getAllLoadedColumns(), + taskExportColumns = ExportColumn.getAllLoadedColumns(), serviceExportColumns = ExportColumn.getAllLoadedColumns(), ) } @@ -118,8 +118,8 @@ public data class ComputeExportConfig( ListSerializer(columnSerializer()).descriptor, ) element( - "serverExportColumns", - ListSerializer(columnSerializer()).descriptor, + "taskExportColumns", + ListSerializer(columnSerializer()).descriptor, ) element( "serviceExportColumns", @@ -139,12 +139,12 @@ public data class ComputeExportConfig( val elem = jsonDec.decodeJsonElement().jsonObject val hostFields: List> = elem["hostExportColumns"].toFieldList() - val serverFields: List> = elem["serverExportColumns"].toFieldList() + val taskFields: List> = elem["taskExportColumns"].toFieldList() val serviceFields: List> = elem["serviceExportColumns"].toFieldList() return ComputeExportConfig( hostExportColumns = hostFields, - serverExportColumns = serverFields, + taskExportColumns = taskFields, serviceExportColumns = serviceFields, ) } @@ -163,8 +163,8 @@ public data class ComputeExportConfig( encodeSerializableElement( descriptor, 1, - ColListSerializer(columnSerializer()), - value.serverExportColumns.toList(), + ColListSerializer(columnSerializer()), + value.taskExportColumns.toList(), ) encodeSerializableElement( descriptor, diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltServerExportColumns.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltServerExportColumns.kt deleted file mode 100644 index 91d6c9bf..00000000 --- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltServerExportColumns.kt +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright (c) 2024 AtLarge Research - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -package org.opendc.compute.telemetry.export.parquet - -import org.apache.parquet.io.api.Binary -import org.apache.parquet.schema.LogicalTypeAnnotation -import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY -import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.DOUBLE -import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT32 -import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT64 -import org.apache.parquet.schema.Types -import org.opendc.compute.telemetry.table.ServerTableReader -import org.opendc.trace.util.parquet.exporter.ExportColumn - -/** - * This object wraps the [ExportColumn]s to solves ambiguity for field - * names that are included in more than 1 exportable - * - * Additionally, it allows to load all the fields at once by just its symbol, - * so that these columns can be deserialized. Additional fields can be added - * from anywhere, and they are deserializable as long as they are loaded by the jvm. - * - * ```kotlin - * ... - * // Loads the column - * DfltServerExportColumns - * ... - * ``` - */ -public object DfltServerExportColumns { - public val TIMESTAMP: ExportColumn = - ExportColumn( - field = Types.required(INT64).named("timestamp"), - ) { it.timestamp.toEpochMilli() } - - public val TIMESTAMP_ABS: ExportColumn = - ExportColumn( - field = Types.required(INT64).named("timestamp_absolute"), - ) { it.timestampAbsolute.toEpochMilli() } - - public val SERVER_ID: ExportColumn = - ExportColumn( - field = - Types.required(BINARY) - .`as`(LogicalTypeAnnotation.stringType()) - .named("server_id"), - ) { Binary.fromString(it.server.id) } - - public val HOST_ID: ExportColumn = - ExportColumn( - field = - Types.optional(BINARY) - .`as`(LogicalTypeAnnotation.stringType()) - .named("host_id"), - ) { it.host?.id?.let { Binary.fromString(it) } } - - public val SERVER_NAME: ExportColumn = - ExportColumn( - field = - Types.required(BINARY) - .`as`(LogicalTypeAnnotation.stringType()) - .named("server_name"), - ) { Binary.fromString(it.server.name) } - - public val CPU_COUNT: ExportColumn = - ExportColumn( - field = Types.required(INT32).named("cpu_count"), - ) { it.server.cpuCount } - - public val MEM_CAPACITY: ExportColumn = - ExportColumn( - field = Types.required(INT64).named("mem_capacity"), - ) { it.server.memCapacity } - - public val CPU_LIMIT: ExportColumn = - ExportColumn( - field = Types.required(DOUBLE).named("cpu_limit"), - ) { it.cpuLimit } - - public val CPU_TIME_ACTIVE: ExportColumn = - ExportColumn( - field = Types.required(INT64).named("cpu_time_active"), - ) { it.cpuActiveTime } - - public val CPU_TIME_IDLE: ExportColumn = - ExportColumn( - field = Types.required(INT64).named("cpu_time_idle"), - ) { it.cpuIdleTime } - - public val CPU_TIME_STEAL: ExportColumn = - ExportColumn( - field = Types.required(INT64).named("cpu_time_steal"), - ) { it.cpuStealTime } - - public val CPU_TIME_LOST: ExportColumn = - ExportColumn( - field = Types.required(INT64).named("cpu_time_lost"), - ) { it.cpuLostTime } - - public val UP_TIME: ExportColumn = - ExportColumn( - field = Types.required(INT64).named("uptime"), - ) { it.uptime } - - public val DOWN_TIME: ExportColumn = - ExportColumn( - field = Types.required(INT64).named("downtime"), - ) { it.downtime } - - public val PROVISION_TIME: ExportColumn = - ExportColumn( - field = Types.optional(INT64).named("provision_time"), - ) { it.provisionTime?.toEpochMilli() } - - public val BOOT_TIME: ExportColumn = - ExportColumn( - field = Types.optional(INT64).named("boot_time"), - ) { 
it.bootTime?.toEpochMilli() } - - public val BOOT_TIME_ABS: ExportColumn = - ExportColumn( - field = Types.optional(INT64).named("boot_time_absolute"), - ) { it.bootTimeAbsolute?.toEpochMilli() } - - /** - * The columns that are always included in the output file. - */ - internal val BASE_EXPORT_COLUMNS = - setOf( - TIMESTAMP_ABS, - TIMESTAMP, - ) -} diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltServiceExportColumns.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltServiceExportColumns.kt index 89396545..8038060d 100644 --- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltServiceExportColumns.kt +++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltServiceExportColumns.kt @@ -59,15 +59,15 @@ public object DfltServiceExportColumns { field = Types.required(INT32).named("hosts_up"), ) { it.hostsUp } - public val SERVERS_PENDING: ExportColumn = + public val TASKS_PENDING: ExportColumn = ExportColumn( - field = Types.required(INT32).named("servers_pending"), - ) { it.serversPending } + field = Types.required(INT32).named("tasks_pending"), + ) { it.tasksPending } - public val SERVERS_ACTIVE: ExportColumn = + public val TASKS_ACTIVE: ExportColumn = ExportColumn( - field = Types.required(INT32).named("servers_active"), - ) { it.serversActive } + field = Types.required(INT32).named("tasks_active"), + ) { it.tasksActive } public val ATTEMPTS_SUCCESS: ExportColumn = ExportColumn( diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltTaskExportColumns.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltTaskExportColumns.kt new file mode 100644 index 00000000..5bb7dd1f --- /dev/null +++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/DfltTaskExportColumns.kt @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2024 AtLarge Research + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+package org.opendc.compute.telemetry.export.parquet
+
+import org.apache.parquet.io.api.Binary
+import org.apache.parquet.schema.LogicalTypeAnnotation
+import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.BINARY
+import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.DOUBLE
+import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT32
+import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT64
+import org.apache.parquet.schema.Types
+import org.opendc.compute.telemetry.table.TaskTableReader
+import org.opendc.trace.util.parquet.exporter.ExportColumn
+
+/**
+ * This object wraps the [ExportColumn]s to solve ambiguity for field
+ * names that are included in more than one exportable.
+ *
+ * Additionally, it allows all the fields to be loaded at once by referencing its symbol,
+ * so that these columns can be deserialized. Additional fields can be added
+ * from anywhere, and they are deserializable as long as they are loaded by the JVM.
+ *
+ * ```kotlin
+ * ...
+ * // Loads the column
+ * DfltTaskExportColumns
+ * ...
+ * ```
+ */
+public object DfltTaskExportColumns {
+    public val TIMESTAMP: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(INT64).named("timestamp"),
+        ) { it.timestamp.toEpochMilli() }
+
+    public val TIMESTAMP_ABS: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(INT64).named("timestamp_absolute"),
+        ) { it.timestampAbsolute.toEpochMilli() }
+
+    public val TASK_ID: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field =
+                Types.required(BINARY)
+                    .`as`(LogicalTypeAnnotation.stringType())
+                    .named("task_id"),
+        ) { Binary.fromString(it.task.id) }
+
+    public val HOST_ID: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field =
+                Types.optional(BINARY)
+                    .`as`(LogicalTypeAnnotation.stringType())
+                    .named("host_id"),
+        ) { it.host?.id?.let { Binary.fromString(it) } }
+
+    public val TASK_NAME: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field =
+                Types.required(BINARY)
+                    .`as`(LogicalTypeAnnotation.stringType())
+                    .named("task_name"),
+        ) { Binary.fromString(it.task.name) }
+
+    public val CPU_COUNT: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(INT32).named("cpu_count"),
+        ) { it.task.cpuCount }
+
+    public val MEM_CAPACITY: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(INT64).named("mem_capacity"),
+        ) { it.task.memCapacity }
+
+    public val CPU_LIMIT: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(DOUBLE).named("cpu_limit"),
+        ) { it.cpuLimit }
+
+    public val CPU_TIME_ACTIVE: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(INT64).named("cpu_time_active"),
+        ) { it.cpuActiveTime }
+
+    public val CPU_TIME_IDLE: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(INT64).named("cpu_time_idle"),
+        ) { it.cpuIdleTime }
+
+    public val CPU_TIME_STEAL: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(INT64).named("cpu_time_steal"),
+        ) { it.cpuStealTime }
+
+    public val CPU_TIME_LOST: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(INT64).named("cpu_time_lost"),
+        ) { it.cpuLostTime }
+
+    public val UP_TIME: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(INT64).named("uptime"),
+        ) { it.uptime }
+
+    public val DOWN_TIME: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.required(INT64).named("downtime"),
+        ) { it.downtime }
+
+    public val PROVISION_TIME: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.optional(INT64).named("provision_time"),
+        ) { it.provisionTime?.toEpochMilli() }
+
+    public val BOOT_TIME: ExportColumn<TaskTableReader> =
+        ExportColumn(
+            field = Types.optional(INT64).named("boot_time"),
+        ) {
it.bootTime?.toEpochMilli() } + + public val BOOT_TIME_ABS: ExportColumn = + ExportColumn( + field = Types.optional(INT64).named("boot_time_absolute"), + ) { it.bootTimeAbsolute?.toEpochMilli() } + + /** + * The columns that are always included in the output file. + */ + internal val BASE_EXPORT_COLUMNS = + setOf( + TIMESTAMP_ABS, + TIMESTAMP, + ) +} diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/ParquetComputeMonitor.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/ParquetComputeMonitor.kt index 6bea4cc2..3b7a7c0c 100644 --- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/ParquetComputeMonitor.kt +++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/ParquetComputeMonitor.kt @@ -24,8 +24,8 @@ package org.opendc.compute.telemetry.export.parquet import org.opendc.compute.telemetry.ComputeMonitor import org.opendc.compute.telemetry.table.HostTableReader -import org.opendc.compute.telemetry.table.ServerTableReader import org.opendc.compute.telemetry.table.ServiceTableReader +import org.opendc.compute.telemetry.table.TaskTableReader import org.opendc.trace.util.parquet.exporter.ExportColumn import org.opendc.trace.util.parquet.exporter.Exportable import org.opendc.trace.util.parquet.exporter.Exporter @@ -36,15 +36,15 @@ import java.io.File */ public class ParquetComputeMonitor( private val hostExporter: Exporter, - private val serverExporter: Exporter, + private val taskExporter: Exporter, private val serviceExporter: Exporter, ) : ComputeMonitor, AutoCloseable { override fun record(reader: HostTableReader) { hostExporter.write(reader) } - override fun record(reader: ServerTableReader) { - serverExporter.write(reader) + override fun record(reader: TaskTableReader) { + taskExporter.write(reader) } override fun record(reader: ServiceTableReader) { @@ -53,7 +53,7 @@ public class ParquetComputeMonitor( override fun close() { hostExporter.close() - serverExporter.close() + taskExporter.close() serviceExporter.close() } @@ -76,13 +76,13 @@ public class ParquetComputeMonitor( partition = partition, bufferSize = bufferSize, hostExportColumns = computeExportConfig.hostExportColumns, - serverExportColumns = computeExportConfig.serverExportColumns, + taskExportColumns = computeExportConfig.taskExportColumns, serviceExportColumns = computeExportConfig.serviceExportColumns, ) /** * Constructor that loads default [ExportColumn]s defined in - * [DfltHostExportColumns], [DfltServerExportColumns], [DfltServiceExportColumns] + * [DfltHostExportColumns], [DfltTaskExportColumns], [DfltServiceExportColumns] * in case optional parameters are omitted and all fields need to be retrieved. * * @param[base] parent pathname for output file. @@ -94,7 +94,7 @@ public class ParquetComputeMonitor( partition: String, bufferSize: Int, hostExportColumns: Collection>? = null, - serverExportColumns: Collection>? = null, + taskExportColumns: Collection>? = null, serviceExportColumns: Collection>? = null, ): ParquetComputeMonitor { // Loads the fields in case they need to be retrieved if optional params are omitted. 
@@ -107,10 +107,10 @@
                         columns = hostExportColumns ?: Exportable.getAllLoadedColumns(),
                         bufferSize = bufferSize,
                     ),
-                serverExporter =
+                taskExporter =
                     Exporter(
-                        outputFile = File(base, "$partition/server.parquet").also { it.parentFile.mkdirs() },
-                        columns = serverExportColumns ?: Exportable.getAllLoadedColumns(),
+                        outputFile = File(base, "$partition/task.parquet").also { it.parentFile.mkdirs() },
+                        columns = taskExportColumns ?: Exportable.getAllLoadedColumns(),
                         bufferSize = bufferSize,
                     ),
                 serviceExporter =
diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/README.md b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/README.md
index f48bc229..aee63fc9 100644
--- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/README.md
+++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/export/parquet/README.md
@@ -1,8 +1,8 @@
 ### Summary
-Added output configuration, that can be defined in the scenario `.json` file, that allows to select which columns are to be included in the raw oputput files `host.parquet`, `server.parquet` and `service.parquet`.
+Added an output configuration, definable in the scenario `.json` file, that selects which columns are included in the raw output files `host.parquet`, `task.parquet` and `service.parquet`.
 ### Columns
-The 'default' columns are defined in `DfltHostExportcolumns`, `DfltServerExportColumns` and `DfltServiceExportColumns`. Any number of additional columns can be definied anywhere (`ExportColumn`) and it is going to be deserializable as long as it is loaded by the jvm.
+The 'default' columns are defined in `DfltHostExportColumns`, `DfltTaskExportColumns` and `DfltServiceExportColumns`. Any number of additional columns can be defined anywhere (`ExportColumn`) and they are deserializable as long as they are loaded by the JVM.
 ### Deserialization
 Each `ExportColumn` has a `Regex`, used for deserialization. If no custom regex is provided, the default one is used. The default regex matches the column name in case-insensitive manner, either with `_` as in the name or with ` ` (blank space).
@@ -21,7 +21,7 @@ Each `ExportColumn` has a `Regex`, used for deserialization. If no custom regex
   "type": "object",
   "properties": {
     "hostExportColumns": { "type": "array" },
-    "serverExportColumns": { "type": "array" } ,
+    "taskExportColumns": { "type": "array" } ,
     "serviceExportColumns": { "type": "array" } ,
     "required": [ /* NONE REQUIRED */ ]
   }
@@ -49,8 +49,8 @@ Each `ExportColumn` has a `Regex`, used for deserialization. If no custom regex
 ...
 "computeExportConfig": {
   "hostExportColumns": ["timestamp", "timestamp_absolute", "invalid-entry1", "guests_invalid"],
-  "serverExportColumns": ["invalid-entry2"],
-  "serviceExportColumns": ["timestamp", "servers_active", "servers_pending"]
+  "taskExportColumns": ["invalid-entry2"],
+  "serviceExportColumns": ["timestamp", "tasks_active", "tasks_pending"]
 },
 ...
 ```

 ```
 // console output
 10:51:56.561 [ERROR] ColListSerializer - no match found for column "invalid-entry1", ignoring...
 10:51:56.563 [ERROR] ColListSerializer - no match found for column "invalid-entry2", ignoring...
-10:51:56.564 [WARN] ComputeExportConfig - deserialized list of export columns for exportable ServerTableReader produced empty list, falling back to all loaded columns +10:51:56.564 [WARN] ComputeExportConfig - deserialized list of export columns for exportable TaskTableReader produced empty list, falling back to all loaded columns 10:51:56.584 [INFO] ScenariosSpec - | === Compute Export Config === | Host columns : timestamp, timestamp_absolute, guests_invalid -| Server columns : timestamp, timestamp_absolute, server_id, server_name, cpu_count, mem_capacity, cpu_limit, cpu_time_active, cpu_time_idle, cpu_time_steal, cpu_time_lost, uptime, downtime, provision_time, boot_time, boot_time_absolute -| Service columns : timestamp, servers_active, servers_pending +| Task columns : timestamp, timestamp_absolute, task_id, task_name, cpu_count, mem_capacity, cpu_limit, cpu_time_active, cpu_time_idle, cpu_time_steal, cpu_time_lost, uptime, downtime, provision_time, boot_time, boot_time_absolute +| Service columns : timestamp, tasks_active, tasks_pending ``` diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServerInfo.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServerInfo.kt deleted file mode 100644 index fb83bf06..00000000 --- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServerInfo.kt +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright (c) 2021 AtLarge Research - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -package org.opendc.compute.telemetry.table - -/** - * Static information about a server exposed to the telemetry service. 
- */ -public data class ServerInfo( - val id: String, - val name: String, - val type: String, - val arch: String, - val imageId: String, - val imageName: String, - val cpuCount: Int, - val memCapacity: Long, -) diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServerTableReader.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServerTableReader.kt deleted file mode 100644 index a1aed778..00000000 --- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServerTableReader.kt +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (c) 2021 AtLarge Research - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in all - * copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -package org.opendc.compute.telemetry.table - -import org.opendc.compute.telemetry.export.parquet.DfltServerExportColumns -import org.opendc.trace.util.parquet.exporter.Exportable -import java.time.Instant - -/** - * An interface that is used to read a row of a server trace entry. - */ -public interface ServerTableReader : Exportable { - public fun copy(): ServerTableReader - - public fun setValues(table: ServerTableReader) - - /** - * The timestamp of the current entry of the reader relative to the start of the workload. - */ - public val timestamp: Instant - - /** - * The timestamp of the current entry of the reader. - */ - public val timestampAbsolute: Instant - - /** - * The [ServerInfo] of the server to which the row belongs to. - */ - public val server: ServerInfo - - /** - * The [HostInfo] of the host on which the server is hosted or `null` if it has no host. - */ - public val host: HostInfo? - - /** - * The uptime of the host since last time in ms. - */ - public val uptime: Long - - /** - * The downtime of the host since last time in ms. - */ - public val downtime: Long - - /** - * The [Instant] at which the server was enqueued for the scheduler. - */ - public val provisionTime: Instant? - - /** - * The [Instant] at which the server booted relative to the start of the workload. - */ - public val bootTime: Instant? - - /** - * The [Instant] at which the server booted. - */ - public val bootTimeAbsolute: Instant? - - /** - * The capacity of the CPUs of Host on which the server is running (in MHz). - */ - public val cpuLimit: Double - - /** - * The duration (in seconds) that a CPU was active in the server. - */ - public val cpuActiveTime: Long - - /** - * The duration (in seconds) that a CPU was idle in the server. 
- */ - public val cpuIdleTime: Long - - /** - * The duration (in seconds) that a vCPU wanted to run, but no capacity was available. - */ - public val cpuStealTime: Long - - /** - * The duration (in seconds) of CPU time that was lost due to interference. - */ - public val cpuLostTime: Long -} - -// Loads the default export fields for deserialization whenever this file is loaded. -private val _ignore = DfltServerExportColumns diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServiceData.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServiceData.kt index ad4b3d49..7a8ba6a7 100644 --- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServiceData.kt +++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServiceData.kt @@ -31,9 +31,9 @@ public data class ServiceData( val timestamp: Instant, val hostsUp: Int, val hostsDown: Int, - val serversTotal: Int, - val serversPending: Int, - val serversActive: Int, + val tasksTotal: Int, + val tasksPending: Int, + val tasksActive: Int, val attemptsSuccess: Int, val attemptsFailure: Int, val attemptsError: Int, @@ -47,9 +47,9 @@ public fun ServiceTableReader.toServiceData(): ServiceData { timestamp, hostsUp, hostsDown, - serversTotal, - serversPending, - serversActive, + tasksTotal, + tasksPending, + tasksActive, attemptsSuccess, attemptsFailure, attemptsError, diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServiceTableReader.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServiceTableReader.kt index c3a92fc7..23630fb4 100644 --- a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServiceTableReader.kt +++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/ServiceTableReader.kt @@ -54,19 +54,19 @@ public interface ServiceTableReader : Exportable { public val hostsDown: Int /** - * The number of servers that are registered with the compute service. + * The number of tasks that are registered with the compute service. */ - public val serversTotal: Int + public val tasksTotal: Int /** - * The number of servers that are pending to be scheduled. + * The number of tasks that are pending to be scheduled. */ - public val serversPending: Int + public val tasksPending: Int /** - * The number of servers that are currently active. + * The number of tasks that are currently active. */ - public val serversActive: Int + public val tasksActive: Int /** * The scheduling attempts that were successful. 
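To make the renamed counters concrete, here is a minimal sketch — hypothetical code, not part of this patch — of a `ComputeMonitor` that consumes them through `ServiceTableReader`. Only the service overload is overridden; `ComputeMonitor` provides no-op defaults for the other `record` methods.

```kotlin
import org.opendc.compute.telemetry.ComputeMonitor
import org.opendc.compute.telemetry.table.ServiceTableReader

// Hypothetical monitor that logs scheduler pressure using the renamed
// task counters (previously serversPending/serversActive/serversTotal).
class QueueDepthMonitor : ComputeMonitor {
    override fun record(reader: ServiceTableReader) {
        println(
            "${reader.timestamp}: ${reader.tasksPending} pending, " +
                "${reader.tasksActive} active of ${reader.tasksTotal} total tasks",
        )
    }
}
```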
diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/TaskInfo.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/TaskInfo.kt new file mode 100644 index 00000000..2d1ae91a --- /dev/null +++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/TaskInfo.kt @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2021 AtLarge Research + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +package org.opendc.compute.telemetry.table + +/** + * Static information about a task exposed to the telemetry service. + */ +public data class TaskInfo( + val id: String, + val name: String, + val type: String, + val arch: String, + val imageId: String, + val imageName: String, + val cpuCount: Int, + val memCapacity: Long, +) diff --git a/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/TaskTableReader.kt b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/TaskTableReader.kt new file mode 100644 index 00000000..1e38d5eb --- /dev/null +++ b/opendc-compute/opendc-compute-telemetry/src/main/kotlin/org/opendc/compute/telemetry/table/TaskTableReader.kt @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2021 AtLarge Research + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */
+
+package org.opendc.compute.telemetry.table
+
+import org.opendc.compute.telemetry.export.parquet.DfltTaskExportColumns
+import org.opendc.trace.util.parquet.exporter.Exportable
+import java.time.Instant
+
+/**
+ * An interface that is used to read a row of a task trace entry.
+ */
+public interface TaskTableReader : Exportable {
+    public fun copy(): TaskTableReader
+
+    public fun setValues(table: TaskTableReader)
+
+    /**
+     * The timestamp of the current entry of the reader relative to the start of the workload.
+     */
+    public val timestamp: Instant
+
+    /**
+     * The timestamp of the current entry of the reader.
+     */
+    public val timestampAbsolute: Instant
+
+    /**
+     * The [TaskInfo] of the task to which the row belongs.
+     */
+    public val task: TaskInfo
+
+    /**
+     * The [HostInfo] of the host on which the task is hosted, or `null` if it has no host.
+     */
+    public val host: HostInfo?
+
+    /**
+     * The uptime of the host since the previous sample, in ms.
+     */
+    public val uptime: Long
+
+    /**
+     * The downtime of the host since the previous sample, in ms.
+     */
+    public val downtime: Long
+
+    /**
+     * The [Instant] at which the task was enqueued for the scheduler.
+     */
+    public val provisionTime: Instant?
+
+    /**
+     * The [Instant] at which the task booted, relative to the start of the workload.
+     */
+    public val bootTime: Instant?
+
+    /**
+     * The [Instant] at which the task booted.
+     */
+    public val bootTimeAbsolute: Instant?
+
+    /**
+     * The capacity of the CPUs of the host on which the task is running (in MHz).
+     */
+    public val cpuLimit: Double
+
+    /**
+     * The duration (in seconds) that a CPU was active in the task.
+     */
+    public val cpuActiveTime: Long
+
+    /**
+     * The duration (in seconds) that a CPU was idle in the task.
+     */
+    public val cpuIdleTime: Long
+
+    /**
+     * The duration (in seconds) that a vCPU wanted to run, but no capacity was available.
+     */
+    public val cpuStealTime: Long
+
+    /**
+     * The duration (in seconds) of CPU time that was lost due to interference.
+     */
+    public val cpuLostTime: Long
+}
+
+// Loads the default export fields for deserialization whenever this file is loaded.
+private val _ignore = DfltTaskExportColumns
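As the doc comments and README above note, additional export columns can be defined anywhere and become deserializable once the JVM loads them. The following sketch — hypothetical, but following the `ExportColumn` constructor pattern used throughout this patch — defines a custom task column; once the object is loaded, `cpu_time_total` could be listed under `taskExportColumns` in the scenario `.json` alongside the default columns.

```kotlin
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.INT64
import org.apache.parquet.schema.Types
import org.opendc.compute.telemetry.table.TaskTableReader
import org.opendc.trace.util.parquet.exporter.ExportColumn

// Hypothetical user-defined column: combined active + steal CPU time per task.
object CustomTaskExportColumns {
    val CPU_TIME_TOTAL: ExportColumn<TaskTableReader> =
        ExportColumn(
            field = Types.required(INT64).named("cpu_time_total"),
        ) { it.cpuActiveTime + it.cpuStealTime }
}
```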