From 6f12a5e3bc59f4ca386cf8a6b231c1464ea10169 Mon Sep 17 00:00:00 2001
From: Fabian Mastenbroek
Date: Thu, 7 May 2020 21:26:30 +0200
Subject: feat: Add support for more sophisticated logging

---
 opendc/opendc-compute/build.gradle.kts              |  1 +
 .../virt/service/SimpleVirtProvisioningService.kt   | 13 +++---
 opendc/opendc-experiments-sc20/build.gradle.kts     |  6 ++-
 .../opendc/experiments/sc20/Sc20Experiment.kt       | 31 +++++++++------
 .../opendc/experiments/sc20/Sc20ParquetReporter.kt  |  5 ++-
 .../experiments/sc20/Sc20ParquetTraceReader.kt      |  5 ++-
 .../src/main/resources/log4j2.xml                   | 46 ++++++++++++++++++++++
 7 files changed, 86 insertions(+), 21 deletions(-)
 create mode 100644 opendc/opendc-experiments-sc20/src/main/resources/log4j2.xml

diff --git a/opendc/opendc-compute/build.gradle.kts b/opendc/opendc-compute/build.gradle.kts
index 09c904f2..7d43b064 100644
--- a/opendc/opendc-compute/build.gradle.kts
+++ b/opendc/opendc-compute/build.gradle.kts
@@ -33,6 +33,7 @@ dependencies {
     implementation(kotlin("stdlib"))
     api(project(":odcsim:odcsim-api"))
     api(project(":opendc:opendc-core"))
+    implementation("io.github.microutils:kotlin-logging:1.7.9")
 
     testRuntimeOnly(project(":odcsim:odcsim-engine-omega"))
     testImplementation("org.junit.jupiter:junit-jupiter-api:${Library.JUNIT_JUPITER}")
diff --git a/opendc/opendc-compute/src/main/kotlin/com/atlarge/opendc/compute/virt/service/SimpleVirtProvisioningService.kt b/opendc/opendc-compute/src/main/kotlin/com/atlarge/opendc/compute/virt/service/SimpleVirtProvisioningService.kt
index 2185b372..3603ae69 100644
--- a/opendc/opendc-compute/src/main/kotlin/com/atlarge/opendc/compute/virt/service/SimpleVirtProvisioningService.kt
+++ b/opendc/opendc-compute/src/main/kotlin/com/atlarge/opendc/compute/virt/service/SimpleVirtProvisioningService.kt
@@ -24,10 +24,13 @@ import kotlinx.coroutines.flow.onEach
 import kotlinx.coroutines.launch
 import kotlinx.coroutines.suspendCancellableCoroutine
 import kotlinx.coroutines.withContext
+import mu.KotlinLogging
 import kotlin.coroutines.Continuation
 import kotlin.coroutines.resume
 import kotlin.math.max
 
+private val logger = KotlinLogging.logger {}
+
 @OptIn(ExperimentalCoroutinesApi::class)
 class SimpleVirtProvisioningService(
     public override val allocationPolicy: AllocationPolicy,
@@ -141,7 +144,7 @@
                     unscheduledVms++
                     incomingImages -= imageInstance
 
-                    println("[${clock.millis()}] CANNOT SPAWN ${imageInstance.image}")
+                    logger.warn("Failed to spawn ${imageInstance.image}: does not fit [${clock.millis()}]")
                     continue
                 } else {
                     break
@@ -149,7 +152,7 @@
                 }
 
                 try {
-                    println("[${clock.millis()}] SPAWN ${imageInstance.image} on ${selectedHv.server.uid} ${selectedHv.server.name} ${selectedHv.server.flavor}")
+                    logger.info { "Spawning ${imageInstance.image} on ${selectedHv.server.uid} ${selectedHv.server.name} ${selectedHv.server.flavor}" }
                     incomingImages -= imageInstance
 
                     // Speculatively update the hypervisor view information to prevent other images in the queue from
@@ -174,7 +177,7 @@
                         when (event) {
                             is ServerEvent.StateChanged -> {
                                 if (event.server.state == ServerState.SHUTOFF) {
-                                    println("[${clock.millis()}] FINISH ${event.server.uid} ${event.server.name} ${event.server.flavor}")
+                                    logger.info { "Server ${event.server.uid} ${event.server.name} ${event.server.flavor} finished." }
                                    runningVms--
                                    finishedVms++
@@ -191,13 +194,13 @@
                             }
                                 .launchIn(this)
                 } catch (e: InsufficientMemoryOnServerException) {
-                    println("Unable to deploy image due to insufficient memory")
+                    logger.error("Failed to deploy VM", e)
 
                     selectedHv.numberOfActiveServers--
                     selectedHv.provisionedCores -= imageInstance.flavor.cpuCount
                     selectedHv.availableMemory += requiredMemory
                 } catch (e: Throwable) {
-                    e.printStackTrace()
+                    logger.error("Failed to deploy VM", e)
                 }
             }
         }
diff --git a/opendc/opendc-experiments-sc20/build.gradle.kts b/opendc/opendc-experiments-sc20/build.gradle.kts
index ccfa3038..76ec7cc4 100644
--- a/opendc/opendc-experiments-sc20/build.gradle.kts
+++ b/opendc/opendc-experiments-sc20/build.gradle.kts
@@ -40,12 +40,14 @@ dependencies {
     implementation(project(":opendc:opendc-format"))
     implementation(kotlin("stdlib"))
     implementation("com.xenomachina:kotlin-argparser:2.0.7")
-    api("com.fasterxml.jackson.module:jackson-module-kotlin:2.9.8")
+    implementation("org.slf4j:slf4j-api:${Library.SLF4J}")
+    implementation("io.github.microutils:kotlin-logging:1.7.9")
     implementation("org.apache.parquet:parquet-avro:1.11.0")
     implementation("org.apache.hadoop:hadoop-client:3.2.1") {
         exclude(group = "org.slf4j", module = "slf4j-log4j12")
+        exclude(group = "log4j")
     }
-    runtimeOnly("org.slf4j:slf4j-simple:${Library.SLF4J}")
+    runtimeOnly("org.apache.logging.log4j:log4j-slf4j-impl:2.13.1")
     runtimeOnly(project(":odcsim:odcsim-engine-omega"))
 
     testImplementation("org.junit.jupiter:junit-jupiter-api:${Library.JUNIT_JUPITER}")
diff --git a/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20Experiment.kt b/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20Experiment.kt
index c74189c2..2029d3e7 100644
--- a/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20Experiment.kt
+++ b/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20Experiment.kt
@@ -65,6 +65,7 @@ import kotlinx.coroutines.flow.onEach
 import kotlinx.coroutines.launch
 import kotlinx.coroutines.runBlocking
 import kotlinx.coroutines.withContext
+import mu.KotlinLogging
 import java.io.File
 import java.io.FileReader
 import java.util.ServiceLoader
@@ -73,6 +74,8 @@ import kotlin.math.ln
 import kotlin.math.max
 import kotlin.random.Random
 
+private val logger = KotlinLogging.logger {}
+
 class ExperimentParameters(parser: ArgParser) {
     val traceDirectory by parser.storing("path to the trace directory")
     val environmentFile by parser.storing("path to the environment file")
@@ -244,12 +247,12 @@ suspend fun processTrace(reader: TraceReader<VmWorkload>, scheduler: SimpleVirtP
             // Check if VM in topology
             val clusterName = vmPlacements[vmId]
             if (clusterName == null) {
-                println("Could not find placement data in VM placement file for VM $vmId")
+                logger.warn { "Could not find placement data in VM placement file for VM $vmId" }
                 continue
             }
             val machineInCluster = hypervisors.ceiling(clusterName)?.contains(clusterName) ?: false
             if (machineInCluster) {
-                println("Ignored VM")
+                logger.info { "Ignored VM $vmId" }
                 continue
             }
         }
@@ -290,13 +293,13 @@
 @OptIn(ExperimentalCoroutinesApi::class)
 fun main(args: Array<String>) {
     val cli = ArgParser(args).parseInto(::ExperimentParameters)
-    println("trace-directory: ${cli.traceDirectory}")
-    println("environment-file: ${cli.environmentFile}")
-    println("performance-interference-file: ${cli.performanceInterferenceFile}")
-    println("selected-vms-file: ${cli.selectedVmsFile}")
-    println("seed: ${cli.seed}")
-    println("failures: ${cli.failures}")
-    println("allocation-policy: ${cli.allocationPolicy}")
+    logger.info("trace-directory: ${cli.traceDirectory}")
+    logger.info("environment-file: ${cli.environmentFile}")
+    logger.info("performance-interference-file: ${cli.performanceInterferenceFile}")
+    logger.info("selected-vms-file: ${cli.selectedVmsFile}")
+    logger.info("seed: ${cli.seed}")
+    logger.info("failures: ${cli.failures}")
+    logger.info("allocation-policy: ${cli.allocationPolicy}")
 
     val start = System.currentTimeMillis()
     val reporter: Sc20Reporter = Sc20ParquetReporter(cli.outputFile)
@@ -349,7 +352,7 @@ fun main(args: Array<String>) {
         val (bareMetalProvisioner, scheduler) = createProvisioner(root, environmentReader, allocationPolicy)
 
         val failureDomain = if (cli.failures) {
-            println("ENABLING failures")
+            logger.info("ENABLING failures")
             createFailureDomain(cli.seed, cli.failureInterval, bareMetalProvisioner, chan)
         } else {
             null
@@ -358,11 +361,15 @@ fun main(args: Array<String>) {
         attachMonitor(scheduler, reporter)
         processTrace(traceReader, scheduler, chan, reporter, vmPlacements)
 
-        println("Finish SUBMIT=${scheduler.submittedVms} FAIL=${scheduler.unscheduledVms} QUEUE=${scheduler.queuedVms} RUNNING=${scheduler.runningVms} FINISH=${scheduler.finishedVms}")
+        logger.debug("SUBMIT=${scheduler.submittedVms}")
+        logger.debug("FAIL=${scheduler.unscheduledVms}")
+        logger.debug("QUEUED=${scheduler.queuedVms}")
+        logger.debug("RUNNING=${scheduler.runningVms}")
+        logger.debug("FINISHED=${scheduler.finishedVms}")
 
         failureDomain?.cancel()
         scheduler.terminate()
-        println("[${simulationContext.clock.millis()}] DONE ${System.currentTimeMillis() - start} milliseconds")
+        logger.info("Simulation took ${System.currentTimeMillis() - start} milliseconds")
     }
 
     runBlocking {
diff --git a/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20ParquetReporter.kt b/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20ParquetReporter.kt
index 2b653b69..fc3e98ae 100644
--- a/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20ParquetReporter.kt
+++ b/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20ParquetReporter.kt
@@ -6,6 +6,7 @@ import com.atlarge.opendc.compute.core.ServerState
 import com.atlarge.opendc.compute.metal.driver.BareMetalDriver
 import com.atlarge.opendc.compute.virt.driver.VirtDriver
 import kotlinx.coroutines.flow.first
+import mu.KotlinLogging
 import org.apache.avro.SchemaBuilder
 import org.apache.avro.generic.GenericData
 import org.apache.hadoop.fs.Path
@@ -14,6 +15,8 @@ import org.apache.parquet.hadoop.metadata.CompressionCodecName
 import java.util.concurrent.ArrayBlockingQueue
 import kotlin.concurrent.thread
 
+private val logger = KotlinLogging.logger {}
+
 class Sc20ParquetReporter(
     destination: String
 ) : Sc20Reporter {
@@ -91,7 +94,7 @@
             )
         }
 
-        println("[${simulationContext.clock.millis()}] HOST ${server.uid} ${server.state}")
+        logger.info("Host ${server.uid} changed state ${server.state} [${simulationContext.clock.millis()}]")
         lastServerStates[server] = Pair(server.state, simulationContext.clock.millis())
     }
 
diff --git a/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20ParquetTraceReader.kt b/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20ParquetTraceReader.kt
index 0a7718e9..8ae1693c 100644
--- a/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20ParquetTraceReader.kt
+++ b/opendc/opendc-experiments-sc20/src/main/kotlin/com/atlarge/opendc/experiments/sc20/Sc20ParquetTraceReader.kt
@@ -32,6 +32,7 @@ import com.atlarge.opendc.compute.core.workload.VmWorkload
 import com.atlarge.opendc.core.User
 import com.atlarge.opendc.format.trace.TraceEntry
 import com.atlarge.opendc.format.trace.TraceReader
+import mu.KotlinLogging
 import org.apache.avro.generic.GenericData
 import org.apache.hadoop.fs.Path
 import org.apache.parquet.avro.AvroParquetReader
@@ -49,6 +50,8 @@ import java.util.concurrent.ArrayBlockingQueue
 import kotlin.concurrent.thread
 import kotlin.random.Random
 
+private val logger = KotlinLogging.logger {}
+
 /**
  * A [TraceReader] for the internal VM workload trace format.
  *
@@ -190,7 +193,7 @@ class Sc20ParquetTraceReader(
             assert(uid !in takenIds)
             takenIds += uid
 
-            println(id)
+            logger.info("Processing VM $id")
 
             val internalBuffer = mutableListOf()
             val externalBuffer = mutableListOf()
diff --git a/opendc/opendc-experiments-sc20/src/main/resources/log4j2.xml b/opendc/opendc-experiments-sc20/src/main/resources/log4j2.xml
new file mode 100644
index 00000000..77a15e55
--- /dev/null
+++ b/opendc/opendc-experiments-sc20/src/main/resources/log4j2.xml
@@ -0,0 +1,46 @@
+[46 lines of Log4j 2 XML configuration; the element markup was not preserved in this rendering]
--
cgit v1.2.3
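
The body of the new log4j2.xml is not reproduced above, only its 46-line length. A minimal Log4j 2 configuration consistent with the rest of the patch (console output through the log4j-slf4j-impl binding added to build.gradle.kts) might look like the sketch below; the console appender, pattern layout, and per-package levels for Hadoop and Parquet are assumptions for illustration, not the contents of the committed file.

<?xml version="1.0" encoding="UTF-8"?>
<!-- Hypothetical sketch, not the committed file: route all logging to a console appender. -->
<Configuration status="WARN">
    <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
            <!-- Timestamp, thread, level, logger name, message -->
            <PatternLayout pattern="%d{HH:mm:ss.SSS} [%t] %-5level %logger{36} - %msg%n"/>
        </Console>
    </Appenders>
    <Loggers>
        <!-- Assumed levels: quiet the Hadoop/Parquet internals pulled in by hadoop-client and parquet-avro. -->
        <Logger name="org.apache.hadoop" level="warn"/>
        <Logger name="org.apache.parquet" level="warn"/>
        <Root level="info">
            <AppenderRef ref="Console"/>
        </Root>
    </Loggers>
</Configuration>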