| author | Fabian Mastenbroek <mail.fabianm@gmail.com> | 2022-09-30 20:33:37 +0200 |
|---|---|---|
| committer | Fabian Mastenbroek <mail.fabianm@gmail.com> | 2022-10-03 20:47:11 +0200 |
| commit | ab0ae4779a674dd07d85ded4a812332d93888bc1 (patch) | |
| tree | 890ee7c52dc47e672ccfcc6060e071e9e867fdbf /opendc-experiments/opendc-experiments-capelin/src/main | |
| parent | 4010d0cfb49bb8a0ffdb2c3ac26fc0c8417a0bbf (diff) | |
refactor(exp/capelin): Use experiment base for Capelin experiments
This change updates the Capelin experiments to use the new
`opendc-experiments-base` module for setting up the experimental
environment and simulating the workloads.
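For readers unfamiliar with the new experiment base, the flow this commit adopts looks roughly as follows. This is an editorial sketch rather than code from the commit: the helper names (`Provisioner`, `setupComputeService`, `setupHosts`, `registerComputeMonitor`, `createComputeScheduler`, `replay`) and the service-domain string are taken from the diff below, while the wrapper function, its parameters, the `java.util.Random` import, and the `failureModel`/`interference` values are assumptions made for illustration.

```kotlin
// Editorial sketch of the opendc-experiments-base flow adopted by this commit.
// Imports mirror the new import block in the diff below; the wrapper function and
// the failureModel/interference arguments are illustrative assumptions.
import org.opendc.compute.service.ComputeService
import org.opendc.compute.workload.ComputeWorkloadLoader
import org.opendc.compute.workload.createComputeScheduler
import org.opendc.compute.workload.replay
import org.opendc.experiments.capelin.model.Scenario
import org.opendc.experiments.capelin.topology.clusterTopology
import org.opendc.experiments.compute.*
import org.opendc.experiments.provisioner.Provisioner
import org.opendc.simulator.core.runBlockingSimulation
import java.io.File
import java.util.Random

fun runExample(envPath: File, workloadLoader: ComputeWorkloadLoader, scenario: Scenario, seed: Long) =
    runBlockingSimulation {
        val serviceDomain = "compute.opendc.org"
        val topology = clusterTopology(File(envPath, "${scenario.topology.name}.txt"))

        Provisioner(coroutineContext, clock, seed).use { provisioner ->
            // Provision the compute service and the simulated hosts as discrete steps.
            provisioner.runSteps(
                setupComputeService(serviceDomain, { createComputeScheduler(scenario.allocationPolicy, Random(it.seeder.nextLong())) }),
                setupHosts(serviceDomain, topology.resolve(), optimize = true)
            )

            // Resolve the provisioned service and replay the workload trace against it.
            val service = provisioner.registry.resolve(serviceDomain, ComputeService::class.java)!!
            val vms = scenario.workload.source.resolve(workloadLoader, Random(seed))
            service.replay(clock, vms, seed, failureModel = null, interference = false)
        }
    }
```

Compared to the previous `ComputeServiceHelper` plus try/finally pattern, the `Provisioner` is closed through `use`, which is why the explicit `exporter?.close()` and `runner.close()` calls disappear in the diff below.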
Diffstat (limited to 'opendc-experiments/opendc-experiments-capelin/src/main')
-rw-r--r-- opendc-experiments/opendc-experiments-capelin/src/main/kotlin/org/opendc/experiments/capelin/CapelinRunner.kt | 78
1 file changed, 33 insertions, 45 deletions
diff --git a/opendc-experiments/opendc-experiments-capelin/src/main/kotlin/org/opendc/experiments/capelin/CapelinRunner.kt b/opendc-experiments/opendc-experiments-capelin/src/main/kotlin/org/opendc/experiments/capelin/CapelinRunner.kt
index dbb5ced3..6bd470f3 100644
--- a/opendc-experiments/opendc-experiments-capelin/src/main/kotlin/org/opendc/experiments/capelin/CapelinRunner.kt
+++ b/opendc-experiments/opendc-experiments-capelin/src/main/kotlin/org/opendc/experiments/capelin/CapelinRunner.kt
@@ -22,15 +22,16 @@
 
 package org.opendc.experiments.capelin
 
-import org.opendc.compute.workload.ComputeServiceHelper
+import org.opendc.compute.service.ComputeService
 import org.opendc.compute.workload.ComputeWorkloadLoader
 import org.opendc.compute.workload.createComputeScheduler
 import org.opendc.compute.workload.export.parquet.ParquetComputeMonitor
 import org.opendc.compute.workload.grid5000
-import org.opendc.compute.workload.telemetry.ComputeMetricReader
-import org.opendc.compute.workload.topology.apply
+import org.opendc.compute.workload.replay
 import org.opendc.experiments.capelin.model.Scenario
 import org.opendc.experiments.capelin.topology.clusterTopology
+import org.opendc.experiments.compute.*
+import org.opendc.experiments.provisioner.Provisioner
 import org.opendc.simulator.core.runBlockingSimulation
 import java.io.File
 import java.time.Duration
@@ -58,54 +59,41 @@ public class CapelinRunner(
      * Run a single [scenario] with the specified seed.
      */
     fun runScenario(scenario: Scenario, seed: Long) = runBlockingSimulation {
-        val seeder = Random(seed)
-
-        val operationalPhenomena = scenario.operationalPhenomena
-        val computeScheduler = createComputeScheduler(scenario.allocationPolicy, seeder)
-        val failureModel =
-            if (operationalPhenomena.failureFrequency > 0)
-                grid5000(Duration.ofSeconds((operationalPhenomena.failureFrequency * 60).roundToLong()))
-            else
-                null
-        val vms = scenario.workload.source.resolve(workloadLoader, seeder)
-        val runner = ComputeServiceHelper(
-            coroutineContext,
-            clock,
-            computeScheduler,
-            seed,
-        )
-
+        val serviceDomain = "compute.opendc.org"
         val topology = clusterTopology(File(envPath, "${scenario.topology.name}.txt"))
 
-        val partitions = scenario.partitions + ("seed" to seed.toString())
-        val partition = partitions.map { (k, v) -> "$k=$v" }.joinToString("/")
-        val exporter = if (outputPath != null) {
-            ComputeMetricReader(
-                this,
-                clock,
-                runner.service,
-                ParquetComputeMonitor(
-                    outputPath,
-                    partition,
-                    bufferSize = 4096
-                ),
-                exportInterval = Duration.ofMinutes(5)
+        Provisioner(coroutineContext, clock, seed).use { provisioner ->
+            provisioner.runSteps(
+                setupComputeService(serviceDomain, { createComputeScheduler(scenario.allocationPolicy, Random(it.seeder.nextLong())) }),
+                setupHosts(serviceDomain, topology.resolve(), optimize = true)
             )
-        } else {
-            null
-        }
 
-        try {
-            // Instantiate the desired topology
-            runner.apply(topology, optimize = true)
+            if (outputPath != null) {
+                val partitions = scenario.partitions + ("seed" to seed.toString())
+                val partition = partitions.map { (k, v) -> "$k=$v" }.joinToString("/")
+
+                provisioner.runStep(
+                    registerComputeMonitor(
+                        serviceDomain,
+                        ParquetComputeMonitor(
+                            outputPath,
+                            partition,
+                            bufferSize = 4096
+                        )
+                    )
+                )
+            }
 
-            // Run the workload trace
-            runner.run(vms, failureModel = failureModel, interference = operationalPhenomena.hasInterference)
+            val service = provisioner.registry.resolve(serviceDomain, ComputeService::class.java)!!
+            val vms = scenario.workload.source.resolve(workloadLoader, Random(seed))
+            val operationalPhenomena = scenario.operationalPhenomena
+            val failureModel =
+                if (operationalPhenomena.failureFrequency > 0)
+                    grid5000(Duration.ofSeconds((operationalPhenomena.failureFrequency * 60).roundToLong()))
+                else
+                    null
 
-            // Stop the metric collection
-            exporter?.close()
-        } finally {
-            runner.close()
+            service.replay(clock, vms, seed, failureModel = failureModel, interference = operationalPhenomena.hasInterference)
         }
     }
 }
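One detail worth calling out (editorial note, not part of the commit): the Parquet output is now attached through a `registerComputeMonitor` provisioning step only when `outputPath` is set, and the output is scoped by a partition string built from the scenario's partition map plus the seed. A self-contained sketch of just that partition construction, with placeholder map contents and seed:

```kotlin
// Mirrors the partition-string construction in the diff above; the map contents
// and the seed value here are placeholders for illustration.
fun partitionFor(partitions: Map<String, String>, seed: Long): String {
    val withSeed = partitions + ("seed" to seed.toString())
    return withSeed.map { (k, v) -> "$k=$v" }.joinToString("/")
}

fun main() {
    // Prints: topology=base/workload=baseline/seed=0
    println(partitionFor(mapOf("topology" to "base", "workload" to "baseline"), 0))
}
```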
