| author | Fabian Mastenbroek <mail.fabianm@gmail.com> | 2021-03-08 22:19:37 +0100 |
|---|---|---|
| committer | Fabian Mastenbroek <mail.fabianm@gmail.com> | 2021-03-08 22:19:37 +0100 |
| commit | e97774dbf274fcb57b9d173f9d674a2ef1b982af (patch) | |
| tree | 7d98f62a230ca33d32e71e8bed045874f97a9619 /simulator/opendc-runner-web/src/main/kotlin/org | |
| parent | 75751865179c6cd5a05abb4a0641193595f59b45 (diff) | |
compute: Remove use of bare-metal provisioning from compute module
This change removes the use of bare-metal provisioning from the OpenDC
Compute module, which significantly simplifies the experiment setup.
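
The effect on the runner is easiest to see in miniature. The sketch below uses hypothetical stand-in types rather than the real OpenDC API; it only mirrors the shape of the change in Main.kt shown in the diff: a single compute service replaces the (bareMetalProvisioner, provisioner, scheduler) triple, failure injection is pointed at the scheduler directly, and there is one component less to close.

```kotlin
// Hypothetical stand-ins, not the real OpenDC types; they only mirror the shape of the change.
class BareMetalProvisioner : AutoCloseable { override fun close() {} }
class ProvisioningService : AutoCloseable { override fun close() {} }
class ComputeService : AutoCloseable { override fun close() {} }

// Failure injection only needs something to break; the commit changes what it is pointed at.
fun createFailureDomain(target: Any) = println("injecting failures into $target")

// Before: three coupled components came out of createProvisioner().
fun createProvisioner() = Triple(BareMetalProvisioner(), ProvisioningService(), ComputeService())

// After: a single compute service comes out of createComputeService().
fun createComputeService() = ComputeService()

fun main() {
    // Old flow: destructure the triple, fail the bare-metal layer, close two components.
    val (bareMetalProvisioner, provisioner, oldScheduler) = createProvisioner()
    createFailureDomain(bareMetalProvisioner)
    oldScheduler.close()
    provisioner.close()

    // New flow: one scheduler to create, fail, and close.
    val scheduler = createComputeService()
    createFailureDomain(scheduler)
    scheduler.close()
}
```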
Diffstat (limited to 'simulator/opendc-runner-web/src/main/kotlin/org')
3 files changed, 29 insertions, 66 deletions
```diff
diff --git a/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/Main.kt b/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/Main.kt
index 482fe754..b9aeecb8 100644
--- a/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/Main.kt
+++ b/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/Main.kt
@@ -46,11 +46,11 @@ import org.opendc.compute.service.scheduler.NumberOfActiveServersAllocationPolic
 import org.opendc.compute.service.scheduler.ProvisionedCoresAllocationPolicy
 import org.opendc.compute.service.scheduler.RandomAllocationPolicy
 import org.opendc.compute.simulator.allocation.*
-import org.opendc.experiments.capelin.experiment.attachMonitor
-import org.opendc.experiments.capelin.experiment.createFailureDomain
-import org.opendc.experiments.capelin.experiment.createProvisioner
-import org.opendc.experiments.capelin.experiment.processTrace
+import org.opendc.experiments.capelin.attachMonitor
+import org.opendc.experiments.capelin.createComputeService
+import org.opendc.experiments.capelin.createFailureDomain
 import org.opendc.experiments.capelin.model.Workload
+import org.opendc.experiments.capelin.processTrace
 import org.opendc.experiments.capelin.trace.Sc20ParquetTraceReader
 import org.opendc.experiments.capelin.trace.Sc20RawParquetTraceReader
 import org.opendc.format.trace.sc20.Sc20PerformanceInterferenceReader
@@ -247,7 +247,7 @@ public class RunnerCli : CliktCommand(name = "runner") {
         val tracer = EventTracer(clock)
 
         testScope.launch {
-            val (bareMetalProvisioner, provisioner, scheduler) = createProvisioner(
+            val scheduler = createComputeService(
                 this,
                 clock,
                 environment,
@@ -262,7 +262,7 @@ public class RunnerCli : CliktCommand(name = "runner") {
                     clock,
                     seeder.nextInt(),
                     operational.get("failureFrequency", Number::class.java)?.toDouble() ?: 24.0 * 7,
-                    bareMetalProvisioner,
+                    scheduler,
                     chan
                 )
             } else {
@@ -287,7 +287,6 @@ public class RunnerCli : CliktCommand(name = "runner") {
 
             failureDomain?.cancel()
             scheduler.close()
-            provisioner.close()
         }
 
         try {
diff --git a/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/TopologyParser.kt b/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/TopologyParser.kt
index 2f11347d..e7e99a3d 100644
--- a/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/TopologyParser.kt
+++ b/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/TopologyParser.kt
@@ -28,36 +28,24 @@ import com.mongodb.client.model.Aggregates
 import com.mongodb.client.model.Field
 import com.mongodb.client.model.Filters
 import com.mongodb.client.model.Projections
-import kotlinx.coroutines.CoroutineScope
-import kotlinx.coroutines.launch
 import org.bson.Document
 import org.bson.types.ObjectId
-import org.opendc.compute.simulator.SimBareMetalDriver
 import org.opendc.compute.simulator.power.models.LinearPowerModel
-import org.opendc.core.Environment
-import org.opendc.core.Platform
-import org.opendc.core.Zone
-import org.opendc.core.services.ServiceRegistry
 import org.opendc.format.environment.EnvironmentReader
-import org.opendc.metal.NODE_CLUSTER
-import org.opendc.metal.service.ProvisioningService
-import org.opendc.metal.service.SimpleProvisioningService
+import org.opendc.format.environment.MachineDef
 import org.opendc.simulator.compute.SimMachineModel
 import org.opendc.simulator.compute.model.MemoryUnit
 import org.opendc.simulator.compute.model.ProcessingNode
 import org.opendc.simulator.compute.model.ProcessingUnit
-import java.time.Clock
 import java.util.*
 
 /**
  * A helper class that converts the MongoDB topology into an OpenDC environment.
  */
 public class TopologyParser(private val collection: MongoCollection<Document>, private val id: ObjectId) : EnvironmentReader {
-    /**
-     * Parse the topology with the specified [id].
-     */
-    override suspend fun construct(coroutineScope: CoroutineScope, clock: Clock): Environment {
-        val nodes = mutableListOf<SimBareMetalDriver>()
+
+    public override fun read(): List<MachineDef> {
+        val nodes = mutableListOf<MachineDef>()
         val random = Random(0)
 
         for (machine in fetchMachines(id)) {
@@ -85,36 +73,17 @@ public class TopologyParser(private val collection: MongoCollection<Document>, p
             val energyConsumptionW = machine.getList("cpus", Document::class.java).sumBy { it.getInteger("energyConsumptionW") }.toDouble()
 
             nodes.add(
-                SimBareMetalDriver(
-                    coroutineScope,
-                    clock,
+                MachineDef(
                     UUID(random.nextLong(), random.nextLong()),
                     "node-$clusterId-$position",
-                    mapOf(NODE_CLUSTER to clusterId),
+                    mapOf("cluster" to clusterId),
                     SimMachineModel(processors, memoryUnits),
                     LinearPowerModel(2 * energyConsumptionW, .5)
                 )
             )
         }
 
-        val provisioningService = SimpleProvisioningService()
-        coroutineScope.launch {
-            for (node in nodes) {
-                provisioningService.create(node)
-            }
-        }
-
-        val serviceRegistry = ServiceRegistry().put(ProvisioningService, provisioningService)
-
-        val platform = Platform(
-            UUID.randomUUID(),
-            "opendc-platform",
-            listOf(
-                Zone(UUID.randomUUID(), "zone", serviceRegistry)
-            )
-        )
-
-        return Environment(fetchName(id), null, listOf(platform))
+        return nodes
     }
 
     override fun close() {}
diff --git a/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/WebExperimentMonitor.kt b/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/WebExperimentMonitor.kt
index fe814c76..a8ac6c10 100644
--- a/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/WebExperimentMonitor.kt
+++ b/simulator/opendc-runner-web/src/main/kotlin/org/opendc/runner/web/WebExperimentMonitor.kt
@@ -27,10 +27,9 @@ import org.opendc.compute.api.Server
 import org.opendc.compute.api.ServerState
 import org.opendc.compute.service.ComputeServiceEvent
 import org.opendc.compute.service.driver.Host
+import org.opendc.compute.service.driver.HostState
 import org.opendc.experiments.capelin.monitor.ExperimentMonitor
 import org.opendc.experiments.capelin.telemetry.HostEvent
-import org.opendc.metal.Node
-import org.opendc.metal.NodeState
 import kotlin.math.max
 
 /**
@@ -38,7 +37,7 @@ import kotlin.math.max
  */
 public class WebExperimentMonitor : ExperimentMonitor {
     private val logger = KotlinLogging.logger {}
-    private val currentHostEvent = mutableMapOf<Node, HostEvent>()
+    private val currentHostEvent = mutableMapOf<Host, HostEvent>()
     private var startTime = -1L
 
     override fun reportVmStateChange(time: Long, server: Server, newState: ServerState) {
@@ -50,12 +49,8 @@ public class WebExperimentMonitor : ExperimentMonitor {
         }
     }
 
-    override fun reportHostStateChange(
-        time: Long,
-        driver: Host,
-        host: Node
-    ) {
-        logger.debug { "Host ${host.uid} changed state ${host.state} [$time]" }
+    override fun reportHostStateChange(time: Long, host: Host, newState: HostState) {
+        logger.debug { "Host ${host.uid} changed state $newState [$time]" }
 
         val previousEvent = currentHostEvent[host]
 
@@ -84,9 +79,9 @@ public class WebExperimentMonitor : ExperimentMonitor {
         )
     }
 
-    private val lastPowerConsumption = mutableMapOf<Node, Double>()
+    private val lastPowerConsumption = mutableMapOf<Host, Double>()
 
-    override fun reportPowerConsumption(host: Node, draw: Double) {
+    override fun reportPowerConsumption(host: Host, draw: Double) {
         lastPowerConsumption[host] = draw
     }
 
@@ -99,7 +94,7 @@ public class WebExperimentMonitor : ExperimentMonitor {
         cpuUsage: Double,
         cpuDemand: Double,
         numberOfDeployedImages: Int,
-        host: Node,
+        host: Host,
         duration: Long
     ) {
         val previousEvent = currentHostEvent[host]
@@ -117,7 +112,7 @@ public class WebExperimentMonitor : ExperimentMonitor {
                 cpuUsage,
                 cpuDemand,
                 lastPowerConsumption[host] ?: 200.0,
-                host.flavor.cpuCount
+                host.model.cpuCount
             )
 
             currentHostEvent[host] = event
@@ -135,7 +130,7 @@ public class WebExperimentMonitor : ExperimentMonitor {
                 cpuUsage,
                 cpuDemand,
                 lastPowerConsumption[host] ?: 200.0,
-                host.flavor.cpuCount
+                host.model.cpuCount
            )
 
             currentHostEvent[host] = event
@@ -155,7 +150,7 @@ public class WebExperimentMonitor : ExperimentMonitor {
                 cpuUsage,
                 cpuDemand,
                 lastPowerConsumption[host] ?: 200.0,
-                host.flavor.cpuCount
+                host.model.cpuCount
             )
 
             currentHostEvent[host] = event
@@ -164,7 +159,7 @@ public class WebExperimentMonitor : ExperimentMonitor {
     }
 
     private var hostAggregateMetrics: AggregateHostMetrics = AggregateHostMetrics()
-    private val hostMetrics: MutableMap<Node, HostMetrics> = mutableMapOf()
+    private val hostMetrics: MutableMap<Host, HostMetrics> = mutableMapOf()
 
     private fun processHostEvent(event: HostEvent) {
         val slices = event.duration / SLICE_LENGTH
@@ -175,14 +170,14 @@ public class WebExperimentMonitor : ExperimentMonitor {
             hostAggregateMetrics.totalOvercommittedBurst + event.overcommissionedBurst,
             hostAggregateMetrics.totalInterferedBurst + event.interferedBurst,
             hostAggregateMetrics.totalPowerDraw + (slices * (event.powerDraw / 12)),
-            hostAggregateMetrics.totalFailureSlices + if (event.node.state != NodeState.ACTIVE) slices.toLong() else 0,
-            hostAggregateMetrics.totalFailureVmSlices + if (event.node.state != NodeState.ACTIVE) event.vmCount * slices.toLong() else 0
+            hostAggregateMetrics.totalFailureSlices + if (event.host.state != HostState.UP) slices else 0,
+            hostAggregateMetrics.totalFailureVmSlices + if (event.host.state != HostState.UP) event.vmCount * slices else 0
         )
 
-        hostMetrics.compute(event.node) { _, prev ->
+        hostMetrics.compute(event.host) { _, prev ->
             HostMetrics(
-                (event.cpuUsage.takeIf { event.node.state == NodeState.ACTIVE } ?: 0.0) + (prev?.cpuUsage ?: 0.0),
-                (event.cpuDemand.takeIf { event.node.state == NodeState.ACTIVE } ?: 0.0) + (prev?.cpuDemand ?: 0.0),
+                (event.cpuUsage.takeIf { event.host.state == HostState.UP } ?: 0.0) + (prev?.cpuUsage ?: 0.0),
+                (event.cpuDemand.takeIf { event.host.state == HostState.UP } ?: 0.0) + (prev?.cpuDemand ?: 0.0),
                 event.vmCount + (prev?.vmCount ?: 0),
                 1 + (prev?.count ?: 0)
             )
```
