author     Georgios Andreadis <info@gandreadis.com>    2020-06-29 16:06:35 +0200
committer  Fabian Mastenbroek <mail.fabianm@gmail.com>  2020-08-24 16:04:00 +0200
commit     4a79cefdf5d71715b6c575d5c8bb4fea418c2ba6 (patch)
tree       fc68847d1e010e2962dac8345a0fd0cc9a2f0681 /core/database
parent     ad31b66503ec65e611ab96c2a540180ed25f5a6f (diff)
Prepare opendc repository for monorepo
This change prepares the opendc repository for a monorepo setup by moving all files to the core/ directory. After all repositories have been merged into this repository, we will move the correct files back.
Diffstat (limited to 'core/database')
-rw-r--r--  core/database/Dockerfile                        |   8
-rw-r--r--  core/database/README.md                         |  13
-rw-r--r--  core/database/gwf_converter/gwf_converter.py    | 115
-rw-r--r--  core/database/gwf_converter/requirements.txt    |   1
-rw-r--r--  core/database/gwf_converter/traces/default.gwf  |   6
-rw-r--r--  core/database/rebuild-database.py               |  32
-rw-r--r--  core/database/rebuild.bat                       |   3
-rw-r--r--  core/database/schema.sql                        | 818
-rw-r--r--  core/database/test.sql                          | 381
-rw-r--r--  core/database/view-table.py                     |  17
10 files changed, 1394 insertions(+), 0 deletions(-)
diff --git a/core/database/Dockerfile b/core/database/Dockerfile
new file mode 100644
index 00000000..e30aed51
--- /dev/null
+++ b/core/database/Dockerfile
@@ -0,0 +1,8 @@
+FROM mariadb:10.1
+MAINTAINER Fabian Mastenbroek <f.s.mastenbroek@student.tudelft.nl>
+
+# Import schema into database
+ADD schema.sql /docker-entrypoint-initdb.d
+
+# Add test data into database
+#ADD test.sql /docker-entrypoint-initdb.d
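
For reference, a hedged sketch of building and running this image locally (the image name and root password below are illustrative; the stock mariadb image requires a root password variable to start):

```bash
# Build the image; schema.sql is executed automatically on first startup
# by the mariadb entrypoint.
docker build -t opendc-database .
docker run -d --name opendc-database -p 3306:3306 \
    -e MYSQL_ROOT_PASSWORD=secret \
    opendc-database
```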
diff --git a/core/database/README.md b/core/database/README.md
new file mode 100644
index 00000000..9fba2d5c
--- /dev/null
+++ b/core/database/README.md
@@ -0,0 +1,13 @@
+# OpenDC Database
+
+To rebuild the database at a location (or in this directory if none is specified):
+
+```bash
+python rebuild-database.py "path/to/database/directory"
+```
+
+To view a table in the database:
+
+```bash
+python view-table.py "path/to/database/directory" table_name
+```
diff --git a/core/database/gwf_converter/gwf_converter.py b/core/database/gwf_converter/gwf_converter.py
new file mode 100644
index 00000000..902bd93f
--- /dev/null
+++ b/core/database/gwf_converter/gwf_converter.py
@@ -0,0 +1,115 @@
+import os
+import sys
+
+import mysql.connector as mariadb
+
+
+class Job:
+ def __init__(self, gwf_id):
+ self.gwf_id = gwf_id
+ self.db_id = -1
+ self.tasks = []
+
+
+class Task:
+ def __init__(self, gwf_id, job, submit_time, run_time, num_processors, dependency_gwf_ids):
+ self.gwf_id = gwf_id
+ self.job = job
+ self.submit_time = submit_time
+ self.run_time = run_time
+ self.cores = num_processors
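+        # Assumed conversion factor: the GWF trace gives only runtime and processor
+        # counts, so total work is estimated at 4000 flops per processor per unit of runtime.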
+ self.flops = 4000 * run_time * num_processors
+ self.dependency_gwf_ids = dependency_gwf_ids
+ self.db_id = -1
+ self.dependencies = []
+
+
+def get_jobs_from_gwf_file(file_name):
+ jobs = {}
+ tasks = {}
+
+ with open(file_name, "r") as f:
+ # Skip first CSV header line
+ f.readline()
+
+ for line in f:
+ if line.startswith("#") or len(line.strip()) == 0:
+ continue
+
+ values = [col.strip() for col in line.split(",")]
+ cast_values = [int(values[i]) for i in range(len(values) - 1)]
+ job_id, task_id, submit_time, run_time, num_processors, req_num_processors = cast_values
+ dependency_gwf_ids = [int(val) for val in values[-1].split(" ") if val != ""]
+
+ if job_id not in jobs:
+ jobs[job_id] = Job(job_id)
+
+ new_task = Task(task_id, jobs[job_id], submit_time, run_time, num_processors, dependency_gwf_ids)
+ tasks[task_id] = new_task
+ jobs[job_id].tasks.append(new_task)
+
+ for task in tasks.values():
+ for dependency_gwf_id in task.dependency_gwf_ids:
+ if dependency_gwf_id in tasks:
+ task.dependencies.append(tasks[dependency_gwf_id])
+
+ return jobs.values()
+
+
+def write_to_db(conn, trace_name, jobs):
+    cursor = conn.cursor()
+
+    # Use parameterized queries so that names containing quotes can't break the SQL.
+    trace_id = execute_insert_query(conn, cursor, "INSERT INTO traces (name) VALUES (%s)", (trace_name,))
+
+    for job in jobs:
+        job.db_id = execute_insert_query(conn, cursor, "INSERT INTO jobs (name, trace_id) VALUES (%s,%s)",
+                                         ("Job %d" % job.gwf_id, trace_id))
+
+        for task in job.tasks:
+            task.db_id = execute_insert_query(conn, cursor,
+                                              "INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) "
+                                              "VALUES (%s,%s,%s,%s)",
+                                              (task.submit_time, task.flops, task.cores, job.db_id))
+
+    for job in jobs:
+        for task in job.tasks:
+            for dependency in task.dependencies:
+                execute_insert_query(conn, cursor, "INSERT INTO task_dependencies (first_task_id, second_task_id) "
+                                                   "VALUES (%s,%s)",
+                                     (dependency.db_id, task.db_id))
+
+
+def execute_insert_query(conn, cursor, sql, params=()):
+    try:
+        cursor.execute(sql, params)
+    except mariadb.Error as error:
+        print("SQL Error: {}".format(error))
+
+    conn.commit()
+    return cursor.lastrowid
+
+
+def main(trace_path):
+ trace_name = sys.argv[2] if (len(sys.argv) > 2) else \
+ os.path.splitext(os.path.basename(trace_path))[0]
+ gwf_jobs = get_jobs_from_gwf_file(trace_path)
+
+    host = os.environ.get('PERSISTENCE_HOST', 'localhost')
+    user = os.environ.get('PERSISTENCE_USER', 'opendc')
+    password = os.environ.get('PERSISTENCE_PASSWORD', 'opendcpassword')
+    database = os.environ.get('PERSISTENCE_DATABASE', 'opendc')
+ conn = mariadb.connect(host=host, user=user, password=password, database=database)
+ write_to_db(conn, trace_name, gwf_jobs)
+ conn.close()
+
+
+if __name__ == "__main__":
+ if len(sys.argv) < 2:
+ sys.exit("Usage: %s file [name]" % sys.argv[0])
+
+ if sys.argv[1] in ("-a", "--all"):
+ for f in os.listdir("traces"):
+ if f.endswith(".gwf"):
+ print("Converting {}".format(f))
+ main(os.path.join("traces", f))
+ else:
+ main(sys.argv[1])
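
A hedged usage sketch, inferred from the argument handling above; database coordinates fall back to the PERSISTENCE_* defaults read in main():

```bash
# Convert a single trace; the optional second argument overrides the trace name.
python gwf_converter.py traces/default.gwf "My Trace"

# Convert every .gwf file in the traces/ directory.
python gwf_converter.py --all

# Target a non-default database via environment variables.
PERSISTENCE_HOST=db.example.com python gwf_converter.py traces/default.gwf
```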
diff --git a/core/database/gwf_converter/requirements.txt b/core/database/gwf_converter/requirements.txt
new file mode 100644
index 00000000..0eaebf12
--- /dev/null
+++ b/core/database/gwf_converter/requirements.txt
@@ -0,0 +1 @@
+mysql-connector-python
diff --git a/core/database/gwf_converter/traces/default.gwf b/core/database/gwf_converter/traces/default.gwf
new file mode 100644
index 00000000..b1c55a17
--- /dev/null
+++ b/core/database/gwf_converter/traces/default.gwf
@@ -0,0 +1,6 @@
+WorkflowID, JobID , SubmitTime , RunTime , NProcs , ReqNProcs , Dependencies
+0 , 1 , 1 , 1 , 1 , 1, 5 4 3
+0 , 2 , 2 , 2 , 2 , 2, 3
+0 , 3 , 3 , 3 , 3 , 3, 5
+0 , 4 , 4 , 4 , 4 , 4,
+0 , 5 , 5 , 5 , 5 , 5,
diff --git a/core/database/rebuild-database.py b/core/database/rebuild-database.py
new file mode 100644
index 00000000..0cbeb27a
--- /dev/null
+++ b/core/database/rebuild-database.py
@@ -0,0 +1,32 @@
+import os
+import sqlite3
+import sys
+
+# Line-buffer stdout so progress messages appear immediately.
+sys.stdout.reconfigure(line_buffering=True)
+
+try:
+    BASE_DIR = sys.argv[1]
+except IndexError:
+    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+db_location = os.path.join(BASE_DIR, 'opendc.db')
+
+if os.path.exists(db_location):
+    print("Removing old database...")
+    os.remove(db_location)
+
+print("Connecting to new database...")
+conn = sqlite3.connect(db_location)
+c = conn.cursor()
+
+print("Importing schema...")
+with open('schema.sql') as schema:
+    c.executescript(schema.read())
+
+print("Importing test data...")
+with open('test.sql') as test:
+    c.executescript(test.read())
+
+conn.commit()
+conn.close()
+
+print("Done.")
diff --git a/core/database/rebuild.bat b/core/database/rebuild.bat
new file mode 100644
index 00000000..c0f38da1
--- /dev/null
+++ b/core/database/rebuild.bat
@@ -0,0 +1,3 @@
+del database.db
+sqlite3 database.db < schema.sql
+sqlite3 database.db < test.sql
\ No newline at end of file
diff --git a/core/database/schema.sql b/core/database/schema.sql
new file mode 100644
index 00000000..f6286260
--- /dev/null
+++ b/core/database/schema.sql
@@ -0,0 +1,818 @@
+-- Some tables referenced by foreign key constraints are defined later in this file, so disable foreign key checks.
+SET FOREIGN_KEY_CHECKS = 0;
+
+/*
+* A user is identified by their google_id, which the server gets by authenticating with Google.
+*/
+
+-- Users
+DROP TABLE IF EXISTS users;
+CREATE TABLE users (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ google_id TEXT NOT NULL,
+ email TEXT,
+ given_name TEXT,
+ family_name TEXT
+);
+
+/*
+* The authorizations table defines which users are authorized to "OWN", "EDIT", or "VIEW" a simulation. The
+* authorization_level table defines the permission levels.
+*/
+
+-- User authorizations
+DROP TABLE IF EXISTS authorizations;
+CREATE TABLE authorizations (
+ user_id INTEGER NOT NULL,
+ simulation_id INTEGER NOT NULL,
+ authorization_level VARCHAR(50) NOT NULL,
+
+ FOREIGN KEY (user_id) REFERENCES users (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (simulation_id) REFERENCES simulations (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (authorization_level) REFERENCES authorization_levels (level)
+);
+
+CREATE UNIQUE INDEX authorizations_index
+ ON authorizations (
+ user_id,
+ simulation_id
+ );
+
+-- Authorization levels
+DROP TABLE IF EXISTS authorization_levels;
+CREATE TABLE authorization_levels (
+ level VARCHAR(50) PRIMARY KEY NOT NULL
+);
+INSERT INTO authorization_levels (level) VALUES ('OWN');
+INSERT INTO authorization_levels (level) VALUES ('EDIT');
+INSERT INTO authorization_levels (level) VALUES ('VIEW');
+
+/*
+* A Simulation has several Paths, which define the topology of the datacenter at different times. A Simulation also
+* has several Experiments, which can be run on a combination of Paths, Schedulers and Traces. Simulations also serve
+* as the scope to which different Users can be Authorized.
+*
+* The datetime_created and datetime_last_edited columns are in a subset of ISO-8601 (fractions of seconds are omitted):
+* YYYY-MM-DDTHH:MM:SS, where...
+* - YYYY is the four-digit year,
+* - MM is the two-digit month (1-12)
+* - DD is the two-digit day of the month (1-31)
+* - HH is the two-digit hours part (0-23)
+* - MM is the two-digit minutes part (0-59)
+* - SS is the two-digit seconds part (0-59)
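+* For example, '2016-07-11T11:00:00' satisfies the LIKE pattern used in the CHECK constraints below.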
+*/
+
+-- Simulation
+DROP TABLE IF EXISTS simulations;
+CREATE TABLE simulations (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ datetime_created VARCHAR(50) NOT NULL CHECK (datetime_created LIKE '____-__-__T__:__:__'),
+ datetime_last_edited VARCHAR(50) NOT NULL CHECK (datetime_last_edited LIKE '____-__-__T__:__:__'),
+ name VARCHAR(50) NOT NULL
+);
+
+/*
+* An Experiment consists of a Path, a Scheduler, and a Trace. The Path defines the topology of the datacenter at
+* different times in the simulation. The Scheduler defines which scheduler to use to simulate this experiment. The
+* Trace defines which tasks have to be run in the simulation.
+*/
+
+DROP TABLE IF EXISTS experiments;
+CREATE TABLE experiments (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ simulation_id INTEGER NOT NULL,
+ path_id INTEGER NOT NULL,
+ trace_id INTEGER NOT NULL,
+ scheduler_name VARCHAR(50) NOT NULL,
+ name TEXT NOT NULL,
+ state TEXT NOT NULL,
+ last_simulated_tick INTEGER NOT NULL DEFAULT 0 CHECK (last_simulated_tick >= 0),
+
+ FOREIGN KEY (simulation_id) REFERENCES simulations (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (path_id) REFERENCES paths (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (trace_id) REFERENCES traces (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (scheduler_name) REFERENCES schedulers (name)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+/*
+* A Simulation has several Paths, which each contain Sections. A Section details which Datacenter topology to use
+* starting at which point in time (known internally as a "tick"). So, combining the several Sections in a Path
+* tells us which Datacenter topology to use at each tick.
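+*
+* For example (mirroring test.sql): Sections (path 1, datacenter 1, tick 0), (path 1, datacenter 2, tick 50) and
+* (path 1, datacenter 3, tick 100) mean that Path 1 uses topology 1 for ticks 0-49, topology 2 for ticks 50-99,
+* and topology 3 from tick 100 onwards.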
+*/
+
+-- Path
+DROP TABLE IF EXISTS paths;
+CREATE TABLE paths (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ simulation_id INTEGER NOT NULL,
+ name TEXT,
+ datetime_created VARCHAR(50) NOT NULL CHECK (datetime_created LIKE '____-__-__T__:__:__'),
+
+ FOREIGN KEY (simulation_id) REFERENCES simulations (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- Sections
+DROP TABLE IF EXISTS sections;
+CREATE TABLE sections (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ path_id INTEGER NOT NULL,
+ datacenter_id INTEGER NOT NULL,
+ start_tick INTEGER NOT NULL CHECK (start_tick >= 0),
+
+ FOREIGN KEY (path_id) REFERENCES paths (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (datacenter_id) REFERENCES datacenters (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- Scheduler names
+DROP TABLE IF EXISTS schedulers;
+CREATE TABLE schedulers (
+ name VARCHAR(50) PRIMARY KEY NOT NULL
+);
+INSERT INTO schedulers (name) VALUES ('FIFO-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-RANDOM');
+INSERT INTO schedulers (name) VALUES ('SRTF-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-RANDOM');
+INSERT INTO schedulers (name) VALUES ('RANDOM-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-RANDOM');
+
+/*
+* Each simulation has a single trace. A trace contains tasks and their start times.
+*/
+
+-- A trace describes when tasks arrive in a datacenter
+DROP TABLE IF EXISTS traces;
+CREATE TABLE traces (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ name TEXT NOT NULL
+);
+
+-- A job
+DROP TABLE IF EXISTS jobs;
+CREATE TABLE jobs (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ name TEXT NOT NULL,
+ trace_id INTEGER NOT NULL,
+
+ FOREIGN KEY (trace_id) REFERENCES traces (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- A task that's defined in terms of how many flops (floating point operations) it takes to complete
+DROP TABLE IF EXISTS tasks;
+CREATE TABLE tasks (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ start_tick INTEGER NOT NULL CHECK (start_tick >= 0),
+ total_flop_count BIGINT NOT NULL CHECK (total_flop_count >= 0),
+ core_count INTEGER NOT NULL CHECK (core_count >= 0),
+ job_id INTEGER NOT NULL,
+
+ FOREIGN KEY (job_id) REFERENCES jobs (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- A dependency between two tasks: second_task_id depends on (starts after) first_task_id.
+DROP TABLE IF EXISTS task_dependencies;
+CREATE TABLE task_dependencies (
+ first_task_id INTEGER NOT NULL,
+ second_task_id INTEGER NOT NULL,
+
+ PRIMARY KEY (first_task_id, second_task_id),
+ FOREIGN KEY (first_task_id) REFERENCES tasks (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (second_task_id) REFERENCES tasks (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+/*
+* A task_state describes how much of a task has already been completed at the time of the current tick. Several
+* machine_states show which machines worked on the task.
+*/
+
+-- A state record for a task at a given tick
+DROP TABLE IF EXISTS task_states;
+CREATE TABLE task_states (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ task_id INTEGER NOT NULL,
+ experiment_id INTEGER NOT NULL,
+ tick INTEGER NOT NULL CHECK (tick >= 0),
+ flops_left INTEGER NOT NULL CHECK (flops_left >= 0),
+ cores_used INTEGER NOT NULL CHECK (cores_used >= 0),
+
+ FOREIGN KEY (task_id) REFERENCES tasks (id),
+ FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- The measurements of a single stage
+DROP TABLE IF EXISTS stage_measurements;
+CREATE TABLE stage_measurements (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ experiment_id INTEGER NOT NULL,
+ tick INTEGER NOT NULL CHECK (tick >= 0),
+ stage INTEGER NOT NULL CHECK (stage >= 0),
+ cpu BIGINT NOT NULL CHECK (cpu >= 0),
+ wall BIGINT NOT NULL CHECK (wall >= 0),
+ size INTEGER NOT NULL CHECK (size >= 0),
+ iterations INTEGER NOT NULL CHECK (iterations >= 0),
+
+ FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- Metrics of a job
+DROP TABLE IF EXISTS job_metrics;
+CREATE TABLE job_metrics (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ experiment_id INTEGER NOT NULL,
+ job_id INTEGER NOT NULL,
+ critical_path INTEGER NOT NULL CHECK (critical_path >= 0),
+ critical_path_length INTEGER NOT NULL CHECK (critical_path_length >= 0),
+ waiting_time INTEGER NOT NULL CHECK (waiting_time >= 0),
+ makespan INTEGER NOT NULL CHECK (makespan >= 0),
+ nsl INTEGER NOT NULL CHECK (nsl >= 0),
+
+ FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (job_id) REFERENCES jobs (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- Metrics of a single task
+DROP TABLE IF EXISTS task_metrics;
+CREATE TABLE task_metrics (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ experiment_id INTEGER NOT NULL,
+ task_id INTEGER NOT NULL,
+ job_id INTEGER NOT NULL,
+ waiting INTEGER NOT NULL CHECK (waiting >= 0),
+ execution INTEGER NOT NULL CHECK (execution >= 0),
+ turnaround INTEGER NOT NULL CHECK (turnaround >= 0),
+
+ FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (task_id) REFERENCES tasks (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (job_id) REFERENCES jobs (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- A machine state
+DROP TABLE IF EXISTS machine_states;
+CREATE TABLE machine_states (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ machine_id INTEGER NOT NULL,
+ experiment_id INTEGER NOT NULL,
+ tick INTEGER NOT NULL,
+ temperature_c REAL,
+ in_use_memory_mb INTEGER,
+ load_fraction REAL CHECK (load_fraction >= 0 AND load_fraction <= 1),
+
+ FOREIGN KEY (machine_id) REFERENCES machines (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+/*
+* A Section references a Datacenter topology, which can be used by multiple Sections to create Paths that go back and
+* forth between different topologies.
+*/
+
+-- Datacenters
+DROP TABLE IF EXISTS datacenters;
+CREATE TABLE datacenters (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ simulation_id INTEGER NOT NULL,
+ starred INTEGER CHECK (starred = 0 OR starred = 1),
+
+ FOREIGN KEY (simulation_id) REFERENCES simulations (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+/*
+* A datacenter consists of several rooms. A room has a type that specifies what kind of objects can be in it.
+*/
+
+-- Rooms in a datacenter
+DROP TABLE IF EXISTS rooms;
+CREATE TABLE rooms (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ name TEXT NOT NULL,
+ datacenter_id INTEGER NOT NULL,
+ type VARCHAR(50) NOT NULL,
+ topology_id INTEGER,
+
+ FOREIGN KEY (datacenter_id) REFERENCES datacenters (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (type) REFERENCES room_types (name)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (topology_id) REFERENCES rooms (id)
+ ON DELETE NO ACTION
+ ON UPDATE CASCADE
+);
+
+DROP TABLE IF EXISTS room_types;
+CREATE TABLE room_types (
+ name VARCHAR(50) PRIMARY KEY NOT NULL
+);
+INSERT INTO room_types (name) VALUES ('SERVER');
+INSERT INTO room_types (name) VALUES ('HALLWAY');
+INSERT INTO room_types (name) VALUES ('OFFICE');
+INSERT INTO room_types (name) VALUES ('POWER');
+INSERT INTO room_types (name) VALUES ('COOLING');
+
+/*
+* A room consists of tiles that have a quantized (x,y) position. The same tile can't be in multiple rooms. All tiles
+* in a room must share at least one edge with another tile in that room. A tile is occupied by a single object, which
+* has a type from the object_types table.
+*/
+
+-- Tiles in a room
+DROP TABLE IF EXISTS tiles;
+CREATE TABLE tiles (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ position_x INTEGER NOT NULL,
+ position_y INTEGER NOT NULL,
+ room_id INTEGER NOT NULL,
+ object_id INTEGER,
+ topology_id INTEGER,
+
+ FOREIGN KEY (room_id) REFERENCES rooms (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (object_id) REFERENCES objects (id),
+ FOREIGN KEY (topology_id) REFERENCES tiles (id)
+ ON DELETE NO ACTION
+ ON UPDATE CASCADE,
+
+ UNIQUE (position_x, position_y, room_id), -- only one tile can be in the same position in a room
+ UNIQUE (object_id) -- an object can only be on one tile
+);
+
+DELIMITER //
+
+-- Make sure this datacenter doesn't already have a tile in this location,
+-- and that the tiles in a room stay connected.
+DROP TRIGGER IF EXISTS before_insert_tiles_check_existence;
+CREATE TRIGGER before_insert_tiles_check_existence
+ BEFORE INSERT
+ ON tiles
+ FOR EACH ROW
+ BEGIN
+ -- checking tile overlap
+ -- a tile already exists such that..
+ IF EXISTS(SELECT datacenter_id
+ FROM tiles
+ JOIN rooms ON tiles.room_id = rooms.id
+ WHERE (
+
+ -- it's in the same datacenter as the new tile...
+ datacenter_id = (SELECT datacenter_id
+ FROM rooms
+ WHERE rooms.id = NEW.room_id)
+
+            -- and in the same position as the new tile.
+ AND NEW.position_x = tiles.position_x AND NEW.position_y = tiles.position_y
+ ))
+ THEN
+ -- raise an error
+ SIGNAL SQLSTATE '45000'
+ SET MESSAGE_TEXT = 'OccupiedTilePosition';
+ END IF;
+
+ -- checking tile adjacency
+ -- this isn't the first tile, ...
+ IF (EXISTS(SELECT *
+ FROM tiles
+ WHERE (NEW.room_id = tiles.room_id))
+
+        -- and the new tile isn't directly to the right, to the left, above, or below an existing tile.
+ AND NOT EXISTS(SELECT *
+ FROM tiles
+ WHERE (
+ NEW.room_id = tiles.room_id AND (
+ (NEW.position_x + 1 = tiles.position_x AND NEW.position_y = tiles.position_y) -- right
+ OR (NEW.position_x - 1 = tiles.position_x AND NEW.position_y = tiles.position_y) -- left
+ OR (NEW.position_x = tiles.position_x AND NEW.position_y + 1 = tiles.position_y) -- above
+ OR (NEW.position_x = tiles.position_x AND NEW.position_y - 1 = tiles.position_y) -- below
+ )
+ )))
+ THEN
+ -- raise an error
+ SIGNAL SQLSTATE '45000'
+ SET MESSAGE_TEXT = 'InvalidTilePosition';
+ END IF;
+ END//
+
+DELIMITER ;
+
+/*
+* Objects are on tiles and have a type. They form an extra abstraction layer to make it easier to find what object is
+* on a tile, as well as to enforce that only objects of the right type are in a certain room.
+*
+* To add a PSU, cooling item, or rack to a tile, first add an object. Then use that object's ID as the value for the
+* object_id column of the PSU, cooling item, or rack table.
+*
+* The allowed_objects table specifies what types of objects are allowed in what types of rooms.
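+*
+* For example (IDs here are illustrative; test.sql follows this pattern for racks):
+*   INSERT INTO objects (type) VALUES ('RACK');  -- suppose this creates object 1
+*   INSERT INTO racks (id, capacity, name, power_capacity_w) VALUES (1, 42, 'Rack 1', 5000);
+*   UPDATE tiles SET object_id = 1 WHERE id = 1; -- place the rack on tile 1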
+*/
+
+-- Objects
+DROP TABLE IF EXISTS objects;
+CREATE TABLE objects (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ type VARCHAR(50) NOT NULL,
+
+ FOREIGN KEY (type) REFERENCES object_types (name)
+);
+
+-- Object types
+DROP TABLE IF EXISTS object_types;
+CREATE TABLE object_types (
+ name VARCHAR(50) PRIMARY KEY NOT NULL
+);
+INSERT INTO object_types (name) VALUES ('PSU');
+INSERT INTO object_types (name) VALUES ('COOLING_ITEM');
+INSERT INTO object_types (name) VALUES ('RACK');
+
+-- Allowed objects table
+DROP TABLE IF EXISTS allowed_objects;
+CREATE TABLE allowed_objects (
+ room_type VARCHAR(50) NOT NULL,
+ object_type VARCHAR(50) NOT NULL,
+
+ FOREIGN KEY (room_type) REFERENCES room_types (name),
+ FOREIGN KEY (object_type) REFERENCES object_types (name)
+);
+
+-- Allowed objects per room
+INSERT INTO allowed_objects (room_type, object_type) VALUES ('SERVER', 'RACK');
+-- INSERT INTO allowed_objects (room_type, object_type) VALUES ('POWER', 'PSU');
+-- INSERT INTO allowed_objects (room_type, object_type) VALUES ('COOLING', 'COOLING_ITEM');
+
+DELIMITER //
+
+-- Make sure objects are added to tiles in rooms they're allowed to be in.
+DROP TRIGGER IF EXISTS before_update_tiles;
+CREATE TRIGGER before_update_tiles
+ BEFORE UPDATE
+ ON tiles
+ FOR EACH ROW
+ BEGIN
+
+ IF ((NEW.object_id IS NOT NULL) AND (
+
+ -- the type of the object being added to the tile...
+ (
+ SELECT objects.type
+ FROM objects
+ JOIN tiles ON tiles.object_id = objects.id
+ WHERE tiles.id = NEW.id
+ )
+
+ -- is not in the set of allowed object types for the room the tile is in.
+ NOT IN (
+ SELECT object_type
+ FROM allowed_objects
+ JOIN rooms ON rooms.type = allowed_objects.room_type
+ WHERE rooms.id = NEW.room_id
+ )
+ ))
+ THEN
+ -- raise an error
+ SIGNAL SQLSTATE '45000'
+ SET MESSAGE_TEXT = 'ForbiddenObjectType';
+ END IF;
+ END//
+
+DELIMITER ;
+
+/*
+* PSUs are a type of object.
+*/
+
+-- PSUs on tiles
+DROP TABLE IF EXISTS psus;
+CREATE TABLE psus (
+ id INTEGER NOT NULL,
+ energy_kwh INTEGER NOT NULL CHECK (energy_kwh > 0),
+ type VARCHAR(50) NOT NULL,
+ failure_model_id INTEGER NOT NULL,
+
+ FOREIGN KEY (id) REFERENCES objects (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (failure_model_id) REFERENCES failure_models (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+
+ PRIMARY KEY (id)
+);
+
+/*
+* Cooling items are a type of object.
+*/
+
+-- Cooling items on tiles
+DROP TABLE IF EXISTS cooling_items;
+CREATE TABLE cooling_items (
+ id INTEGER NOT NULL,
+ energy_consumption_w INTEGER NOT NULL CHECK (energy_consumption_w > 0),
+ type VARCHAR(50) NOT NULL,
+ failure_model_id INTEGER NOT NULL,
+
+ FOREIGN KEY (id) REFERENCES objects (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (failure_model_id) REFERENCES failure_models (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+
+ PRIMARY KEY (id)
+);
+
+/*
+* Racks are a type of object.
+*/
+
+-- Racks on tiles
+DROP TABLE IF EXISTS racks;
+CREATE TABLE racks (
+ id INTEGER NOT NULL,
+ name TEXT,
+ capacity INTEGER NOT NULL CHECK (capacity > 0),
+ power_capacity_w INTEGER NOT NULL CHECK (power_capacity_w > 0),
+ topology_id INTEGER,
+
+ FOREIGN KEY (id) REFERENCES objects (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (topology_id) REFERENCES racks (id)
+ ON DELETE NO ACTION
+ ON UPDATE CASCADE,
+
+ PRIMARY KEY (id)
+);
+
+/*
+* A rack contains a number of machines. A rack cannot have more than its capacity of machines in it. No more than one
+* machine can occupy a position in a rack at the same time.
+*/
+
+-- Machines in racks
+DROP TABLE IF EXISTS machines;
+CREATE TABLE machines (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ rack_id INTEGER NOT NULL,
+ position INTEGER NOT NULL CHECK (position > 0),
+ topology_id INTEGER,
+
+ FOREIGN KEY (rack_id) REFERENCES racks (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (topology_id) REFERENCES machines (id)
+ ON DELETE NO ACTION
+ ON UPDATE CASCADE,
+
+ -- Prevent machines from occupying the same position in a rack.
+ UNIQUE (rack_id, position)
+);
+
+DELIMITER //
+
+-- Make sure a machine is not inserted at a position that does not exist for its rack.
+DROP TRIGGER IF EXISTS before_insert_machine;
+CREATE TRIGGER before_insert_machine
+ BEFORE INSERT
+ ON machines
+ FOR EACH ROW
+ BEGIN
+ IF (
+ NEW.position > (SELECT capacity
+ FROM racks
+ WHERE racks.id = NEW.rack_id)
+ )
+ THEN
+ -- raise an error
+ SIGNAL SQLSTATE '45000'
+ SET MESSAGE_TEXT = 'InvalidMachinePosition';
+ END IF;
+ END//
+
+DELIMITER ;
+
+/*
+* A machine can have tags for easy search and filtering.
+*/
+
+-- Tags for machines
+DROP TABLE IF EXISTS machine_tags;
+CREATE TABLE machine_tags (
+ name TEXT NOT NULL,
+ machine_id INTEGER NOT NULL,
+
+ FOREIGN KEY (machine_id) REFERENCES machines (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+/*
+* A failure model defines the probability of a machine breaking at any given time.
+*/
+
+-- Failure models
+DROP TABLE IF EXISTS failure_models;
+CREATE TABLE failure_models (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ name TEXT NOT NULL,
+ rate REAL NOT NULL CHECK (rate >= 0 AND rate <= 1)
+);
+
+/*
+* A cpu stores information about a type of cpu. The machine_cpus table keeps track of which cpus are in which machines.
+*/
+
+-- CPU specs
+DROP TABLE IF EXISTS cpus;
+CREATE TABLE cpus (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ manufacturer TEXT NOT NULL,
+ family TEXT NOT NULL,
+ generation TEXT NOT NULL,
+ model TEXT NOT NULL,
+ clock_rate_mhz INTEGER NOT NULL CHECK (clock_rate_mhz > 0),
+ number_of_cores INTEGER NOT NULL CHECK (number_of_cores > 0),
+ energy_consumption_w REAL NOT NULL CHECK (energy_consumption_w > 0),
+ failure_model_id INTEGER NOT NULL,
+
+ FOREIGN KEY (failure_model_id) REFERENCES failure_models (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- CPUs in machines
+DROP TABLE IF EXISTS machine_cpus;
+CREATE TABLE machine_cpus (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ machine_id INTEGER NOT NULL,
+ cpu_id INTEGER NOT NULL,
+
+ FOREIGN KEY (machine_id) REFERENCES machines (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (cpu_id) REFERENCES cpus (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+/*
+* A gpu stores information about a type of gpu. The machine_gpus table keeps track of which gpus are in which machines.
+*/
+
+-- GPU specs
+DROP TABLE IF EXISTS gpus;
+CREATE TABLE gpus (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ manufacturer TEXT NOT NULL,
+ family TEXT NOT NULL,
+ generation TEXT NOT NULL,
+ model TEXT NOT NULL,
+ clock_rate_mhz INTEGER NOT NULL CHECK (clock_rate_mhz > 0),
+ number_of_cores INTEGER NOT NULL CHECK (number_of_cores > 0),
+ energy_consumption_w REAL NOT NULL CHECK (energy_consumption_w > 0),
+ failure_model_id INTEGER NOT NULL,
+
+ FOREIGN KEY (failure_model_id) REFERENCES failure_models (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- GPUs in machines
+DROP TABLE IF EXISTS machine_gpus;
+CREATE TABLE machine_gpus (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ machine_id INTEGER NOT NULL,
+ gpu_id INTEGER NOT NULL,
+
+ FOREIGN KEY (machine_id) REFERENCES machines (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (gpu_id) REFERENCES gpus (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+/*
+* A memory entry describes a type of memory module. The machine_memories table keeps track of which memory modules
+* are in which machines.
+*/
+
+-- Memory specs
+DROP TABLE IF EXISTS memories;
+CREATE TABLE memories (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ manufacturer TEXT NOT NULL,
+ family TEXT NOT NULL,
+ generation TEXT NOT NULL,
+ model TEXT NOT NULL,
+ speed_mb_per_s INTEGER NOT NULL CHECK (speed_mb_per_s > 0),
+ size_mb INTEGER NOT NULL CHECK (size_mb > 0),
+ energy_consumption_w REAL NOT NULL CHECK (energy_consumption_w > 0),
+ failure_model_id INTEGER NOT NULL,
+
+ FOREIGN KEY (failure_model_id) REFERENCES failure_models (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- Memory in machines
+DROP TABLE IF EXISTS machine_memories;
+CREATE TABLE machine_memories (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ machine_id INTEGER NOT NULL,
+ memory_id INTEGER NOT NULL,
+
+ FOREIGN KEY (machine_id) REFERENCES machines (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (memory_id) REFERENCES memories (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+/*
+* A storage entry describes a type of storage device. The machine_storages table keeps track of which storage devices
+* are in which machines.
+*/
+
+-- Storage specs
+DROP TABLE IF EXISTS storages;
+CREATE TABLE storages (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ manufacturer TEXT NOT NULL,
+ family TEXT NOT NULL,
+ generation TEXT NOT NULL,
+ model TEXT NOT NULL,
+ speed_mb_per_s INTEGER NOT NULL CHECK (speed_mb_per_s > 0),
+ size_mb INTEGER NOT NULL CHECK (size_mb > 0),
+ energy_consumption_w REAL NOT NULL CHECK (energy_consumption_w > 0),
+ failure_model_id INTEGER NOT NULL,
+
+ FOREIGN KEY (failure_model_id) REFERENCES failure_models (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- Storage in machines
+DROP TABLE IF EXISTS machine_storages;
+CREATE TABLE machine_storages (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ machine_id INTEGER NOT NULL,
+ storage_id INTEGER NOT NULL,
+
+ FOREIGN KEY (machine_id) REFERENCES machines (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (storage_id) REFERENCES storages (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
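
As an illustration of the tile triggers above, a hedged sketch of which inserts succeed and which raise a signal (assuming an empty tiles table and a single room with id 1):

```sql
INSERT INTO tiles (position_x, position_y, room_id) VALUES (0, 0, 1); -- first tile in the room: accepted
INSERT INTO tiles (position_x, position_y, room_id) VALUES (1, 0, 1); -- shares an edge with (0,0): accepted
INSERT INTO tiles (position_x, position_y, room_id) VALUES (5, 5, 1); -- not adjacent to any tile: 'InvalidTilePosition'
INSERT INTO tiles (position_x, position_y, room_id) VALUES (0, 0, 1); -- position already taken: 'OccupiedTilePosition'
```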
diff --git a/core/database/test.sql b/core/database/test.sql
new file mode 100644
index 00000000..55801b76
--- /dev/null
+++ b/core/database/test.sql
@@ -0,0 +1,381 @@
+-- Users
+INSERT INTO users (google_id, email, given_name, family_name)
+VALUES ('106671218963420759042', 'l.overweel@gmail.com', 'Leon', 'Overweel');
+INSERT INTO users (google_id, email, given_name, family_name)
+VALUES ('118147174005839766927', 'jorgos.andreadis@gmail.com', 'Jorgos', 'Andreadis');
+
+-- Simulations
+INSERT INTO simulations (name, datetime_created, datetime_last_edited)
+VALUES ('Test Simulation 1', '2016-07-11T11:00:00', '2016-07-11T11:00:00');
+
+-- Authorizations
+INSERT INTO authorizations (user_id, simulation_id, authorization_level)
+VALUES (1, 1, 'OWN');
+INSERT INTO authorizations (user_id, simulation_id, authorization_level)
+VALUES (2, 1, 'OWN');
+
+-- Paths
+INSERT INTO paths (simulation_id, datetime_created)
+VALUES (1, '2016-07-11T11:00:00');
+INSERT INTO paths (simulation_id, datetime_created)
+VALUES (1, '2016-07-18T09:00:00');
+
+-- Datacenter
+INSERT INTO datacenters (starred, simulation_id) VALUES (0, 1);
+INSERT INTO datacenters (starred, simulation_id) VALUES (0, 1);
+INSERT INTO datacenters (starred, simulation_id) VALUES (0, 1);
+
+-- Sections
+INSERT INTO sections (path_id, datacenter_id, start_tick) VALUES (1, 1, 0);
+INSERT INTO sections (path_id, datacenter_id, start_tick) VALUES (1, 2, 50);
+INSERT INTO sections (path_id, datacenter_id, start_tick) VALUES (1, 3, 100);
+
+INSERT INTO sections (path_id, datacenter_id, start_tick) VALUES (2, 3, 0);
+
+-- Default Test Trace
+INSERT INTO traces (name) VALUES ('Default');
+
+-- Jobs
+INSERT INTO jobs (name, trace_id) VALUES ('Default', 1);
+
+-- Tasks
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 400000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (25, 10000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (25, 10000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (26, 10000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (80, 200000, 1, 1);
+
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (1, 5);
+
+-- Image Processing Trace
+INSERT INTO traces (name) VALUES ('Image Processing');
+
+-- Jobs
+INSERT INTO jobs (name, trace_id) VALUES ('Image Processing', 2);
+
+-- Tasks
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+
+-- Path Planning Trace
+INSERT INTO traces (name) VALUES ('Path planning');
+
+-- Jobs
+INSERT INTO jobs (name, trace_id) VALUES ('Path planning', 3);
+
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 1000000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 67);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 68);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 69);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 70);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 71);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 72);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 73);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 74);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 75);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 76);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 77);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 78);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 79);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 80);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 81);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 82);
+
+-- Parallelizable Trace
+INSERT INTO traces (name) VALUES ('Parallel heavy trace');
+
+-- Jobs
+INSERT INTO jobs (name, trace_id) VALUES ('Parallel heavy trace', 4);
+
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 4);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 900000, 1, 4);
+
+-- Sequential Trace
+INSERT INTO traces (name) VALUES ('Sequential heavy trace');
+
+-- Jobs
+INSERT INTO jobs (name, trace_id) VALUES ('Sequential heavy trace', 5);
+
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 5);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 900000, 1, 5);
+
+-- Experiments
+INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
+VALUES (1, 1, 3, 'fifo-bestfit', 'Path planning trace, FIFO', 'QUEUED', 0);
+INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
+VALUES (1, 1, 1, 'srtf-firstfit', 'Default trace, SRTF', 'QUEUED', 0);
+INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
+VALUES (1, 1, 2, 'srtf-firstfit', 'Image processing trace, SRTF', 'QUEUED', 0);
+INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
+VALUES (1, 1, 3, 'fifo-firstfit', 'Path planning trace, FIFO', 'QUEUED', 0);
+
+-- Rooms
+INSERT INTO rooms (name, datacenter_id, type) VALUES ('room 1', 1, 'SERVER');
+INSERT INTO rooms (name, datacenter_id, type, topology_id) VALUES ('room 1', 2, 'SERVER', 1);
+INSERT INTO rooms (name, datacenter_id, type, topology_id) VALUES ('room 1', 3, 'SERVER', 1);
+INSERT INTO rooms (name, datacenter_id, type) VALUES ('room 2', 3, 'SERVER');
+INSERT INTO rooms (name, datacenter_id, type) VALUES ('Power Room', 1, 'POWER');
+
+-- Tiles
+INSERT INTO tiles (position_x, position_y, room_id) VALUES (10, 10, 1);
+INSERT INTO tiles (position_x, position_y, room_id) VALUES (9, 10, 1);
+INSERT INTO tiles (position_x, position_y, room_id) VALUES (10, 11, 1);
+
+INSERT INTO tiles (position_x, position_y, room_id, topology_id) VALUES (10, 10, 2, 1);
+INSERT INTO tiles (position_x, position_y, room_id, topology_id) VALUES (9, 10, 2, 2);
+INSERT INTO tiles (position_x, position_y, room_id, topology_id) VALUES (10, 11, 2, 3);
+INSERT INTO tiles (position_x, position_y, room_id) VALUES (11, 11, 2);
+
+INSERT INTO tiles (position_x, position_y, room_id, topology_id) VALUES (10, 10, 3, 1);
+INSERT INTO tiles (position_x, position_y, room_id, topology_id) VALUES (9, 10, 3, 2);
+INSERT INTO tiles (position_x, position_y, room_id, topology_id) VALUES (10, 11, 3, 3);
+INSERT INTO tiles (position_x, position_y, room_id, topology_id) VALUES (11, 11, 3, 7);
+
+INSERT INTO tiles (position_x, position_y, room_id) VALUES (11, 10, 4);
+INSERT INTO tiles (position_x, position_y, room_id) VALUES (12, 10, 4);
+
+INSERT INTO tiles (position_x, position_y, room_id) VALUES (10, 12, 5);
+INSERT INTO tiles (position_x, position_y, room_id) VALUES (10, 13, 5);
+
+-- Racks
+INSERT INTO objects (type) VALUES ('RACK');
+INSERT INTO racks (id, capacity, name, power_capacity_w) VALUES (1, 42, 'Rack 1', 5000);
+UPDATE tiles
+SET object_id = 1
+WHERE id = 1;
+INSERT INTO objects (type) VALUES ('RACK');
+INSERT INTO racks (id, capacity, name, power_capacity_w) VALUES (2, 42, 'Rack 2', 5000);
+UPDATE tiles
+SET object_id = 2
+WHERE id = 2;
+
+INSERT INTO objects (type) VALUES ('RACK');
+INSERT INTO racks (id, capacity, name, power_capacity_w, topology_id) VALUES (3, 42, 'Rack 1', 5000, 1);
+UPDATE tiles
+SET object_id = 3
+WHERE id = 4;
+INSERT INTO objects (type) VALUES ('RACK');
+INSERT INTO racks (id, capacity, name, power_capacity_w, topology_id) VALUES (4, 42, 'Rack 2', 5000, 2);
+UPDATE tiles
+SET object_id = 4
+WHERE id = 5;
+INSERT INTO objects (type) VALUES ('RACK');
+INSERT INTO racks (id, capacity, name, power_capacity_w) VALUES (5, 42, 'Rack 3', 5000);
+UPDATE tiles
+SET object_id = 5
+WHERE id = 7;
+
+INSERT INTO objects (type) VALUES ('RACK');
+INSERT INTO racks (id, capacity, name, power_capacity_w, topology_id) VALUES (6, 42, 'Rack 1', 5000, 1);
+UPDATE tiles
+SET object_id = 6
+WHERE id = 8;
+
+INSERT INTO objects (type) VALUES ('RACK');
+INSERT INTO racks (id, capacity, name, power_capacity_w, topology_id) VALUES (7, 42, 'Rack 2', 5000, 2);
+UPDATE tiles
+SET object_id = 7
+WHERE id = 9;
+
+INSERT INTO objects (type) VALUES ('RACK');
+INSERT INTO racks (id, capacity, name, power_capacity_w, topology_id) VALUES (8, 42, 'Rack 3', 5000, 5);
+UPDATE tiles
+SET object_id = 8
+WHERE id = 11;
+
+INSERT INTO objects (type) VALUES ('RACK');
+INSERT INTO racks (id, capacity, name, power_capacity_w) VALUES (9, 42, 'Rack 4', 5000);
+UPDATE tiles
+SET object_id = 9
+WHERE id = 12;
+
+-- Machines
+INSERT INTO machines (rack_id, position) VALUES (1, 1);
+INSERT INTO machines (rack_id, position) VALUES (1, 2);
+INSERT INTO machines (rack_id, position) VALUES (1, 6);
+INSERT INTO machines (rack_id, position) VALUES (1, 10);
+INSERT INTO machines (rack_id, position) VALUES (2, 1);
+INSERT INTO machines (rack_id, position) VALUES (2, 2);
+
+INSERT INTO machines (rack_id, position, topology_id) VALUES (3, 1, 1);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (3, 2, 2);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (3, 6, 3);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (3, 10, 4);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (4, 1, 5);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (4, 2, 6);
+INSERT INTO machines (rack_id, position) VALUES (5, 1);
+INSERT INTO machines (rack_id, position) VALUES (5, 2);
+INSERT INTO machines (rack_id, position) VALUES (5, 3);
+
+INSERT INTO machines (rack_id, position, topology_id) VALUES (6, 1, 1);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (6, 2, 2);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (6, 6, 3);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (6, 10, 4);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (7, 1, 5);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (7, 2, 6);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (8, 1, 13);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (8, 2, 14);
+INSERT INTO machines (rack_id, position, topology_id) VALUES (8, 3, 15);
+INSERT INTO machines (rack_id, position) VALUES (9, 4);
+INSERT INTO machines (rack_id, position) VALUES (9, 5);
+INSERT INTO machines (rack_id, position) VALUES (9, 6);
+INSERT INTO machines (rack_id, position) VALUES (9, 7);
+
+-- Tags
+INSERT INTO machine_tags (name, machine_id) VALUES ('my fave machine', 1);
+INSERT INTO machine_tags (name, machine_id) VALUES ('my best machine', 2);
+
+-- Failure models
+INSERT INTO failure_models (name, rate) VALUES ('test_model', 0);
+
+-- CPUs
+INSERT INTO cpus (manufacturer, family, generation, model, clock_rate_mhz, number_of_cores, energy_consumption_w,
+ failure_model_id) VALUES ('intel', 'i7', 'v6', '6700k', 4100, 4, 70, 1);
+INSERT INTO cpus (manufacturer, family, generation, model, clock_rate_mhz, number_of_cores, energy_consumption_w,
+ failure_model_id) VALUES ('intel', 'i5', 'v6', '6700k', 3500, 2, 50, 1);
+
+-- GPUs
+INSERT INTO gpus (manufacturer, family, generation, model, clock_rate_mhz, number_of_cores, energy_consumption_w,
+ failure_model_id) VALUES ('NVIDIA', 'GTX', '4', '1080', 1200, 200, 250, 1);
+
+-- CPUs in machines
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (1, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (1, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (1, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (2, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (2, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (3, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (3, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (3, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (4, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (4, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (4, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (5, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (6, 1);
+
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (7, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (7, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (7, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (8, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (8, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (9, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (9, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (9, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (10, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (10, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (10, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (11, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (12, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (13, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (14, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (15, 1);
+
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (16, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (16, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (16, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (17, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (17, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (18, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (18, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (18, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (19, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (19, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (19, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (20, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (21, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (22, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (23, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (24, 1);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (25, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (26, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (27, 2);
+INSERT INTO machine_cpus (machine_id, cpu_id) VALUES (28, 2);
+
+-- GPUs
+INSERT INTO gpus (manufacturer, family, generation, model, clock_rate_mhz, number_of_cores, energy_consumption_w,
+ failure_model_id) VALUES ('nvidia', 'GeForce GTX Series', '10', '80', 1607, 2560, 70, 1);
+
+-- Memories
+
+INSERT INTO memories (manufacturer, family, generation, model, speed_mb_per_s, size_mb, energy_consumption_w,
+ failure_model_id) VALUES ('samsung', 'PC DRAM', 'K4A4G045WD', 'DDR4', 16000, 4000, 10, 1);
+
+-- Storages
+
+INSERT INTO storages (manufacturer, family, generation, model, speed_mb_per_s, size_mb, energy_consumption_w,
+ failure_model_id) VALUES ('samsung', 'EVO', '2016', 'SATA III', 6000, 250000, 10, 1);
diff --git a/core/database/view-table.py b/core/database/view-table.py
new file mode 100644
index 00000000..615b4081
--- /dev/null
+++ b/core/database/view-table.py
@@ -0,0 +1,17 @@
+import os
+import sqlite3
+import sys
+
+try:
+    BASE_DIR = sys.argv[1]
+except IndexError:
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+db_location = os.path.join(BASE_DIR, 'opendc.db')
+
+conn = sqlite3.connect(db_location)
+c = conn.cursor()
+
+rows = c.execute('SELECT * FROM ' + sys.argv[2])
+
+for row in rows:
+    print(row)