author     Fabian Mastenbroek <fabianishere@outlook.com>   2018-08-14 19:21:32 +0200
committer  GitHub <noreply@github.com>                     2018-08-14 19:21:32 +0200
commit     dd885a3a802ae4a736b72d0935e0a67353e12bae (patch)
tree       ecdadd6a0d6a2a3996de2969bf86d47974349847 /database
parent     0dfd05fe2763cac25f548b5afb96503786c7b579 (diff)
parent     47994b66619872336519079846e845b313e948b0 (diff)
Merge pull request #13 from atlarge-research/feature/sc18-experiments
feat: Implement Scheduler Reference Architecture
Diffstat (limited to 'database')
-rw-r--r--  database/Dockerfile                      |   8
-rw-r--r--  database/gwf_converter/gwf_converter.py  | 115
-rw-r--r--  database/gwf_converter/requirements.txt  |   1
-rw-r--r--  database/schema.sql                      | 118
-rw-r--r--  database/test.sql                        | 220
5 files changed, 332 insertions(+), 130 deletions(-)
diff --git a/database/Dockerfile b/database/Dockerfile
new file mode 100644
index 00000000..0e933b40
--- /dev/null
+++ b/database/Dockerfile
@@ -0,0 +1,8 @@
+FROM mariadb:10.1
+LABEL maintainer="Fabian Mastenbroek <f.s.mastenbroek@student.tudelft.nl>"
+
+# Import schema into database
+COPY schema.sql /docker-entrypoint-initdb.d
+
+# Add test data into database
+COPY test.sql /docker-entrypoint-initdb.d
diff --git a/database/gwf_converter/gwf_converter.py b/database/gwf_converter/gwf_converter.py
new file mode 100644
index 00000000..902bd93f
--- /dev/null
+++ b/database/gwf_converter/gwf_converter.py
@@ -0,0 +1,115 @@
+import os
+import sys
+
+import mysql.connector as mariadb
+
+
+class Job:
+ def __init__(self, gwf_id):
+ self.gwf_id = gwf_id
+ self.db_id = -1
+ self.tasks = []
+
+
+class Task:
+ def __init__(self, gwf_id, job, submit_time, run_time, num_processors, dependency_gwf_ids):
+ self.gwf_id = gwf_id
+ self.job = job
+ self.submit_time = submit_time
+ self.run_time = run_time
+ self.cores = num_processors
+        self.flops = 4000 * run_time * num_processors  # assume 4000 FLOPs per core per unit of run time
+ self.dependency_gwf_ids = dependency_gwf_ids
+ self.db_id = -1
+ self.dependencies = []
+
+
+def get_jobs_from_gwf_file(file_name):
+ jobs = {}
+ tasks = {}
+
+ with open(file_name, "r") as f:
+ # Skip first CSV header line
+ f.readline()
+
+ for line in f:
+ if line.startswith("#") or len(line.strip()) == 0:
+ continue
+
+ values = [col.strip() for col in line.split(",")]
+            cast_values = [int(v) for v in values[:-1]]
+ job_id, task_id, submit_time, run_time, num_processors, req_num_processors = cast_values
+ dependency_gwf_ids = [int(val) for val in values[-1].split(" ") if val != ""]
+
+ if job_id not in jobs:
+ jobs[job_id] = Job(job_id)
+
+ new_task = Task(task_id, jobs[job_id], submit_time, run_time, num_processors, dependency_gwf_ids)
+ tasks[task_id] = new_task
+ jobs[job_id].tasks.append(new_task)
+
+ for task in tasks.values():
+ for dependency_gwf_id in task.dependency_gwf_ids:
+ if dependency_gwf_id in tasks:
+ task.dependencies.append(tasks[dependency_gwf_id])
+
+ return jobs.values()
+
+
+def write_to_db(conn, trace_name, jobs):
+ cursor = conn.cursor()
+
+    trace_id = execute_insert_query(conn, cursor, "INSERT INTO traces (name) VALUES (%s)", (trace_name,))
+
+ for job in jobs:
+        job.db_id = execute_insert_query(conn, cursor, "INSERT INTO jobs (name, trace_id) VALUES (%s,%s)",
+                                         ("Job %d" % job.gwf_id, trace_id))
+
+ for task in job.tasks:
+            task.db_id = execute_insert_query(conn, cursor,
+                                              "INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) "
+                                              "VALUES (%s,%s,%s,%s)",
+                                              (task.submit_time, task.flops, task.cores, job.db_id))
+
+ for job in jobs:
+ for task in job.tasks:
+ for dependency in task.dependencies:
+                execute_insert_query(conn, cursor, "INSERT INTO task_dependencies (first_task_id, second_task_id) "
+                                                   "VALUES (%s,%s)",
+                                                   (dependency.db_id, task.db_id))
+
+def execute_insert_query(conn, cursor, sql, params=()):
+    try:
+        cursor.execute(sql, params)
+    except mariadb.Error as error:
+        # Abort instead of committing and returning a stale row id
+        sys.exit("SQL Error: {}".format(error))
+    conn.commit()
+    return cursor.lastrowid
+
+
+def main(trace_path, trace_name=None):
+    if trace_name is None:
+        trace_name = os.path.splitext(os.path.basename(trace_path))[0]
+ gwf_jobs = get_jobs_from_gwf_file(trace_path)
+
+    host = os.environ.get('PERSISTENCE_HOST', 'localhost')
+    user = os.environ.get('PERSISTENCE_USER', 'opendc')
+    password = os.environ.get('PERSISTENCE_PASSWORD', 'opendcpassword')
+    database = os.environ.get('PERSISTENCE_DATABASE', 'opendc')
+ conn = mariadb.connect(host=host, user=user, password=password, database=database)
+ write_to_db(conn, trace_name, gwf_jobs)
+ conn.close()
+
+
+if __name__ == "__main__":
+ if len(sys.argv) < 2:
+ sys.exit("Usage: %s file [name]" % sys.argv[0])
+
+ if sys.argv[1] in ("-a", "--all"):
+ for f in os.listdir("traces"):
+ if f.endswith(".gwf"):
+ print("Converting {}".format(f))
+ main(os.path.join("traces", f))
+ else:
+        main(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else None)
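
For reference, here is a minimal, self-contained sketch of the per-record conversion the script above performs. The sample GWF values are hypothetical; the factor of 4000 FLOPs per core per unit of run time is the estimate hard-coded in the converter.

# Hypothetical GWF record: job 3, task 42, submitted at tick 10,
# running 120 time units on 4 processors, depending on tasks 40 and 41.
line = "3, 42, 10, 120, 4, 4, 40 41"

values = [col.strip() for col in line.split(",")]
(job_id, task_id, submit_time, run_time,
 num_processors, req_num_processors) = [int(v) for v in values[:-1]]
dependency_ids = [int(v) for v in values[-1].split(" ") if v != ""]

# Same FLOP estimate the converter uses.
flops = 4000 * run_time * num_processors
assert dependency_ids == [40, 41]
assert flops == 1920000  # 4000 * 120 * 4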
diff --git a/database/gwf_converter/requirements.txt b/database/gwf_converter/requirements.txt
new file mode 100644
index 00000000..0eaebf12
--- /dev/null
+++ b/database/gwf_converter/requirements.txt
@@ -0,0 +1 @@
+mysql-connector-python
diff --git a/database/schema.sql b/database/schema.sql
index 7f0d5879..f6286260 100644
--- a/database/schema.sql
+++ b/database/schema.sql
@@ -63,7 +63,7 @@ INSERT INTO authorization_levels (level) VALUES ('VIEW');
* - DD is the two-digit day of the month (1-31)
* - HH is the two-digit hours part (0-23)
* - MM is the two-digit minutes part (0-59)
-* - SS is the two-digit secodns part (0-59)
+* - SS is the two-digit seconds part (0-59)
*/
-- Simulation
@@ -146,9 +146,18 @@ DROP TABLE IF EXISTS schedulers;
CREATE TABLE schedulers (
name VARCHAR(50) PRIMARY KEY NOT NULL
);
-INSERT INTO schedulers (name) VALUES ('DEFAULT');
-INSERT INTO schedulers (name) VALUES ('SRTF');
-INSERT INTO schedulers (name) VALUES ('FIFO');
+INSERT INTO schedulers (name) VALUES ('FIFO-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-RANDOM');
+INSERT INTO schedulers (name) VALUES ('SRTF-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-RANDOM');
+INSERT INTO schedulers (name) VALUES ('RANDOM-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-RANDOM');
/*
* Each simulation has a single trace. A trace contains tasks and their start times.
@@ -176,17 +185,28 @@ CREATE TABLE jobs (
-- A task that's defined in terms of how many flops (floating point operations) it takes to complete
DROP TABLE IF EXISTS tasks;
CREATE TABLE tasks (
- id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
- start_tick INTEGER NOT NULL CHECK (start_tick >= 0),
- total_flop_count INTEGER NOT NULL,
- job_id INTEGER NOT NULL,
- task_dependency_id INTEGER NULL,
- parallelizability VARCHAR(50) NOT NULL,
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ start_tick INTEGER NOT NULL CHECK (start_tick >= 0),
+ total_flop_count BIGINT NOT NULL CHECK (total_flop_count >= 0),
+ core_count INTEGER NOT NULL CHECK (core_count >= 0),
+ job_id INTEGER NOT NULL,
FOREIGN KEY (job_id) REFERENCES jobs (id)
ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- A dependency between two tasks.
+DROP TABLE IF EXISTS task_dependencies;
+CREATE TABLE task_dependencies (
+ first_task_id INTEGER NOT NULL,
+ second_task_id INTEGER NOT NULL,
+
+ PRIMARY KEY (first_task_id, second_task_id),
+ FOREIGN KEY (first_task_id) REFERENCES tasks (id)
+ ON DELETE CASCADE
ON UPDATE CASCADE,
- FOREIGN KEY (task_dependency_id) REFERENCES tasks (id)
+ FOREIGN KEY (second_task_id) REFERENCES tasks (id)
ON DELETE CASCADE
ON UPDATE CASCADE
);
@@ -212,11 +232,69 @@ CREATE TABLE task_states (
ON UPDATE CASCADE
);
+-- The measurements of a single stage
+DROP TABLE IF EXISTS stage_measurements;
+CREATE TABLE stage_measurements (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ experiment_id INTEGER NOT NULL,
+ tick INTEGER NOT NULL CHECK (tick >= 0),
+ stage INTEGER NOT NULL CHECK (stage >= 0),
+ cpu BIGINT NOT NULL CHECK (cpu >= 0),
+ wall BIGINT NOT NULL CHECK (wall >= 0),
+ size INTEGER NOT NULL CHECK (size >= 0),
+ iterations INTEGER NOT NULL CHECK (iterations >= 0),
+
+ FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- Metrics of a job task
+DROP TABLE IF EXISTS job_metrics;
+CREATE TABLE job_metrics (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ experiment_id INTEGER NOT NULL,
+ job_id INTEGER NOT NULL,
+ critical_path INTEGER NOT NULL CHECK (critical_path >= 0),
+ critical_path_length INTEGER NOT NULL CHECK (critical_path_length >= 0),
+ waiting_time INTEGER NOT NULL CHECK (waiting_time >= 0),
+ makespan INTEGER NOT NULL CHECK (makespan >= 0),
+ nsl INTEGER NOT NULL CHECK (nsl >= 0),
+
+ FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (job_id) REFERENCES jobs (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- Metrics of a single task
+DROP TABLE IF EXISTS task_metrics;
+CREATE TABLE task_metrics (
+ id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+ experiment_id INTEGER NOT NULL,
+ task_id INTEGER NOT NULL,
+ job_id INTEGER NOT NULL,
+ waiting INTEGER NOT NULL CHECK (waiting >= 0),
+ execution INTEGER NOT NULL CHECK (execution >= 0),
+ turnaround INTEGER NOT NULL CHECK (turnaround >= 0),
+
+ FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (task_id) REFERENCES tasks (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE,
+ FOREIGN KEY (job_id) REFERENCES jobs (id)
+ ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
-- A machine state
DROP TABLE IF EXISTS machine_states;
CREATE TABLE machine_states (
id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
- task_id INTEGER,
machine_id INTEGER NOT NULL,
experiment_id INTEGER NOT NULL,
tick INTEGER NOT NULL,
@@ -224,7 +302,6 @@ CREATE TABLE machine_states (
in_use_memory_mb INTEGER,
load_fraction REAL CHECK (load_fraction >= 0 AND load_fraction <= 1),
- FOREIGN KEY (task_id) REFERENCES tasks (id),
FOREIGN KEY (machine_id) REFERENCES machines (id)
ON DELETE CASCADE
ON UPDATE CASCADE,
@@ -318,8 +395,9 @@ DELIMITER //
-- and tiles in a room are connected.
DROP TRIGGER IF EXISTS before_insert_tiles_check_existence;
CREATE TRIGGER before_insert_tiles_check_existence
-BEFORE INSERT ON tiles
-FOR EACH ROW
+ BEFORE INSERT
+ ON tiles
+ FOR EACH ROW
BEGIN
-- checking tile overlap
-- a tile already exists such that..
@@ -416,8 +494,9 @@ DELIMITER //
-- Make sure objects are added to tiles in rooms they're allowed to be in.
DROP TRIGGER IF EXISTS before_update_tiles;
CREATE TRIGGER before_update_tiles
-BEFORE UPDATE ON tiles
-FOR EACH ROW
+ BEFORE UPDATE
+ ON tiles
+ FOR EACH ROW
BEGIN
IF ((NEW.object_id IS NOT NULL) AND (
@@ -543,8 +622,9 @@ DELIMITER //
-- Make sure a machine is not inserted at a position that does not exist for its rack.
DROP TRIGGER IF EXISTS before_insert_machine;
CREATE TRIGGER before_insert_machine
-BEFORE INSERT ON machines
-FOR EACH ROW
+ BEFORE INSERT
+ ON machines
+ FOR EACH ROW
BEGIN
IF (
NEW.position > (SELECT capacity
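
To illustrate how the new task_dependencies table replaces the dropped task_dependency_id column, the sketch below lists each task of a job together with its prerequisites. It is a minimal example, not part of the repository: the connection settings mirror the converter's defaults, and the job id 1 is hypothetical.

import mysql.connector as mariadb

conn = mariadb.connect(host="localhost", user="opendc",
                       password="opendcpassword", database="opendc")
cursor = conn.cursor()

# One row per dependency edge: first_task_id must finish before
# second_task_id may start, so a task can have any number of prerequisites.
cursor.execute(
    "SELECT d.second_task_id, d.first_task_id "
    "FROM task_dependencies d "
    "JOIN tasks t ON t.id = d.second_task_id "
    "WHERE t.job_id = %s", (1,))

for task_id, prerequisite_id in cursor:
    print("task {} waits for task {}".format(task_id, prerequisite_id))

conn.close()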
diff --git a/database/test.sql b/database/test.sql
index fa7fb8aa..55801b76 100644
--- a/database/test.sql
+++ b/database/test.sql
@@ -39,12 +39,13 @@ INSERT INTO traces (name) VALUES ('Default');
INSERT INTO jobs (name, trace_id) VALUES ('Default', 1);
-- Tasks
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 400000, 1, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (25, 10000, 1, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (25, 10000, 1, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (26, 10000, 1, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (80, 200000, 1, 1, 'PARALLEL');
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 400000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (25, 10000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (25, 10000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (26, 10000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (80, 200000, 1, 1);
+
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (1, 5);
-- Image Processing Trace
INSERT INTO traces (name) VALUES ('Image Processing');
@@ -53,66 +54,66 @@ INSERT INTO traces (name) VALUES ('Image Processing');
INSERT INTO jobs (name, trace_id) VALUES ('Image Processing', 2);
-- Tasks
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
-- Path Planning Trace
INSERT INTO traces (name) VALUES ('Path planning');
@@ -120,39 +121,40 @@ INSERT INTO traces (name) VALUES ('Path planning');
-- Jobs
INSERT INTO jobs (name, trace_id) VALUES ('Path planning', 3);
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 1000000, 3, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (11, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (12, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (13, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (14, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (11, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (12, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (13, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (14, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (11, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (12, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (13, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (14, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (11, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (12, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (13, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (14, 200000, 3, 66, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 1000000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 67);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 68);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 69);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 70);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 71);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 72);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 73);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 74);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 75);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 76);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 77);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 78);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 79);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 80);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 81);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 82);
-- Parallelizable Trace
INSERT INTO traces (name) VALUES ('Parallel heavy trace');
@@ -160,8 +162,8 @@ INSERT INTO traces (name) VALUES ('Parallel heavy trace');
-- Jobs
INSERT INTO jobs (name, trace_id) VALUES ('Parallel heavy trace', 4);
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 4, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 900000, 4, 'PARALLEL');
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 4);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 900000, 1, 4);
-- Sequential Trace
INSERT INTO traces (name) VALUES ('Sequential heavy trace');
@@ -169,22 +171,18 @@ INSERT INTO traces (name) VALUES ('Sequential heavy trace');
-- Jobs
INSERT INTO jobs (name, trace_id) VALUES ('Sequential heavy trace', 5);
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 5, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 900000, 5, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 5);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 900000, 1, 5);
-- Experiments
INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 1, 'SRTF', 'Default trace, SRTF', 'QUEUED', 110);
-INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 2, 'SRTF', 'Image processing trace, SRTF', 'QUEUED', 0);
-INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 3, 'SRTF', 'Path planning trace, FIFI', 'QUEUED', 0);
+VALUES (1, 1, 3, 'FIFO-BESTFIT', 'Path planning trace, FIFO', 'QUEUED', 0);
INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 1, 'FIFO', 'Default trace, SRTF', 'QUEUED', 0);
+VALUES (1, 1, 1, 'SRTF-FIRSTFIT', 'Default trace, SRTF', 'QUEUED', 0);
INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 2, 'FIFO', 'Image processing trace, SRTF', 'QUEUED', 0);
+VALUES (1, 1, 2, 'SRTF-FIRSTFIT', 'Image processing trace, SRTF', 'QUEUED', 0);
INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 3, 'FIFO', 'Path planning trace, FIFO', 'QUEUED', 0);
+VALUES (1, 1, 3, 'FIFO-FIRSTFIT', 'Path planning trace, FIFO', 'QUEUED', 0);
-- Rooms
INSERT INTO rooms (name, datacenter_id, type) VALUES ('room 1', 1, 'SERVER');
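
Note that the hard-coded ids in the task_dependencies rows above rely on AUTO_INCREMENT order: the Default trace inserts tasks 1-5, Image Processing inserts tasks 6-65, so the Path planning root task receives id 66 and its dependents ids 67-82. Below is a minimal sanity-check sketch for after this file is loaded, using the same hypothetical connection settings as the converter's defaults.

import mysql.connector as mariadb

conn = mariadb.connect(host="localhost", user="opendc",
                       password="opendcpassword", database="opendc")
cursor = conn.cursor()

# The Path planning root task (id 66) should fan out to 16 dependent tasks.
cursor.execute(
    "SELECT COUNT(*) FROM task_dependencies WHERE first_task_id = %s", (66,))
(fan_out,) = cursor.fetchone()
print("task 66 has {} dependents (expected 16)".format(fan_out))

conn.close()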