From f8df97d51d56e27ae880a2b5bd71149559e91b4a Mon Sep 17 00:00:00 2001
From: Fabian Mastenbroek
Date: Thu, 15 Feb 2018 23:03:01 +0100
Subject: chore: Automatically import database schema in Docker image

This change makes the database image automatically import the schema and
test data.
---
 database/Dockerfile | 8 ++++++++
 docker-compose.yml  | 4 +++-
 2 files changed, 11 insertions(+), 1 deletion(-)
 create mode 100644 database/Dockerfile

diff --git a/database/Dockerfile b/database/Dockerfile
new file mode 100644
index 00000000..0e933b40
--- /dev/null
+++ b/database/Dockerfile
@@ -0,0 +1,8 @@
+FROM mariadb:10.1
+MAINTAINER Fabian Mastenbroek
+
+# Import schema into database
+ADD schema.sql /docker-entrypoint-initdb.d
+
+# Add test data into database
+ADD test.sql /docker-entrypoint-initdb.d
diff --git a/docker-compose.yml b/docker-compose.yml
index ac4bc27f..2407e9ff 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -29,7 +29,9 @@ services:
       - PERSISTENCE_USER=opendc
       - PERSISTENCE_PASSWORD=opendcpassword
   mariadb:
-    image: mariadb:10.1
+    build:
+      context: ./database
+    image: database
     restart: on-failure
     ports:
       - "3306:3306" # comment this line out in production
-- cgit v1.2.3


From dfddb6c25c96598295ad8b50092c9f4dd946e560 Mon Sep 17 00:00:00 2001
From: Fabian Mastenbroek
Date: Fri, 20 Jul 2018 00:19:03 +0200
Subject: feat: Implement Scheduler Reference Architecture

This change implements parts of the Datacenter Scheduling Reference
Architecture published at SC18. This commit consists of the following
changes to the database schema:

- **Removal of the `parallelizability` column in `Task`**
  This field was not used by the simulator; we opted instead for a field
  describing the number of cores a task can run on.
- **Removal of the `task_id` column in `MachineState`**
  This field no longer made sense now that a machine can run multiple
  tasks. Fortunately, it is not used in the frontend.
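For reference, each scheduler name introduced by this change (see the schema
diff below) pairs a task-ordering policy (FIFO, SRTF, RANDOM) with a
machine-allocation policy (FIRSTFIT, BESTFIT, WORSTFIT, RANDOM). A minimal
sketch of how such a two-part name decomposes; the helper is illustrative
and not part of the simulator:

```python
# Hypothetical helper: split a scheduler name such as 'SRTF-BESTFIT' into
# its task-ordering and machine-allocation stages. Illustrative only.
SORTING_POLICIES = {'FIFO', 'SRTF', 'RANDOM'}
ALLOCATION_POLICIES = {'FIRSTFIT', 'BESTFIT', 'WORSTFIT', 'RANDOM'}


def parse_scheduler(name):
    """Split a scheduler name into its (sorting, allocation) policies."""
    sorting, allocation = name.upper().split('-', 1)
    if sorting not in SORTING_POLICIES or allocation not in ALLOCATION_POLICIES:
        raise ValueError('Unknown scheduler: %s' % name)
    return sorting, allocation


print(parse_scheduler('srtf-bestfit'))  # ('SRTF', 'BESTFIT')
```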
--- database/schema.sql | 47 ++++++++++++++++++++++-------- database/test.sql | 82 ++++++++++++++++++++++++++--------------------------- docker-compose.yml | 4 +-- opendc-frontend | 2 +- opendc-simulator | 2 +- opendc-web-server | 2 +- 6 files changed, 80 insertions(+), 59 deletions(-) diff --git a/database/schema.sql b/database/schema.sql index 7f0d5879..15dbf043 100644 --- a/database/schema.sql +++ b/database/schema.sql @@ -63,7 +63,7 @@ INSERT INTO authorization_levels (level) VALUES ('VIEW'); * - DD is the two-digit day of the month (1-31) * - HH is the two-digit hours part (0-23) * - MM is the two-digit minutes part (0-59) -* - SS is the two-digit secodns part (0-59) +* - SS is the two-digit seconds part (0-59) */ -- Simulation @@ -146,9 +146,18 @@ DROP TABLE IF EXISTS schedulers; CREATE TABLE schedulers ( name VARCHAR(50) PRIMARY KEY NOT NULL ); -INSERT INTO schedulers (name) VALUES ('DEFAULT'); -INSERT INTO schedulers (name) VALUES ('SRTF'); -INSERT INTO schedulers (name) VALUES ('FIFO'); +INSERT INTO schedulers (name) VALUES ('FIFO-FIRSTFIT'); +INSERT INTO schedulers (name) VALUES ('FIFO-BESTFIT'); +INSERT INTO schedulers (name) VALUES ('FIFO-WORSTFIT'); +INSERT INTO schedulers (name) VALUES ('FIFO-RANDOM'); +INSERT INTO schedulers (name) VALUES ('SRTF-FIRSTFIT'); +INSERT INTO schedulers (name) VALUES ('SRTF-BESTFIT'); +INSERT INTO schedulers (name) VALUES ('SRTF-WORSTFIT'); +INSERT INTO schedulers (name) VALUES ('SRTF-RANDOM'); +INSERT INTO schedulers (name) VALUES ('RANDOM-FIRSTFIT'); +INSERT INTO schedulers (name) VALUES ('RANDOM-BESTFIT'); +INSERT INTO schedulers (name) VALUES ('RANDOM-WORSTFIT'); +INSERT INTO schedulers (name) VALUES ('RANDOM-RANDOM'); /* * Each simulation has a single trace. A trace contains tasks and their start times. @@ -180,13 +189,24 @@ CREATE TABLE tasks ( start_tick INTEGER NOT NULL CHECK (start_tick >= 0), total_flop_count INTEGER NOT NULL, job_id INTEGER NOT NULL, - task_dependency_id INTEGER NULL, parallelizability VARCHAR(50) NOT NULL, FOREIGN KEY (job_id) REFERENCES jobs (id) + ON DELETE CASCADE + ON UPDATE CASCADE +); + +-- A dependency between two tasks. +DROP TABLE IF EXISTS task_dependencies; +CREATE TABLE task_dependencies ( + first_task_id INTEGER NOT NULL, + second_task_id INTEGER NOT NULL, + + PRIMARY KEY (first_task_id, second_task_id), + FOREIGN KEY (first_task_id) REFERENCES tasks (id) ON DELETE CASCADE ON UPDATE CASCADE, - FOREIGN KEY (task_dependency_id) REFERENCES tasks (id) + FOREIGN KEY (second_task_id) REFERENCES tasks (id) ON DELETE CASCADE ON UPDATE CASCADE ); @@ -318,8 +338,9 @@ DELIMITER // -- and tiles in a room are connected. DROP TRIGGER IF EXISTS before_insert_tiles_check_existence; CREATE TRIGGER before_insert_tiles_check_existence -BEFORE INSERT ON tiles -FOR EACH ROW + BEFORE INSERT + ON tiles + FOR EACH ROW BEGIN -- checking tile overlap -- a tile already exists such that.. @@ -416,8 +437,9 @@ DELIMITER // -- Make sure objects are added to tiles in rooms they're allowed to be in. DROP TRIGGER IF EXISTS before_update_tiles; CREATE TRIGGER before_update_tiles -BEFORE UPDATE ON tiles -FOR EACH ROW + BEFORE UPDATE + ON tiles + FOR EACH ROW BEGIN IF ((NEW.object_id IS NOT NULL) AND ( @@ -543,8 +565,9 @@ DELIMITER // -- Make sure a machine is not inserted at a position that does not exist for its rack. 
DROP TRIGGER IF EXISTS before_insert_machine; CREATE TRIGGER before_insert_machine -BEFORE INSERT ON machines -FOR EACH ROW + BEFORE INSERT + ON machines + FOR EACH ROW BEGIN IF ( NEW.position > (SELECT capacity diff --git a/database/test.sql b/database/test.sql index fa7fb8aa..a2c6d165 100644 --- a/database/test.sql +++ b/database/test.sql @@ -43,8 +43,9 @@ INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALU INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (25, 10000, 1, 'PARALLEL'); INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (25, 10000, 1, 'PARALLEL'); INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (26, 10000, 1, 'PARALLEL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (80, 200000, 1, 1, 'PARALLEL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (80, 200000, 1, 'PARALLEL'); + +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (1, 5); -- Image Processing Trace INSERT INTO traces (name) VALUES ('Image Processing'); @@ -121,38 +122,39 @@ INSERT INTO traces (name) VALUES ('Path planning'); INSERT INTO jobs (name, trace_id) VALUES ('Path planning', 3); INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 1000000, 3, 'PARALLEL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (11, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (12, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (13, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (14, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (11, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (12, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (13, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (14, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (11, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (12, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (13, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (14, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (11, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (12, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (13, 200000, 3, 66, 'SEQUENTIAL'); -INSERT INTO tasks 
(start_tick, total_flop_count, job_id, task_dependency_id, parallelizability) -VALUES (14, 200000, 3, 66, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL'); + +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 67); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 68); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 69); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 70); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 71); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 72); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 73); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 74); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 75); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 76); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 77); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 78); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 79); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 80); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 81); +INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 82); -- Parallelizable Trace INSERT INTO traces (name) VALUES ('Parallel heavy trace'); @@ -174,17 +176,13 @@ INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALU -- Experiments INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick) 
-VALUES (1, 1, 1, 'SRTF', 'Default trace, SRTF', 'QUEUED', 110); -INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick) -VALUES (1, 1, 2, 'SRTF', 'Image processing trace, SRTF', 'QUEUED', 0); -INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick) -VALUES (1, 1, 3, 'SRTF', 'Path planning trace, FIFI', 'QUEUED', 0); +VALUES (1, 1, 3, 'fifo-bestfit', 'Path planning trace, FIFO', 'QUEUED', 0); INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick) -VALUES (1, 1, 1, 'FIFO', 'Default trace, SRTF', 'QUEUED', 0); +VALUES (1, 1, 1, 'srtf-firstfit', 'Default trace, SRTF', 'QUEUED', 0); INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick) -VALUES (1, 1, 2, 'FIFO', 'Image processing trace, SRTF', 'QUEUED', 0); +VALUES (1, 1, 2, 'srtf-firstfit', 'Image processing trace, SRTF', 'QUEUED', 0); INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick) -VALUES (1, 1, 3, 'FIFO', 'Path planning trace, FIFO', 'QUEUED', 0); +VALUES (1, 1, 3, 'fifo-firstfit', 'Path planning trace, FIFO', 'QUEUED', 0); -- Rooms INSERT INTO rooms (name, datacenter_id, type) VALUES ('room 1', 1, 'SERVER'); diff --git a/docker-compose.yml b/docker-compose.yml index 2407e9ff..a89b7260 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -version: "2" +version: "3" services: frontend: build: ./ @@ -17,7 +17,7 @@ services: simulator: build: context: ./opendc-simulator - dockerfile: opendc-integration-jpa/mysql/Dockerfile + dockerfile: opendc-model-odc/setup/Dockerfile image: simulator restart: on-failure links: diff --git a/opendc-frontend b/opendc-frontend index 4d08462e..5ce639ef 160000 --- a/opendc-frontend +++ b/opendc-frontend @@ -1 +1 @@ -Subproject commit 4d08462eb8d662ea153c6183c9aca318a3c51390 +Subproject commit 5ce639ef8c86ebc296b7bce30984a84f9a1eb7b0 diff --git a/opendc-simulator b/opendc-simulator index 8666a78b..b4c7f400 160000 --- a/opendc-simulator +++ b/opendc-simulator @@ -1 +1 @@ -Subproject commit 8666a78b86a40c1d8dab28dd18e841318c01f97f +Subproject commit b4c7f4007ee8890ff25b0fcad4cd650cb0ef4f9a diff --git a/opendc-web-server b/opendc-web-server index b87faa0b..9a2a9854 160000 --- a/opendc-web-server +++ b/opendc-web-server @@ -1 +1 @@ -Subproject commit b87faa0bccf661a2b6a948d9420d52a19a63d9a2 +Subproject commit 9a2a98548ab50217a78d433a13da72af3001a785 -- cgit v1.2.3 From 1e7e32cf7c65a5c9138c54495f1cc3f277529dd1 Mon Sep 17 00:00:00 2001 From: Georgios Andreadis Date: Tue, 6 Feb 2018 11:59:54 +0100 Subject: feat: Add GWF conversion script This change adds a conversion script that allows users to import traces from the Grid Workload Archive (see http://gwa.ewi.tudelft.nl/) into the OpenDC database. 
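As context for the converter below: each GWF row is read as comma-separated
columns, with the final column holding a space-separated list of dependency
task IDs, and a task's FLOP count is estimated as
4000 * run_time * num_processors. A minimal sketch of that parse, using a
made-up sample row:

```python
# Parse a single GWF row the way the converter below does.
# The sample row is fabricated for illustration.
row = "1, 5, 10, 120, 4, 4, 2 3"
cols = [c.strip() for c in row.split(",")]
job_id, task_id, submit_time, run_time, procs, req_procs = (int(c) for c in cols[:-1])
deps = [int(v) for v in cols[-1].split(" ") if v != ""]
flops = 4000 * run_time * procs  # rough FLOP estimate used by the converter
print(task_id, submit_time, flops, deps)  # 5 10 1920000 [2, 3]
```

The script is invoked as `gwf_converter.py file [name]`, or with `-a`/`--all`
to convert every `.gwf` file under `traces/`.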
--- database/gwf_converter/gwf_converter.py | 115 +++++++++++++++++++++ database/gwf_converter/requirements.txt | 1 + database/schema.sql | 12 +-- database/test.sql | 172 ++++++++++++++++---------------- 4 files changed, 207 insertions(+), 93 deletions(-) create mode 100644 database/gwf_converter/gwf_converter.py create mode 100644 database/gwf_converter/requirements.txt diff --git a/database/gwf_converter/gwf_converter.py b/database/gwf_converter/gwf_converter.py new file mode 100644 index 00000000..902bd93f --- /dev/null +++ b/database/gwf_converter/gwf_converter.py @@ -0,0 +1,115 @@ +import os +import sys + +import mysql.connector as mariadb + + +class Job: + def __init__(self, gwf_id): + self.gwf_id = gwf_id + self.db_id = -1 + self.tasks = [] + + +class Task: + def __init__(self, gwf_id, job, submit_time, run_time, num_processors, dependency_gwf_ids): + self.gwf_id = gwf_id + self.job = job + self.submit_time = submit_time + self.run_time = run_time + self.cores = num_processors + self.flops = 4000 * run_time * num_processors + self.dependency_gwf_ids = dependency_gwf_ids + self.db_id = -1 + self.dependencies = [] + + +def get_jobs_from_gwf_file(file_name): + jobs = {} + tasks = {} + + with open(file_name, "r") as f: + # Skip first CSV header line + f.readline() + + for line in f: + if line.startswith("#") or len(line.strip()) == 0: + continue + + values = [col.strip() for col in line.split(",")] + cast_values = [int(values[i]) for i in range(len(values) - 1)] + job_id, task_id, submit_time, run_time, num_processors, req_num_processors = cast_values + dependency_gwf_ids = [int(val) for val in values[-1].split(" ") if val != ""] + + if job_id not in jobs: + jobs[job_id] = Job(job_id) + + new_task = Task(task_id, jobs[job_id], submit_time, run_time, num_processors, dependency_gwf_ids) + tasks[task_id] = new_task + jobs[job_id].tasks.append(new_task) + + for task in tasks.values(): + for dependency_gwf_id in task.dependency_gwf_ids: + if dependency_gwf_id in tasks: + task.dependencies.append(tasks[dependency_gwf_id]) + + return jobs.values() + + +def write_to_db(conn, trace_name, jobs): + cursor = conn.cursor() + + trace_id = execute_insert_query(conn, cursor, "INSERT INTO traces (name) VALUES ('%s')" % trace_name) + + for job in jobs: + job.db_id = execute_insert_query(conn, cursor, "INSERT INTO jobs (name, trace_id) VALUES ('%s',%d)" + % ("Job %d" % job.gwf_id, trace_id)) + + for task in job.tasks: + task.db_id = execute_insert_query(conn, cursor, + "INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) " + "VALUES (%d,%d,%d,%d)" + % (task.submit_time, task.flops, task.cores, job.db_id)) + + for job in jobs: + for task in job.tasks: + for dependency in task.dependencies: + execute_insert_query(conn, cursor, "INSERT INTO task_dependencies (first_task_id, second_task_id) " + "VALUES (%d,%d)" + % (dependency.db_id, task.db_id)) + +def execute_insert_query(conn, cursor, sql): + try: + cursor.execute(sql) + except mariadb.Error as error: + print("SQL Error: {}".format(error)) + + conn.commit() + return cursor.lastrowid + + +def main(trace_path): + trace_name = sys.argv[2] if (len(sys.argv) > 2) else \ + os.path.splitext(os.path.basename(trace_path))[0] + gwf_jobs = get_jobs_from_gwf_file(trace_path) + + host = os.environ.get('PERSISTENCE_HOST','localhost') + user = os.environ.get('PERSISTENCE_USER','opendc') + password = os.environ.get('PERSISTENCE_PASSWORD','opendcpassword') + database = os.environ.get('PERSISTENCE_DATABASE','opendc') + conn = mariadb.connect(host=host, 
user=user, password=password, database=database) + write_to_db(conn, trace_name, gwf_jobs) + conn.close() + + +if __name__ == "__main__": + if len(sys.argv) < 2: + sys.exit("Usage: %s file [name]" % sys.argv[0]) + + if sys.argv[1] in ("-a", "--all"): + for f in os.listdir("traces"): + if f.endswith(".gwf"): + print("Converting {}".format(f)) + main(os.path.join("traces", f)) + else: + main(sys.argv[1]) diff --git a/database/gwf_converter/requirements.txt b/database/gwf_converter/requirements.txt new file mode 100644 index 00000000..0eaebf12 --- /dev/null +++ b/database/gwf_converter/requirements.txt @@ -0,0 +1 @@ +mysql diff --git a/database/schema.sql b/database/schema.sql index 15dbf043..aa0ad1e5 100644 --- a/database/schema.sql +++ b/database/schema.sql @@ -185,11 +185,11 @@ CREATE TABLE jobs ( -- A task that's defined in terms of how many flops (floating point operations) it takes to complete DROP TABLE IF EXISTS tasks; CREATE TABLE tasks ( - id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT, - start_tick INTEGER NOT NULL CHECK (start_tick >= 0), - total_flop_count INTEGER NOT NULL, - job_id INTEGER NOT NULL, - parallelizability VARCHAR(50) NOT NULL, + id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT, + start_tick INTEGER NOT NULL CHECK (start_tick >= 0), + total_flop_count BIGINT NOT NULL CHECK (total_flop_count >= 0), + core_count INTEGER NOT NULL CHECK (core_count >= 0), + job_id INTEGER NOT NULL, FOREIGN KEY (job_id) REFERENCES jobs (id) ON DELETE CASCADE @@ -236,7 +236,6 @@ CREATE TABLE task_states ( DROP TABLE IF EXISTS machine_states; CREATE TABLE machine_states ( id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT, - task_id INTEGER, machine_id INTEGER NOT NULL, experiment_id INTEGER NOT NULL, tick INTEGER NOT NULL, @@ -244,7 +243,6 @@ CREATE TABLE machine_states ( in_use_memory_mb INTEGER, load_fraction REAL CHECK (load_fraction >= 0 AND load_fraction <= 1), - FOREIGN KEY (task_id) REFERENCES tasks (id), FOREIGN KEY (machine_id) REFERENCES machines (id) ON DELETE CASCADE ON UPDATE CASCADE, diff --git a/database/test.sql b/database/test.sql index a2c6d165..55801b76 100644 --- a/database/test.sql +++ b/database/test.sql @@ -39,11 +39,11 @@ INSERT INTO traces (name) VALUES ('Default'); INSERT INTO jobs (name, trace_id) VALUES ('Default', 1); -- Tasks -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 400000, 1, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (25, 10000, 1, 'PARALLEL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (25, 10000, 1, 'PARALLEL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (26, 10000, 1, 'PARALLEL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (80, 200000, 1, 'PARALLEL'); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 400000, 1, 1); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (25, 10000, 1, 1); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (25, 10000, 1, 1); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (26, 10000, 1, 1); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (80, 200000, 1, 1); INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (1, 5); @@ -54,66 +54,66 @@ INSERT INTO traces (name) VALUES ('Image Processing'); INSERT INTO jobs (name, trace_id) VALUES ('Image Processing', 2); 
-- Tasks -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 
'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2); +INSERT INTO tasks 
(start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 
2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2); -- Path Planning Trace INSERT INTO traces (name) VALUES ('Path planning'); @@ -121,23 +121,23 @@ INSERT INTO traces (name) VALUES ('Path planning'); -- Jobs INSERT INTO jobs (name, trace_id) VALUES ('Path planning', 3); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 1000000, 3, 'PARALLEL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, 
total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 1000000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3); INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 67); INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 68); @@ -162,8 +162,8 @@ INSERT INTO traces (name) VALUES ('Parallel heavy trace'); -- Jobs INSERT INTO jobs (name, trace_id) VALUES ('Parallel heavy trace', 4); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 4, 'SEQUENTIAL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 900000, 4, 'PARALLEL'); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 4); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 900000, 1, 4); -- Sequential Trace INSERT INTO traces (name) VALUES ('Sequential heavy trace'); @@ -171,8 +171,8 @@ INSERT INTO traces (name) VALUES ('Sequential heavy trace'); -- Jobs INSERT INTO jobs (name, trace_id) VALUES ('Sequential heavy trace', 5); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 5, 'PARALLEL'); -INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 900000, 5, 'SEQUENTIAL'); +INSERT INTO tasks (start_tick, 
total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 5); +INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 900000, 1, 5); -- Experiments INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick) -- cgit v1.2.3 From d8f5e2a7270a6aed04dac17048c6ef7229d3caab Mon Sep 17 00:00:00 2001 From: Fabian Mastenbroek Date: Sun, 11 Mar 2018 20:35:28 +0100 Subject: feat: Add stage measurements to schema --- database/schema.sql | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/database/schema.sql b/database/schema.sql index aa0ad1e5..9e356b4a 100644 --- a/database/schema.sql +++ b/database/schema.sql @@ -232,6 +232,20 @@ CREATE TABLE task_states ( ON UPDATE CASCADE ); +-- The measurements of a single stage +CREATE TABLE stage_measurements ( + id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT, + experiment_id INTEGER NOT NULL, + tick INTEGER NOT NULL CHECK (tick >= 0), + stage INTEGER NOT NULL CHECK (stage >= 0), + duration INTEGER NOT NULL CHECK (duration >= 0), + size INTEGER NOT NULL CHECK (size >= 0), + + FOREIGN KEY (experiment_id) REFERENCES experiments (id) + ON DELETE CASCADE + ON UPDATE CASCADE +); + -- A machine state DROP TABLE IF EXISTS machine_states; CREATE TABLE machine_states ( -- cgit v1.2.3 From 1ccc68defdb1f67f39dc836ee6659a89135c038c Mon Sep 17 00:00:00 2001 From: Georgios Andreadis Date: Mon, 12 Mar 2018 07:33:59 +0100 Subject: feat: Add task metrics to DB schema --- database/schema.sql | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/database/schema.sql b/database/schema.sql index 9e356b4a..3b751c30 100644 --- a/database/schema.sql +++ b/database/schema.sql @@ -233,6 +233,7 @@ CREATE TABLE task_states ( ); -- The measurements of a single stage +DROP TABLE IF EXISTS stage_measurements; CREATE TABLE stage_measurements ( id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT, experiment_id INTEGER NOT NULL, @@ -246,6 +247,24 @@ CREATE TABLE stage_measurements ( ON UPDATE CASCADE ); +-- Metrics of a single task +DROP TABLE IF EXISTS task_metrics; +CREATE TABLE task_metrics ( + id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT, + experiment_id INTEGER NOT NULL, + task_id INTEGER NOT NULL, + waiting INTEGER NOT NULL CHECK (waiting >= 0), + execution INTEGER NOT NULL CHECK (execution >= 0), + turnaround INTEGER NOT NULL CHECK (turnaround >= 0), + + FOREIGN KEY (experiment_id) REFERENCES experiments (id) + ON DELETE CASCADE + ON UPDATE CASCADE, + FOREIGN KEY (task_id) REFERENCES tasks (id) + ON DELETE CASCADE + ON UPDATE CASCADE +); + -- A machine state DROP TABLE IF EXISTS machine_states; CREATE TABLE machine_states ( -- cgit v1.2.3 From aa97e067caae9cbe25fc9140fc3e625933615fb9 Mon Sep 17 00:00:00 2001 From: Georgios Andreadis Date: Sat, 24 Mar 2018 11:55:45 +0100 Subject: feat: Add job id to DB schema --- database/schema.sql | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/database/schema.sql b/database/schema.sql index 3b751c30..b9460701 100644 --- a/database/schema.sql +++ b/database/schema.sql @@ -253,6 +253,7 @@ CREATE TABLE task_metrics ( id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT, experiment_id INTEGER NOT NULL, task_id INTEGER NOT NULL, + job_id INTEGER NOT NULL, waiting INTEGER NOT NULL CHECK (waiting >= 0), execution INTEGER NOT NULL CHECK (execution >= 0), turnaround INTEGER NOT NULL CHECK (turnaround >= 0), @@ -261,6 +262,9 @@ CREATE TABLE task_metrics ( ON DELETE CASCADE ON UPDATE CASCADE, FOREIGN KEY (task_id) REFERENCES tasks (id) + ON DELETE CASCADE + ON 
UPDATE CASCADE,
+    FOREIGN KEY (job_id) REFERENCES jobs (id)
         ON DELETE CASCADE
         ON UPDATE CASCADE
 );
-- cgit v1.2.3


From fc7c58da98f0eddbd0109e2a09bf394dde555176 Mon Sep 17 00:00:00 2001
From: Fabian Mastenbroek
Date: Sat, 24 Mar 2018 12:22:02 +0100
Subject: feat: Add support for both wall and cpu time

---
 database/schema.sql | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/database/schema.sql b/database/schema.sql
index b9460701..36db1e17 100644
--- a/database/schema.sql
+++ b/database/schema.sql
@@ -239,8 +239,10 @@ CREATE TABLE stage_measurements (
     experiment_id INTEGER NOT NULL,
     tick INTEGER NOT NULL CHECK (tick >= 0),
     stage INTEGER NOT NULL CHECK (stage >= 0),
-    duration INTEGER NOT NULL CHECK (duration >= 0),
+    cpu INTEGER NOT NULL CHECK (cpu >= 0),
+    wall INTEGER NOT NULL CHECK (wall >= 0),
     size INTEGER NOT NULL CHECK (size >= 0),
+    iterations INTEGER NOT NULL CHECK (iterations >= 0),

     FOREIGN KEY (experiment_id) REFERENCES experiments (id)
         ON DELETE CASCADE
-- cgit v1.2.3


From 47994b66619872336519079846e845b313e948b0 Mon Sep 17 00:00:00 2001
From: Fabian Mastenbroek
Date: Mon, 26 Mar 2018 00:47:14 +0200
Subject: feat: Add job metrics

---
 database/schema.sql | 24 ++++++++++++++++++++++--
 1 file changed, 22 insertions(+), 2 deletions(-)

diff --git a/database/schema.sql b/database/schema.sql
index 36db1e17..f6286260 100644
--- a/database/schema.sql
+++ b/database/schema.sql
@@ -239,8 +239,8 @@ CREATE TABLE stage_measurements (
     experiment_id INTEGER NOT NULL,
     tick INTEGER NOT NULL CHECK (tick >= 0),
     stage INTEGER NOT NULL CHECK (stage >= 0),
-    cpu INTEGER NOT NULL CHECK (cpu >= 0),
-    wall INTEGER NOT NULL CHECK (wall >= 0),
+    cpu BIGINT NOT NULL CHECK (cpu >= 0),
+    wall BIGINT NOT NULL CHECK (wall >= 0),
     size INTEGER NOT NULL CHECK (size >= 0),
     iterations INTEGER NOT NULL CHECK (iterations >= 0),
@@ -249,6 +249,26 @@
+-- Metrics of a single job
+DROP TABLE IF EXISTS job_metrics;
+CREATE TABLE job_metrics (
+    id INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+    experiment_id INTEGER NOT NULL,
+    job_id INTEGER NOT NULL,
+    critical_path INTEGER NOT NULL CHECK (critical_path >= 0),
+    critical_path_length INTEGER NOT NULL CHECK (critical_path_length >= 0),
+    waiting_time INTEGER NOT NULL CHECK (waiting_time >= 0),
+    makespan INTEGER NOT NULL CHECK (makespan >= 0),
+    nsl INTEGER NOT NULL CHECK (nsl >= 0),
+
+    FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+        ON DELETE CASCADE
+        ON UPDATE CASCADE,
+    FOREIGN KEY (job_id) REFERENCES jobs (id)
+        ON DELETE CASCADE
+        ON UPDATE CASCADE
+);
+
 -- Metrics of a single task
 DROP TABLE IF EXISTS task_metrics;
 CREATE TABLE task_metrics (
-- cgit v1.2.3
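On the `job_metrics` table added above: `critical_path_length` plausibly
counts the tasks on the longest dependency chain in a job's DAG, whose edges
are stored in `task_dependencies` (the first task must finish before the
second). A minimal sketch of computing such a chain length; this is
illustrative only, not necessarily how the simulator derives the stored
value:

```python
from functools import lru_cache

# Edges from task_dependencies: first_task_id must finish before second_task_id.
edges = [(66, 67), (66, 68), (67, 69)]

predecessors = {}
for first, second in edges:
    predecessors.setdefault(second, []).append(first)


@lru_cache(maxsize=None)
def chain_length(task):
    """Number of tasks on the longest dependency chain ending at `task`."""
    return 1 + max((chain_length(p) for p in predecessors.get(task, [])), default=0)


tasks = {t for edge in edges for t in edge}
print(max(chain_length(t) for t in tasks))  # 3 (66 -> 67 -> 69)
```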