author    Fabian Mastenbroek <mail.fabianm@gmail.com>    2018-07-20 00:19:03 +0200
committer Fabian Mastenbroek <mail.fabianm@gmail.com>    2018-08-14 19:15:16 +0200
commit    dfddb6c25c96598295ad8b50092c9f4dd946e560 (patch)
tree      dc22c649515a88ce1f236d6971e46e9daea3fa27 /database
parent    f8df97d51d56e27ae880a2b5bd71149559e91b4a (diff)
feat: Implement Scheduler Reference Architecture
This change implements the parts of the Datacenter Scheduling Reference Architecture as published in SC18. This commit consists of changes to the database schema, such as:

- **Removal of `parallelizability` column in `Task`**
  This field was not used by the simulator and we opted to use a field describing the number of cores the task can run on instead.
- **Removal of `task_id` column in `MachineState`**
  This field no longer made sense now that a machine can run multiple tasks. Fortunately, this field is not used in the frontend.
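For illustration, a dependency that was previously stored through the removed `task_dependency_id` column is now a separate row in the new `task_dependencies` table. A minimal sketch of how the dependents of a task could be queried under the new schema (assuming, as in `test.sql`, that `first_task_id` is the prerequisite and `second_task_id` the dependent task; the ids below are hypothetical):

    -- Record that task 5 depends on task 1 (replaces the old task_dependency_id column).
    INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (1, 5);

    -- List all tasks that depend on task 1.
    SELECT t.id, t.start_tick, t.total_flop_count
    FROM task_dependencies AS d
    JOIN tasks AS t ON t.id = d.second_task_id
    WHERE d.first_task_id = 1;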
Diffstat (limited to 'database')
-rw-r--r--  database/schema.sql  47
-rw-r--r--  database/test.sql    82
2 files changed, 75 insertions, 54 deletions
diff --git a/database/schema.sql b/database/schema.sql
index 7f0d5879..15dbf043 100644
--- a/database/schema.sql
+++ b/database/schema.sql
@@ -63,7 +63,7 @@ INSERT INTO authorization_levels (level) VALUES ('VIEW');
* - DD is the two-digit day of the month (1-31)
* - HH is the two-digit hours part (0-23)
* - MM is the two-digit minutes part (0-59)
-* - SS is the two-digit secodns part (0-59)
+* - SS is the two-digit seconds part (0-59)
*/
-- Simulation
@@ -146,9 +146,18 @@ DROP TABLE IF EXISTS schedulers;
CREATE TABLE schedulers (
name VARCHAR(50) PRIMARY KEY NOT NULL
);
-INSERT INTO schedulers (name) VALUES ('DEFAULT');
-INSERT INTO schedulers (name) VALUES ('SRTF');
-INSERT INTO schedulers (name) VALUES ('FIFO');
+INSERT INTO schedulers (name) VALUES ('FIFO-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-RANDOM');
+INSERT INTO schedulers (name) VALUES ('SRTF-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-RANDOM');
+INSERT INTO schedulers (name) VALUES ('RANDOM-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-RANDOM');
/*
* Each simulation has a single trace. A trace contains tasks and their start times.
@@ -180,13 +189,24 @@ CREATE TABLE tasks (
start_tick INTEGER NOT NULL CHECK (start_tick >= 0),
total_flop_count INTEGER NOT NULL,
job_id INTEGER NOT NULL,
- task_dependency_id INTEGER NULL,
parallelizability VARCHAR(50) NOT NULL,
FOREIGN KEY (job_id) REFERENCES jobs (id)
ON DELETE CASCADE
+ ON UPDATE CASCADE
+);
+
+-- A dependency between two tasks.
+DROP TABLE IF EXISTS task_dependencies;
+CREATE TABLE task_dependencies (
+ first_task_id INTEGER NOT NULL,
+ second_task_id INTEGER NOT NULL,
+
+ PRIMARY KEY (first_task_id, second_task_id),
+ FOREIGN KEY (first_task_id) REFERENCES tasks (id)
+ ON DELETE CASCADE
ON UPDATE CASCADE,
- FOREIGN KEY (task_dependency_id) REFERENCES tasks (id)
+ FOREIGN KEY (second_task_id) REFERENCES tasks (id)
ON DELETE CASCADE
ON UPDATE CASCADE
);
@@ -318,8 +338,9 @@ DELIMITER //
-- and tiles in a room are connected.
DROP TRIGGER IF EXISTS before_insert_tiles_check_existence;
CREATE TRIGGER before_insert_tiles_check_existence
-BEFORE INSERT ON tiles
-FOR EACH ROW
+ BEFORE INSERT
+ ON tiles
+ FOR EACH ROW
BEGIN
-- checking tile overlap
-- a tile already exists such that..
@@ -416,8 +437,9 @@ DELIMITER //
-- Make sure objects are added to tiles in rooms they're allowed to be in.
DROP TRIGGER IF EXISTS before_update_tiles;
CREATE TRIGGER before_update_tiles
-BEFORE UPDATE ON tiles
-FOR EACH ROW
+ BEFORE UPDATE
+ ON tiles
+ FOR EACH ROW
BEGIN
IF ((NEW.object_id IS NOT NULL) AND (
@@ -543,8 +565,9 @@ DELIMITER //
-- Make sure a machine is not inserted at a position that does not exist for its rack.
DROP TRIGGER IF EXISTS before_insert_machine;
CREATE TRIGGER before_insert_machine
-BEFORE INSERT ON machines
-FOR EACH ROW
+ BEFORE INSERT
+ ON machines
+ FOR EACH ROW
BEGIN
IF (
NEW.position > (SELECT capacity
diff --git a/database/test.sql b/database/test.sql
index fa7fb8aa..a2c6d165 100644
--- a/database/test.sql
+++ b/database/test.sql
@@ -43,8 +43,9 @@ INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALU
INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (25, 10000, 1, 'PARALLEL');
INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (25, 10000, 1, 'PARALLEL');
INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (26, 10000, 1, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (80, 200000, 1, 1, 'PARALLEL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (80, 200000, 1, 'PARALLEL');
+
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (1, 5);
-- Image Processing Trace
INSERT INTO traces (name) VALUES ('Image Processing');
@@ -121,38 +122,39 @@ INSERT INTO traces (name) VALUES ('Path planning');
INSERT INTO jobs (name, trace_id) VALUES ('Path planning', 3);
INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 1000000, 3, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (11, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (12, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (13, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (14, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (11, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (12, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (13, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (14, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (11, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (12, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (13, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (14, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (11, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (12, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (13, 200000, 3, 66, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, task_dependency_id, parallelizability)
-VALUES (14, 200000, 3, 66, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL');
+
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 67);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 68);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 69);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 70);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 71);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 72);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 73);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 74);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 75);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 76);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 77);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 78);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 79);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 80);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 81);
+INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 82);
-- Parallelizable Trace
INSERT INTO traces (name) VALUES ('Parallel heavy trace');
@@ -174,17 +176,13 @@ INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALU
-- Experiments
INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 1, 'SRTF', 'Default trace, SRTF', 'QUEUED', 110);
-INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 2, 'SRTF', 'Image processing trace, SRTF', 'QUEUED', 0);
-INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 3, 'SRTF', 'Path planning trace, FIFI', 'QUEUED', 0);
+VALUES (1, 1, 3, 'fifo-bestfit', 'Path planning trace, FIFO', 'QUEUED', 0);
INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 1, 'FIFO', 'Default trace, SRTF', 'QUEUED', 0);
+VALUES (1, 1, 1, 'srtf-firstfit', 'Default trace, SRTF', 'QUEUED', 0);
INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 2, 'FIFO', 'Image processing trace, SRTF', 'QUEUED', 0);
+VALUES (1, 1, 2, 'srtf-firstfit', 'Image processing trace, SRTF', 'QUEUED', 0);
INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 3, 'FIFO', 'Path planning trace, FIFO', 'QUEUED', 0);
+VALUES (1, 1, 3, 'fifo-firstfit', 'Path planning trace, FIFO', 'QUEUED', 0);
-- Rooms
INSERT INTO rooms (name, datacenter_id, type) VALUES ('room 1', 1, 'SERVER');