-rw-r--r--   .gitignore                                |   8
-rw-r--r--   .gitmodules                               |   6
-rw-r--r--   Dockerfile                                |   8
-rw-r--r--   database/gwf_converter/gwf_converter.py   |  44
-rw-r--r--   database/schema.sql                       |  86
-rw-r--r--   database/test.sql                         | 184
-rw-r--r--   docker-compose.yml                        |   5
m---------   opendc-frontend                           |  32
m---------   opendc-simulator                          |   0
m---------   opendc-web-server                         |  27
10 files changed, 259 insertions, 141 deletions
diff --git a/.gitignore b/.gitignore
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,12 @@
-.DS_Store
-
 # JetBrains platform
 .idea/
 
+# Credential setup file
 keys.json
+# pyenv version files
+.python-version
 
 mongodb/opendc_testing/*
+
+# macOS-specific files
+.DS_Store
diff --git a/.gitmodules b/.gitmodules
index c24e2f48..e562655d 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,12 +1,12 @@
 [submodule "opendc-web-server"]
 	path = opendc-web-server
-	url = https://atlarge.ewi.tudelft.nl/gitlab/opendc/opendc-web-server.git
+	url = https://github.com/atlarge-research/opendc-web-server.git
 	branch = master
 [submodule "opendc-frontend"]
 	path = opendc-frontend
-	url = https://atlarge.ewi.tudelft.nl/gitlab/opendc/opendc-frontend.git
+	url = https://github.com/atlarge-research/opendc-frontend.git
 	branch = master
 [submodule "opendc-simulator"]
 	path = opendc-simulator
-	url = https://atlarge.ewi.tudelft.nl/gitlab/opendc/opendc-simulator.git
+	url = https://github.com/atlarge-research/opendc-simulator.git
 	branch = master
diff --git a/Dockerfile b/Dockerfile
--- a/Dockerfile
+++ b/Dockerfile
@@ -19,13 +19,15 @@ RUN echo "deb http://ftp.debian.org/debian stretch main" >> /etc/apt/sources.lis
 COPY ./ /opendc
 
 # Setting up simulator
-RUN chmod 555 /opendc/build/configure.sh \
+RUN pip install -e /opendc/opendc-web-server \
+    && python /opendc/opendc-web-server/setup.py install \
+    && chmod 555 /opendc/build/configure.sh \
     && cd /opendc/opendc-frontend \
     && rm -rf ./build \
    && rm -rf ./node_modules \
-    && npm install \
+    && yarn \
     && export REACT_APP_OAUTH_CLIENT_ID=$(cat ../keys.json | python -c "import sys, json; print json.load(sys.stdin)['OAUTH_CLIENT_ID']") \
-    && npm run build
+    && yarn build
 
 # Set working directory
 WORKDIR /opendc
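Note that the inline "python -c" in the RUN step above uses the Python 2 print statement, so the image needs a Python 2 interpreter at build time. A version-agnostic sketch of the same extraction, assuming keys.json sits at the repository root and carries the OAUTH_CLIENT_ID field the build expects:

# read_client_id.py -- sketch; file path and field name taken from the RUN step above
import json
import sys

with open(sys.argv[1] if len(sys.argv) > 1 else "keys.json") as f:
    keys = json.load(f)

# print() works under both Python 2 and 3, unlike the inline "print json.load(...)".
print(keys["OAUTH_CLIENT_ID"])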
diff --git a/database/gwf_converter/gwf_converter.py b/database/gwf_converter/gwf_converter.py
index 81de2440..902bd93f 100644
--- a/database/gwf_converter/gwf_converter.py
+++ b/database/gwf_converter/gwf_converter.py
@@ -17,7 +17,8 @@ class Task:
         self.job = job
         self.submit_time = submit_time
         self.run_time = run_time
-        self.flops = 10 ** 9 * run_time * num_processors
+        self.cores = num_processors
+        self.flops = 4000 * run_time * num_processors
         self.dependency_gwf_ids = dependency_gwf_ids
         self.db_id = -1
         self.dependencies = []
@@ -55,8 +56,7 @@ def get_jobs_from_gwf_file(file_name):
     return jobs.values()
 
 
-def write_to_db(trace_name, jobs):
-    conn = mariadb.connect(user='opendc', password='opendcpassword', database='opendc')
+def write_to_db(conn, trace_name, jobs):
     cursor = conn.cursor()
 
     trace_id = execute_insert_query(conn, cursor, "INSERT INTO traces (name) VALUES ('%s')" % trace_name)
@@ -66,9 +66,10 @@
                                            % ("Job %d" % job.gwf_id, trace_id))
 
         for task in job.tasks:
-            task.db_id = execute_insert_query(conn, cursor, "INSERT INTO tasks (start_tick, total_flop_count, job_id, "
-                                              "parallelizability) VALUES (%d,%d,%d,'PARALLEL')"
-                                              % (task.submit_time, task.flops, job.db_id))
+            task.db_id = execute_insert_query(conn, cursor,
+                                              "INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) "
+                                              "VALUES (%d,%d,%d,%d)"
+                                              % (task.submit_time, task.flops, task.cores, job.db_id))
 
     for job in jobs:
         for task in job.tasks:
@@ -77,9 +78,6 @@
                                        "VALUES (%d,%d)"
                                        % (dependency.db_id, task.db_id))
 
-    conn.close()
-
-
 def execute_insert_query(conn, cursor, sql):
     try:
         cursor.execute(sql)
@@ -90,10 +88,28 @@
     return cursor.lastrowid
 
 
+def main(trace_path):
+    trace_name = sys.argv[2] if (len(sys.argv) > 2) else \
+        os.path.splitext(os.path.basename(trace_path))[0]
+    gwf_jobs = get_jobs_from_gwf_file(trace_path)
+
+    host = os.environ.get('PERSISTENCE_HOST','localhost')
+    user = os.environ.get('PERSISTENCE_USER','opendc')
+    password = os.environ.get('PERSISTENCE_PASSWORD','opendcpassword')
+    database = os.environ.get('PERSISTENCE_DATABASE','opendc')
+    conn = mariadb.connect(host=host, user=user, password=password, database=database)
+    write_to_db(conn, trace_name, gwf_jobs)
+    conn.close()
+
+
 if __name__ == "__main__":
     if len(sys.argv) < 2:
-        sys.exit("Usage: %s trace-name" % sys.argv[0])
-
-    gwf_trace_name = sys.argv[1]
-    gwf_jobs = get_jobs_from_gwf_file(os.path.join("traces", gwf_trace_name + ".gwf"))
-    write_to_db(gwf_trace_name, gwf_jobs)
+        sys.exit("Usage: %s file [name]" % sys.argv[0])
+
+    if sys.argv[1] in ("-a", "--all"):
+        for f in os.listdir("traces"):
+            if f.endswith(".gwf"):
+                print("Converting {}".format(f))
+                main(os.path.join("traces", f))
+    else:
+        main(sys.argv[1])
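Two behavioral changes in the converter are worth noting: the per-task FLOP estimate drops from 10 ** 9 * run_time * num_processors to 4000 * run_time * num_processors (with the core count now stored in its own column), and the entry point accepts either a single .gwf file or -a/--all to convert every trace under traces/. A small sanity check of the revised estimate, with hypothetical inputs:

# flops_check.py -- mirrors the revised Task constructor above
def estimated_flops(run_time, num_processors):
    # New model: 4000 flops per core per time unit (previously 10 ** 9).
    return 4000 * run_time * num_processors

# A task that ran 60 ticks on 4 cores is now recorded as 960,000 flops.
assert estimated_flops(60, 4) == 960000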
diff --git a/database/schema.sql b/database/schema.sql
index c6f34f17..f6286260 100644
--- a/database/schema.sql
+++ b/database/schema.sql
@@ -146,9 +146,18 @@ DROP TABLE IF EXISTS schedulers;
 CREATE TABLE schedulers (
     name VARCHAR(50) PRIMARY KEY NOT NULL
 );
-INSERT INTO schedulers (name) VALUES ('DEFAULT');
-INSERT INTO schedulers (name) VALUES ('SRTF');
-INSERT INTO schedulers (name) VALUES ('FIFO');
+INSERT INTO schedulers (name) VALUES ('FIFO-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('FIFO-RANDOM');
+INSERT INTO schedulers (name) VALUES ('SRTF-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('SRTF-RANDOM');
+INSERT INTO schedulers (name) VALUES ('RANDOM-FIRSTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-BESTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-WORSTFIT');
+INSERT INTO schedulers (name) VALUES ('RANDOM-RANDOM');
 
 /*
  * Each simulation has a single trace. A trace contains tasks and their start times.
@@ -176,11 +185,11 @@ CREATE TABLE jobs (
 -- A task that's defined in terms of how many flops (floating point operations) it takes to complete
 DROP TABLE IF EXISTS tasks;
 CREATE TABLE tasks (
-    id                INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
-    start_tick        INTEGER NOT NULL CHECK (start_tick >= 0),
-    total_flop_count  BIGINT NOT NULL,
-    job_id            INTEGER NOT NULL,
-    parallelizability VARCHAR(50) NOT NULL,
+    id               INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+    start_tick       INTEGER NOT NULL CHECK (start_tick >= 0),
+    total_flop_count BIGINT NOT NULL CHECK (total_flop_count >= 0),
+    core_count       INTEGER NOT NULL CHECK (core_count >= 0),
+    job_id           INTEGER NOT NULL,
 
     FOREIGN KEY (job_id) REFERENCES jobs (id)
         ON DELETE CASCADE
@@ -223,11 +232,69 @@ CREATE TABLE task_states (
         ON UPDATE CASCADE
 );
 
+-- The measurements of a single stage
+DROP TABLE IF EXISTS stage_measurements;
+CREATE TABLE stage_measurements (
+    id            INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+    experiment_id INTEGER NOT NULL,
+    tick          INTEGER NOT NULL CHECK (tick >= 0),
+    stage         INTEGER NOT NULL CHECK (stage >= 0),
+    cpu           BIGINT NOT NULL CHECK (cpu >= 0),
+    wall          BIGINT NOT NULL CHECK (wall >= 0),
+    size          INTEGER NOT NULL CHECK (size >= 0),
+    iterations    INTEGER NOT NULL CHECK (iterations >= 0),
+
+    FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+        ON DELETE CASCADE
+        ON UPDATE CASCADE
+);
+
+-- Metrics of a job task
+DROP TABLE IF EXISTS job_metrics;
+CREATE TABLE job_metrics (
+    id                   INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+    experiment_id        INTEGER NOT NULL,
+    job_id               INTEGER NOT NULL,
+    critical_path        INTEGER NOT NULL CHECK (critical_path >= 0),
+    critical_path_length INTEGER NOT NULL CHECK (critical_path_length >= 0),
+    waiting_time         INTEGER NOT NULL CHECK (waiting_time >= 0),
+    makespan             INTEGER NOT NULL CHECK (makespan >= 0),
+    nsl                  INTEGER NOT NULL CHECK (nsl >= 0),
+
+    FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+        ON DELETE CASCADE
+        ON UPDATE CASCADE,
+    FOREIGN KEY (job_id) REFERENCES jobs (id)
+        ON DELETE CASCADE
+        ON UPDATE CASCADE
+);
+
+-- Metrics of a single task
+DROP TABLE IF EXISTS task_metrics;
+CREATE TABLE task_metrics (
+    id            INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
+    experiment_id INTEGER NOT NULL,
+    task_id       INTEGER NOT NULL,
+    job_id        INTEGER NOT NULL,
+    waiting       INTEGER NOT NULL CHECK (waiting >= 0),
+    execution     INTEGER NOT NULL CHECK (execution >= 0),
+    turnaround    INTEGER NOT NULL CHECK (turnaround >= 0),
+
+    FOREIGN KEY (experiment_id) REFERENCES experiments (id)
+        ON DELETE CASCADE
+        ON UPDATE CASCADE,
+    FOREIGN KEY (task_id) REFERENCES tasks (id)
+        ON DELETE CASCADE
+        ON UPDATE CASCADE,
+    FOREIGN KEY (job_id) REFERENCES jobs (id)
+        ON DELETE CASCADE
+        ON UPDATE CASCADE
+);
+
 -- A machine state
 DROP TABLE IF EXISTS machine_states;
 CREATE TABLE machine_states (
     id               INTEGER PRIMARY KEY NOT NULL AUTO_INCREMENT,
-    task_id          INTEGER,
     machine_id       INTEGER NOT NULL,
     experiment_id    INTEGER NOT NULL,
     tick             INTEGER NOT NULL,
@@ -235,7 +302,6 @@
     in_use_memory_mb INTEGER,
     load_fraction    REAL CHECK (load_fraction >= 0 AND load_fraction <= 1),
 
-    FOREIGN KEY (task_id) REFERENCES tasks (id),
     FOREIGN KEY (machine_id) REFERENCES machines (id)
         ON DELETE CASCADE
        ON UPDATE CASCADE,
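The schema now enumerates schedulers as pairs of a queue-sorting policy (FIFO, SRTF, RANDOM) and a machine-allocation policy (FIRSTFIT, BESTFIT, WORSTFIT, RANDOM), and tasks carry an explicit core_count in place of the old parallelizability label. A minimal sketch of inserting a task against the new columns, in the converter's style; the driver import and connection values here are assumptions:

# insert_task.py -- sketch against the tasks table defined above
import mysql.connector as mariadb  # assumption; any MySQL/MariaDB DB-API driver works

conn = mariadb.connect(user="opendc", password="opendcpassword", database="opendc")
cursor = conn.cursor()
# core_count replaces the old parallelizability column.
cursor.execute(
    "INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) "
    "VALUES (%s, %s, %s, %s)",
    (0, 960000, 4, 1),
)
conn.commit()
conn.close()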
diff --git a/database/test.sql b/database/test.sql
index 452544e0..55801b76 100644
--- a/database/test.sql
+++ b/database/test.sql
@@ -39,11 +39,11 @@ INSERT INTO traces (name) VALUES ('Default');
 INSERT INTO jobs (name, trace_id) VALUES ('Default', 1);
 
 -- Tasks
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 400000, 1, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (25, 10000, 1, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (25, 10000, 1, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (26, 10000, 1, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (80, 200000, 1, 'PARALLEL');
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 400000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (25, 10000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (25, 10000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (26, 10000, 1, 1);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (80, 200000, 1, 1);
 
 INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (1, 5);
 
@@ -54,66 +54,66 @@ INSERT INTO traces (name) VALUES ('Image Processing');
 INSERT INTO jobs (name, trace_id) VALUES ('Image Processing', 2);
 
 -- Tasks
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (10, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (20, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (1, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 100000, 2, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (21, 100000, 2, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (10, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (20, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (1, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 100000, 1, 2);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (21, 100000, 1, 2);
 
 -- Path Planning Trace
 INSERT INTO traces (name) VALUES ('Path planning');
 
@@ -121,23 +121,23 @@ INSERT INTO traces (name) VALUES ('Path planning');
 -- Jobs
 INSERT INTO jobs (name, trace_id) VALUES ('Path planning', 3);
 
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 1000000, 3, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (11, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (12, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (13, 200000, 3, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (14, 200000, 3, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 1000000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (11, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (12, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (13, 200000, 1, 3);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (14, 200000, 1, 3);
 
 INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 67);
 INSERT INTO task_dependencies (first_task_id, second_task_id) VALUES (66, 68);
@@ -162,8 +162,8 @@ INSERT INTO traces (name) VALUES ('Parallel heavy trace');
 -- Jobs
 INSERT INTO jobs (name, trace_id) VALUES ('Parallel heavy trace', 4);
 
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 4, 'SEQUENTIAL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 900000, 4, 'PARALLEL');
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 4);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 900000, 1, 4);
 
 -- Sequential Trace
 INSERT INTO traces (name) VALUES ('Sequential heavy trace');
@@ -171,22 +171,18 @@ INSERT INTO traces (name) VALUES ('Sequential heavy trace');
 -- Jobs
 INSERT INTO jobs (name, trace_id) VALUES ('Sequential heavy trace', 5);
 
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 100000, 5, 'PARALLEL');
-INSERT INTO tasks (start_tick, total_flop_count, job_id, parallelizability) VALUES (0, 900000, 5, 'SEQUENTIAL');
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 100000, 1, 5);
+INSERT INTO tasks (start_tick, total_flop_count, core_count, job_id) VALUES (0, 900000, 1, 5);
 
 -- Experiments
 INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 1, 'SRTF', 'Default trace, SRTF', 'QUEUED', 110);
+VALUES (1, 1, 3, 'fifo-bestfit', 'Path planning trace, FIFO', 'QUEUED', 0);
 INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 2, 'SRTF', 'Image processing trace, SRTF', 'QUEUED', 0);
+VALUES (1, 1, 1, 'srtf-firstfit', 'Default trace, SRTF', 'QUEUED', 0);
 INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 3, 'SRTF', 'Path planning trace, FIFI', 'QUEUED', 0);
+VALUES (1, 1, 2, 'srtf-firstfit', 'Image processing trace, SRTF', 'QUEUED', 0);
 INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 1, 'FIFO', 'Default trace, SRTF', 'QUEUED', 0);
-INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 2, 'FIFO', 'Image processing trace, SRTF', 'QUEUED', 0);
-INSERT INTO experiments (simulation_id, path_id, trace_id, scheduler_name, name, state, last_simulated_tick)
-VALUES (1, 1, 3, 'FIFO', 'Path planning trace, FIFO', 'QUEUED', 0);
+VALUES (1, 1, 3, 'fifo-firstfit', 'Path planning trace, FIFO', 'QUEUED', 0);
 
 -- Rooms
 INSERT INTO rooms (name, datacenter_id, type) VALUES ('room 1', 1, 'SERVER');
diff --git a/docker-compose.yml b/docker-compose.yml
index 52c43ee7..a36611a1 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -31,6 +31,11 @@ services:
       - PERSISTENCE_URL=jdbc:mysql://mariadb:3306/opendc
       - PERSISTENCE_USER=opendc
       - PERSISTENCE_PASSWORD=opendcpassword
+      - COLLECT_MACHINE_STATES=ON
+      - COLLECT_TASK_STATES=ON
+      - COLLECT_STAGE_MEASUREMENTS=OFF
+      - COLLECT_TASK_METRICS=OFF
+      - COLLECT_JOB_METRICS=OFF
   mariadb:
     build:
       context: ./database
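The new COLLECT_* switches let a deployment toggle the result collectors individually: machine and task states stay on, while the stage_measurements, task_metrics, and job_metrics collectors backing the tables added to schema.sql above ship disabled. How the simulator interprets these values is not part of this diff; a sketch of the plain ON/OFF convention the compose file uses, with a hypothetical helper name:

# collect_flags.py -- illustrative only; the actual parsing lives in the simulator
import os

def collector_enabled(name, default="OFF"):
    # The compose file sets plain ON/OFF strings in the environment.
    return os.environ.get(name, default).upper() == "ON"

if collector_enabled("COLLECT_MACHINE_STATES"):
    print("collecting machine states")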
diff --git a/opendc-frontend b/opendc-frontend
-Subproject e65b805e94052d993f7ef486226cae0ebf9965b
+Subproject 7032a007d4431f5a0c4c5e2d3f3bd20462d4995
diff --git a/opendc-simulator b/opendc-simulator
-Subproject 8b9d789baa2757aa4904d18f581d29b2e328890
+Subproject 87bf2df9b290cc56eca14d293d2935b561200b7
diff --git a/opendc-web-server b/opendc-web-server
-Subproject cd116ba5063b9bfda029196b310207b45e21604
+Subproject d49328f321362bb410775e9c520a6f002448941
