/*
    This is the SQLite version of the EnsEMBL Hive database schema file.

    It has been annotated with @-tags.
    The following command is used to create HTML documentation:
        perl $ENSEMBL_CVS_ROOT_DIR/ensembl-production/scripts/sql2html.pl \
            -i $ENSEMBL_CVS_ROOT_DIR/ensembl-hive/sql/tables.sqlite -d Hive -intro $ENSEMBL_CVS_ROOT_DIR/ensembl-hive/docs/hive_schema.inc \
            -sort_headers 0 -sort_tables 0 -o $ENSEMBL_CVS_ROOT_DIR/ensembl-hive/docs/hive_schema.html


    Adding the following line into the header of the previous output will make it look prettier:
            <link rel="stylesheet" type="text/css" media="all" href="ehive_doc.css" />




LICENSE

    Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

         http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License
    is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and limitations under the License.

CONTACT

    Please subscribe to the Hive mailing list:  http://listserver.ebi.ac.uk/mailman/listinfo/ehive-users  to discuss Hive-related questions or to be notified of our updates

*/


/**
@header Pipeline structure
@colour #C70C09
*/

/**
@table  hive_meta

@colour #000000

@desc This table keeps several important hive-specific pipeline-wide key-value pairs
        such as hive_sql_schema_version, hive_use_triggers and hive_pipeline_name.

@column meta_key        the KEY of KEY-VALUE pairs (primary key)
@column meta_value      the VALUE of KEY-VALUE pairs
*/

CREATE TABLE hive_meta (
    meta_key                VARCHAR(255) NOT NULL PRIMARY KEY,
    meta_value              TEXT

);
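
/*
    A minimal usage sketch (hypothetical values; normally the Hive API populates
    this table when the pipeline database is created):

        INSERT INTO hive_meta (meta_key, meta_value)
        VALUES ('hive_pipeline_name', 'long_mult'),
               ('hive_use_triggers',  '0');

        SELECT meta_value FROM hive_meta WHERE meta_key = 'hive_pipeline_name';
*/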


/**
@table  pipeline_wide_parameters

@colour #000000

@desc This table contains a simple hash between pipeline_wide_parameter names and their values.
      The same data used to live in the 'meta' table until both the schema and the API were finally separated from Ensembl Core.

@column param_name      the KEY of KEY-VALUE pairs (primary key)
@column param_value     the VALUE of KEY-VALUE pairs
*/

CREATE TABLE pipeline_wide_parameters (
    param_name              VARCHAR(255) NOT NULL PRIMARY KEY,
    param_value             TEXT

);
CREATE        INDEX pipeline_wide_parameters_value_idx     ON pipeline_wide_parameters (param_value);


/**
@table  analysis_base

@colour #C70C09

@desc   Each Analysis is a node of the pipeline diagram.
        It acts both as a "class" to which Jobs belong (and inherit from it certain properties)
        and as a "container" for them (Jobs of an Analysis can be blocking all Jobs of another Analysis).

@column analysis_id             a unique ID that is also a foreign key to most of the other tables
@column logic_name              the name of the Analysis object
@column module                  the Perl module name that runs this Analysis
@column parameters              a stringified hash of parameters common to all jobs of the Analysis
@column resource_class_id       link to the resource_class table
@column failed_job_tolerance    % of tolerated failed Jobs
@column max_retry_count         how many times a job of this Analysis will be retried (unless there is no point)
@column can_be_empty            if TRUE, this Analysis will not be blocking if/while it doesn't have any jobs
@column priority                an Analysis with higher priority is more likely to be chosen when a Worker specializes
@column meadow_type             if defined, forces this Analysis to be run only on the given Meadow
@column analysis_capacity       if defined, limits the number of Workers of this particular Analysis that are allowed to run in parallel
*/

CREATE TABLE analysis_base (
    analysis_id             INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    logic_name              VARCHAR(255) NOT NULL,
    module                  VARCHAR(255),
    parameters              TEXT,
    resource_class_id       INTEGER     NOT NULL,
    failed_job_tolerance    INTEGER     NOT NULL DEFAULT 0,
    max_retry_count         INTEGER     NOT NULL DEFAULT 3,
    can_be_empty            SMALLINT    NOT NULL DEFAULT 0,
    priority                SMALLINT    NOT NULL DEFAULT 0,
    meadow_type             VARCHAR(255)         DEFAULT NULL,
    analysis_capacity       INTEGER              DEFAULT NULL
);
CREATE UNIQUE INDEX analysis_base_logic_name_idx ON analysis_base (logic_name);
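
/*
    A sketch of registering an Analysis by hand (normally these rows are
    generated by the API from a PipeConfig file; the logic_name, module and
    resource_class_id below are hypothetical):

        INSERT INTO analysis_base (logic_name, module, parameters, resource_class_id)
        VALUES ('part_multiply', 'Bio::EnsEMBL::Hive::RunnableDB::LongMult::PartMultiply', '{}', 1);
*/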


/**
@table  analysis_stats

@colour #C70C09

@desc   Parallel table to analysis_base which provides high-level statistics on the
        state of an analysis and its jobs.  Used to provide a fast overview, and to
        provide final approval of 'DONE', which is used by the blocking rules to determine
        when to unblock other analyses.

@column analysis_id             foreign-keyed to the corresponding analysis_base entry
@column batch_size              how many jobs are claimed in one claiming operation before Worker starts executing them
@column hive_capacity           a reciprocal limiter on the number of Workers running at the same time (dependent on Workers of other Analyses)
@column status                  cached state of the Analysis

@column total_job_count         total number of Jobs of this Analysis
@column semaphored_job_count    number of Jobs of this Analysis that are in SEMAPHORED state
@column ready_job_count         number of Jobs of this Analysis that are in READY state
@column done_job_count          number of Jobs of this Analysis that are in DONE state
@column failed_job_count        number of Jobs of this Analysis that are in FAILED state

@column num_running_workers     number of running Workers of this Analysis
@column num_required_workers    extra number of Workers of this Analysis needed to execute all READY jobs

@column behaviour               whether hive_capacity is set or is dynamically calculated based on timers
@column input_capacity          used to compute hive_capacity in DYNAMIC mode
@column output_capacity         used to compute hive_capacity in DYNAMIC mode

@column avg_msec_per_job        weighted average used to compute DYNAMIC hive_capacity
@column avg_input_msec_per_job  weighted average used to compute DYNAMIC hive_capacity
@column avg_run_msec_per_job    weighted average used to compute DYNAMIC hive_capacity
@column avg_output_msec_per_job weighted average used to compute DYNAMIC hive_capacity

@column last_update             when this entry was last updated
@column sync_lock               a binary lock flag to prevent simultaneous updates
*/

CREATE TABLE analysis_stats (
    analysis_id             INTEGER     NOT NULL,
    batch_size              INTEGER     NOT NULL DEFAULT 1,
    hive_capacity           INTEGER              DEFAULT NULL,
    status                  TEXT        NOT NULL DEFAULT 'EMPTY', /* enum('BLOCKED', 'LOADING', 'SYNCHING', 'EMPTY', 'READY', 'WORKING', 'ALL_CLAIMED', 'DONE', 'FAILED') DEFAULT 'EMPTY' NOT NULL, */

    total_job_count         INTEGER     NOT NULL DEFAULT 0,
    semaphored_job_count    INTEGER     NOT NULL DEFAULT 0,
    ready_job_count         INTEGER     NOT NULL DEFAULT 0,
    done_job_count          INTEGER     NOT NULL DEFAULT 0,
    failed_job_count        INTEGER     NOT NULL DEFAULT 0,
    num_running_workers     INTEGER     NOT NULL DEFAULT 0,
    num_required_workers    INTEGER     NOT NULL DEFAULT 0,

    behaviour               TEXT        NOT NULL DEFAULT 'STATIC', /* enum('STATIC', 'DYNAMIC') DEFAULT 'STATIC' NOT NULL, */
    input_capacity          INTEGER     NOT NULL DEFAULT 4,
    output_capacity         INTEGER     NOT NULL DEFAULT 4,

    avg_msec_per_job        INTEGER              DEFAULT NULL,
    avg_input_msec_per_job  INTEGER              DEFAULT NULL,
    avg_run_msec_per_job    INTEGER              DEFAULT NULL,
    avg_output_msec_per_job INTEGER              DEFAULT NULL,

    last_update             TIMESTAMP            DEFAULT NULL,
    sync_lock               SMALLINT    NOT NULL DEFAULT 0,

    PRIMARY KEY (analysis_id)
);
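
/*
    A typical monitoring query, a sketch of the kind of overview that tools
    such as beekeeper derive from this table:

        SELECT ab.logic_name, st.status, st.ready_job_count, st.done_job_count, st.failed_job_count
        FROM analysis_stats st
        JOIN analysis_base ab USING (analysis_id)
        ORDER BY st.analysis_id;
*/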


/**
@table  dataflow_rule

@colour #C70C09

@desc Extension of the simple_rule design, except that the goal(to) is now in extended URL format, e.g.
        mysql://ensadmin:<pass>@ecs2:3361/compara_hive_test?analysis.logic_name='blast_NCBI34'
        (the full network address of an analysis).
        The only requirement is that there are rows in the job, analysis, dataflow_rule,
        and worker tables so that the following join works on the same database:
            WHERE analysis.analysis_id = dataflow_rule.from_analysis_id
            AND   analysis.analysis_id = job.analysis_id
            AND   analysis.analysis_id = worker.analysis_id
        These are the rules used to create entries in the job table, where the
        input_id (control data) is passed from one analysis to the next to define work.
        The analysis table will be extended so that it can specify different read and write
        databases, with the default being the database the analysis is on.

@column dataflow_rule_id        internal ID
@column from_analysis_id        foreign key to analysis table analysis_id
@column branch_code             branch_code of the fan
@column funnel_dataflow_rule_id dataflow_rule_id of the semaphored funnel (is NULL by default, which means dataflow is not semaphored)
@column to_analysis_url         foreign key to net distributed analysis logic_name reference
@column input_id_template       a template for generating a new input_id (not necessarily a hashref) in this dataflow; if undefined, the original input_id is kept
*/

CREATE TABLE dataflow_rule (
    dataflow_rule_id        INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    from_analysis_id        INTEGER     NOT NULL,
    branch_code             INTEGER     NOT NULL DEFAULT 1,
    funnel_dataflow_rule_id INTEGER              DEFAULT NULL,
    to_analysis_url         VARCHAR(255) NOT NULL DEFAULT '',
    input_id_template       TEXT                 DEFAULT NULL
);
CREATE UNIQUE INDEX from_to_branch_template_idx ON dataflow_rule (from_analysis_id, branch_code, funnel_dataflow_rule_id, to_analysis_url, input_id_template);
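
/*
    A hand-written sketch of a rule that flows each job of analysis #1 into the
    analysis named 'part_multiply' on branch #2 (values are hypothetical; the
    API normally creates these rows from -flow_into definitions, and
    to_analysis_url may be a plain logic_name or a full URL):

        INSERT INTO dataflow_rule (from_analysis_id, branch_code, to_analysis_url)
        VALUES (1, 2, 'part_multiply');
*/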


/**
@table  analysis_ctrl_rule

@colour #C70C09

@desc   These rules define a higher level of control.
        These rules are used to turn whole analysis nodes on/off (READY/BLOCKED).
        If any of the condition_analyses are not 'DONE', the ctrled_analysis is set to BLOCKED.
        When all conditions become 'DONE', the ctrled_analysis is set to READY,
        and the workers switch the analysis.status to 'WORKING' and 'DONE'.
        But at any moment, if a condition goes false, the analysis is reset to BLOCKED.

@column condition_analysis_url foreign key to net distributed analysis reference
@column ctrled_analysis_id     foreign key to analysis table analysis_id
*/

CREATE TABLE analysis_ctrl_rule (
    condition_analysis_url  VARCHAR(255) NOT NULL DEFAULT '',
    ctrled_analysis_id      INTEGER     NOT NULL
);
CREATE UNIQUE INDEX condition_ctrled_idx ON analysis_ctrl_rule (condition_analysis_url, ctrled_analysis_id);
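
/*
    For example, to keep analysis #3 BLOCKED until all jobs of 'part_multiply'
    are DONE (hypothetical values; -wait_for definitions generate such rows):

        INSERT INTO analysis_ctrl_rule (condition_analysis_url, ctrled_analysis_id)
        VALUES ('part_multiply', 3);
*/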


/**
@header Resources
@colour #FF7504
*/

/**
@table  resource_class

@colour #FF7504

@desc   Maps between resource_class numeric IDs and unique names.

@column resource_class_id   unique ID of the ResourceClass
@column name                unique name of the ResourceClass
*/

CREATE TABLE resource_class (
    resource_class_id       INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    name                    VARCHAR(255) NOT NULL

);
CREATE UNIQUE INDEX resource_class_name_idx ON resource_class (name);


/**
@table  resource_description

@colour #FF7504

@desc   Maps (ResourceClass, MeadowType) pair to Meadow-specific resource lines.

@column resource_class_id   foreign-keyed to the ResourceClass entry
@column meadow_type         if the Worker is about to be executed on the given Meadow...
@column submission_cmd_args ... these are the resource arguments (queue, memory,...) to give to the submission command
@column worker_cmd_args     ... and these are the arguments that are given to the worker command being submitted
*/

CREATE TABLE resource_description (
    resource_class_id       INTEGER     NOT NULL,
    meadow_type             VARCHAR(255) NOT NULL,
    submission_cmd_args     VARCHAR(255) NOT NULL DEFAULT '',
    worker_cmd_args         VARCHAR(255) NOT NULL DEFAULT '',

    PRIMARY KEY(resource_class_id, meadow_type)
);
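
/*
    A sketch (LSF-flavoured, hypothetical values) of attaching a 2Gb memory
    resource line to resource_class #2:

        INSERT INTO resource_description (resource_class_id, meadow_type, submission_cmd_args)
        VALUES (2, 'LSF', '-M 2000 -R "rusage[mem=2000]"');
*/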


/**
@header Job-related
@colour #1D73DA
*/

/**
@table  job

@colour #1D73DA

@desc The job is the heart of this system.  It is the kiosk or blackboard
    where workers find things to do and then post work for other workers to do.
    These jobs are created prior to work being done, are claimed by workers,
    are updated as the work is done, with a final update on completion.

@column job_id                  autoincrement id
@column prev_job_id             previous job which created this one
@column analysis_id             the analysis_id needed to accomplish this job.
@column input_id                input data passed into Analysis:RunnableDB to control the work
@column param_id_stack          a CSV of job_ids whose input_ids contribute to the stack of local variables for the job
@column accu_id_stack           a CSV of job_ids whose accu's contribute to the stack of local variables for the job
@column role_id                 links to the Role that claimed this job (NULL means it has never been claimed)
@column status                  state the job is in
@column retry_count             number of times the job had to be reset when a worker failed to run it
@column completed               when the job was completed
@column runtime_msec            how long did it take to execute the job (or until the moment it failed)
@column query_count             how many SQL queries were run during this job
@column semaphore_count         if this count is >0, the job is conditionally blocked (until this count drops to 0 or below). Default=0 means "nothing is blocking me by default".
@column semaphored_job_id       the job_id of job S that is waiting for this job to decrease S's semaphore_count. Default=NULL means "I'm not blocking anything by default".
*/

CREATE TABLE job (
    job_id                  INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    prev_job_id             INTEGER              DEFAULT NULL,  -- the job that created this one using a dataflow rule
    analysis_id             INTEGER     NOT NULL,
    input_id                CHAR(255)   NOT NULL,
    param_id_stack          CHAR(64)    NOT NULL DEFAULT '',
    accu_id_stack           CHAR(64)    NOT NULL DEFAULT '',
    role_id                 INTEGER              DEFAULT NULL,
    status                  TEXT        NOT NULL DEFAULT 'READY', /* enum('SEMAPHORED','READY','CLAIMED','COMPILATION','FETCH_INPUT','RUN','WRITE_OUTPUT','DONE','FAILED','PASSED_ON') DEFAULT 'READY' NOT NULL, */
    retry_count             INTEGER     NOT NULL DEFAULT 0,
    completed               TIMESTAMP            DEFAULT NULL,
    runtime_msec            INTEGER              DEFAULT NULL,
    query_count             INTEGER              DEFAULT NULL,

    semaphore_count         INTEGER     NOT NULL DEFAULT 0,
    semaphored_job_id       INTEGER              DEFAULT NULL
);
CREATE UNIQUE INDEX job_input_id_stacks_analysis_idx ON job (input_id, param_id_stack, accu_id_stack, analysis_id);
CREATE        INDEX job_analysis_status_retry_idx ON job (analysis_id, status, retry_count);
CREATE        INDEX job_role_id_status_id_idx ON job (role_id);
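
/*
    A sketch of the semaphore bookkeeping performed by the API (or by triggers,
    when hive_use_triggers is set) once a fan job pointing at funnel job #42
    completes; the job_id is hypothetical:

        UPDATE job SET semaphore_count = semaphore_count - 1 WHERE job_id = 42;

        UPDATE job SET status = 'READY'
        WHERE job_id = 42 AND status = 'SEMAPHORED' AND semaphore_count <= 0;
*/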


/**
@table  job_file

@colour #1D73DA

@desc   For testing/debugging purposes both STDOUT and STDERR streams of each Job
        can be redirected into a separate log file.
        This table holds filesystem paths to one or both of those files.
        There is at most one entry per job_id and retry.

@column job_id             foreign key
@column retry              copy of retry_count of job as it was run
@column role_id            links to the Role that claimed this job
@column stdout_file        path to the job's STDOUT log
@column stderr_file        path to the job's STDERR log
*/

CREATE TABLE job_file (
    job_id                  INTEGER     NOT NULL,
    retry                   INTEGER     NOT NULL,
    role_id                 INTEGER     NOT NULL,
    stdout_file             VARCHAR(255),
    stderr_file             VARCHAR(255),

    PRIMARY KEY (job_id, retry)
);
CREATE        INDEX job_file_role_id_idx ON job_file (role_id);
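
/*
    For example, to locate the error logs of all FAILED jobs (assuming the
    pipeline was run with job logging switched on, so the files exist):

        SELECT j.job_id, jf.retry, jf.stderr_file
        FROM job j JOIN job_file jf USING (job_id)
        WHERE j.status = 'FAILED';
*/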


/**
@table  accu

@colour #1D73DA

@desc   Accumulator for funneled dataflow.

@column sending_job_id     semaphoring job in the "box"
@column receiving_job_id   semaphored job outside the "box"
@column struct_name        name of the structured parameter
@column key_signature      locates the part of the structured parameter
@column value              value of the part
*/

CREATE TABLE accu (
    sending_job_id          INTEGER,
    receiving_job_id        INTEGER     NOT NULL,
    struct_name             VARCHAR(255) NOT NULL,
    key_signature           VARCHAR(255) NOT NULL,
    value                   TEXT
);
CREATE        INDEX accu_sending_idx   ON accu (sending_job_id);
CREATE        INDEX accu_receiving_idx ON accu (receiving_job_id);
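
/*
    For example, to inspect the structure accumulated for funnel job #42
    (hypothetical job_id):

        SELECT struct_name, key_signature, value
        FROM accu
        WHERE receiving_job_id = 42;
*/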


/**
@table  analysis_data

@colour #1D73DA

@desc   A generic blob-storage hash.
        Currently the only legitimate use of this table is "overflow" of job.input_ids:
        when they grow longer than 254 characters the real data is stored in analysis_data instead,
        and the input_id contains the corresponding analysis_data_id.

@column analysis_data_id    primary id
@column data                text blob which holds the data
*/

CREATE TABLE analysis_data (
    analysis_data_id        INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    data                    TEXT
);
CREATE        INDEX analysis_data_idx ON analysis_data (data);
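
/*
    For example, to fetch an overflown input_id back by the id recorded in
    job.input_id (hypothetical id):

        SELECT data FROM analysis_data WHERE analysis_data_id = 1;
*/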


/**
@header Execution tables
@colour #24DA06
*/

/**
@table  worker

@colour #24DA06

@desc Entries of this table correspond to Worker objects of the API.
        Workers are created by inserting into this table
        so that there is only one instance of a Worker object in the database.
        As Workers live and do work, they update this table, and when they die they update it one last time.

@column worker_id           unique ID of the Worker
@column meadow_type         type of the Meadow it is running on
@column meadow_name         name of the Meadow it is running on (for the 'LOCAL' type this is the same as host)
@column host                execution host name
@column process_id          identifies the Worker process on the Meadow (for 'LOCAL' this is the OS PID)
@column resource_class_id   links to Worker's resource class
@column work_done           how many jobs the Worker has completed successfully
@column status              current status of the Worker
@column born                when the Worker process was started
@column last_check_in       when the Worker last checked into the database
@column died                if defined, when the Worker died (or its premature death was first detected by GC)
@column cause_of_death      if defined, why did the Worker exit (or why it was killed)
@column log_dir             if defined, a filesystem directory where this Worker's output is logged
*/

CREATE TABLE worker (
    worker_id               INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    meadow_type             VARCHAR(255) NOT NULL,
    meadow_name             VARCHAR(255) NOT NULL,
    host                    VARCHAR(255) NOT NULL,
    process_id              VARCHAR(255) NOT NULL,
    resource_class_id       INTEGER              DEFAULT NULL,
    work_done               INTEGER     NOT NULL DEFAULT 0,
    status                  TEXT        NOT NULL DEFAULT 'READY', /* enum('SPECIALIZATION','COMPILATION','READY','PRE_CLEANUP','FETCH_INPUT','RUN','WRITE_OUTPUT','POST_CLEANUP','DEAD') DEFAULT 'READY' NOT NULL */
    born                    TIMESTAMP   NOT NULL DEFAULT CURRENT_TIMESTAMP,
    last_check_in           TIMESTAMP            DEFAULT NULL,
    died                    TIMESTAMP            DEFAULT NULL,
    cause_of_death          TEXT                 DEFAULT NULL, /* enum('NO_ROLE', 'NO_WORK', 'JOB_LIMIT', 'HIVE_OVERLOAD', 'LIFESPAN', 'CONTAMINATED', 'RELOCATED', 'KILLED_BY_USER', 'MEMLIMIT', 'RUNLIMIT', 'SEE_MSG', 'UNKNOWN') DEFAULT NULL */
    log_dir                 VARCHAR(255)         DEFAULT NULL
);
CREATE INDEX worker_meadow_type_meadow_name_process_id_idx ON worker (meadow_type, meadow_name, process_id);
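
/*
    For example, to count the Workers that are still alive, per Meadow:

        SELECT meadow_type, meadow_name, COUNT(*) AS live_workers
        FROM worker
        WHERE died IS NULL
        GROUP BY meadow_type, meadow_name;
*/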


/**
@table  role

@colour #24DA06

@desc Entries of this table correspond to Role objects of the API.
        When a Worker specializes, it acquires a Role,
        which is a temporary link between the Worker and a resource-compatible Analysis.

@column role_id             unique ID of the Role
@column worker_id           the specialized Worker
@column analysis_id         the Analysis into which the Worker specialized
@column when_started        when this Role started
@column when_finished       when this Role finished. NULL may either indicate it is still running or was killed by an external force.
@column attempted_jobs      counter of the number of attempts
@column done_jobs           counter of the number of successful attempts
*/

CREATE TABLE role (
    role_id                 INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    worker_id               INTEGER     NOT NULL,
    analysis_id             INTEGER     NOT NULL,
    when_started            TIMESTAMP   NOT NULL DEFAULT CURRENT_TIMESTAMP,
    when_finished           TIMESTAMP            DEFAULT NULL,
    attempted_jobs          INTEGER     NOT NULL DEFAULT 0,
    done_jobs               INTEGER     NOT NULL DEFAULT 0
);
CREATE        INDEX role_worker_id_idx ON role (worker_id);
CREATE        INDEX role_analysis_id_idx ON role (analysis_id);
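
/*
    For example, to see which Analysis each currently active Role is running:

        SELECT r.worker_id, ab.logic_name, r.when_started, r.attempted_jobs, r.done_jobs
        FROM role r
        JOIN analysis_base ab USING (analysis_id)
        WHERE r.when_finished IS NULL;
*/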



/**
@header Logging and monitoring
@colour #F4D20C
*/

/**
@table  worker_resource_usage

@colour #F4D20C

@desc   A table with post-mortem resource usage statistics of a Worker.

@column          worker_id  links to the worker table
@column        exit_status  meadow-dependent, in case of LSF it's usually 'done' (normal) or 'exit' (abnormal)
@column           mem_megs  how much memory the Worker process used
@column          swap_megs  how much swap the Worker process used
@column        pending_sec  time spent by the process in the queue before it became a Worker
@column            cpu_sec  cpu time used by the Worker process
@column       lifespan_sec  walltime used by the Worker process
@column   exception_status  meadow-specific flags, in case of LSF it can be 'underrun', 'overrun' or 'idle'
*/

CREATE TABLE worker_resource_usage (
    worker_id               INTEGER         NOT NULL,
    exit_status             VARCHAR(255)    DEFAULT NULL,
    mem_megs                FLOAT           DEFAULT NULL,
    swap_megs               FLOAT           DEFAULT NULL,
    pending_sec             FLOAT           DEFAULT NULL,
    cpu_sec                 FLOAT           DEFAULT NULL,
    lifespan_sec            FLOAT           DEFAULT NULL,
    exception_status        VARCHAR(255)    DEFAULT NULL,

    PRIMARY KEY (worker_id)
);
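
/*
    For example, to compare the memory actually consumed by Workers that the
    Meadow killed for exceeding their limits:

        SELECT w.worker_id, w.cause_of_death, u.mem_megs, u.lifespan_sec
        FROM worker w
        JOIN worker_resource_usage u USING (worker_id)
        WHERE w.cause_of_death IN ('MEMLIMIT', 'RUNLIMIT');
*/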


/**
@table  log_message

@colour #F4D20C

@desc   When a Job or a job-less Worker (job_id=NULL) throws a "die" message
        for any reason, the message is recorded in this table.
        Whether the message also indicates that the job was unsuccessful is recorded in the is_error flag.
        Also $self->warning("...") messages are recorded with is_error=0.

@column log_message_id  an autoincremented primary id of the message
@column         job_id  the id of the job that threw the message (or NULL if the message was thrown outside of a job)
@column        role_id  the 'current' role
@column      worker_id  the 'current' worker
@column           time  when the message was thrown
@column          retry  retry_count of the job when the message was thrown (or NULL if no job)
@column         status  of the job or worker when the message was thrown
@column            msg  string that contains the message
@column       is_error  binary flag
*/

CREATE TABLE log_message (
    log_message_id          INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    job_id                  INTEGER              DEFAULT NULL,
    role_id                 INTEGER              DEFAULT NULL,
    worker_id               INTEGER              DEFAULT NULL,
    time                    TIMESTAMP   NOT NULL DEFAULT CURRENT_TIMESTAMP,
    retry                   INTEGER              DEFAULT NULL,
    status                  TEXT        NOT NULL DEFAULT 'UNKNOWN', /* enum('UNKNOWN','SPECIALIZATION','COMPILATION','CLAIMED','READY','PRE_CLEANUP','FETCH_INPUT','RUN','WRITE_OUTPUT','POST_CLEANUP','PASSED_ON') DEFAULT 'UNKNOWN' */
    msg                     TEXT,
    is_error                SMALLINT
);
CREATE        INDEX log_message_worker_idx ON log_message (worker_id);
CREATE        INDEX log_message_job_idx ON log_message (job_id);
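
/*
    For example, to review the twenty most recent error messages together with
    the jobs and Workers that threw them:

        SELECT time, job_id, worker_id, status, msg
        FROM log_message
        WHERE is_error = 1
        ORDER BY log_message_id DESC
        LIMIT 20;
*/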


/**
@table  analysis_stats_monitor

@colour #F4D20C

@desc   A regular timestamped snapshot of the analysis_stats table.

@column time                    when this snapshot was taken

@column analysis_id             foreign-keyed to the corresponding analysis_base entry
@column batch_size              how many jobs are claimed in one claiming operation before Worker starts executing them
@column hive_capacity           a reciprocal limiter on the number of Workers running at the same time (dependent on Workers of other Analyses)
@column status                  cached state of the Analysis

@column total_job_count         total number of Jobs of this Analysis
@column semaphored_job_count    number of Jobs of this Analysis that are in SEMAPHORED state
@column ready_job_count         number of Jobs of this Analysis that are in READY state
@column done_job_count          number of Jobs of this Analysis that are in DONE state
@column failed_job_count        number of Jobs of this Analysis that are in FAILED state

@column num_running_workers     number of running Workers of this Analysis
@column num_required_workers    extra number of Workers of this Analysis needed to execute all READY jobs

@column behaviour               whether hive_capacity is set or is dynamically calculated based on timers
@column input_capacity          used to compute hive_capacity in DYNAMIC mode
@column output_capacity         used to compute hive_capacity in DYNAMIC mode

@column avg_msec_per_job        weighted average used to compute DYNAMIC hive_capacity
@column avg_input_msec_per_job  weighted average used to compute DYNAMIC hive_capacity
@column avg_run_msec_per_job    weighted average used to compute DYNAMIC hive_capacity
@column avg_output_msec_per_job weighted average used to compute DYNAMIC hive_capacity

@column last_update             when this entry was last updated
@column sync_lock               a binary lock flag to prevent simultaneous updates
*/

CREATE TABLE analysis_stats_monitor (
    time                    TIMESTAMP   NOT NULL DEFAULT CURRENT_TIMESTAMP,

    analysis_id             INTEGER     NOT NULL,
    batch_size              INTEGER     NOT NULL DEFAULT 1,
    hive_capacity           INTEGER              DEFAULT NULL,
    status                  TEXT        NOT NULL DEFAULT 'EMPTY', /* enum('BLOCKED', 'LOADING', 'SYNCHING', 'EMPTY', 'READY', 'WORKING', 'ALL_CLAIMED', 'DONE', 'FAILED') DEFAULT 'EMPTY' NOT NULL, */

    total_job_count         INTEGER     NOT NULL DEFAULT 0,
    semaphored_job_count    INTEGER     NOT NULL DEFAULT 0,
    ready_job_count         INTEGER     NOT NULL DEFAULT 0,
    done_job_count          INTEGER     NOT NULL DEFAULT 0,
    failed_job_count        INTEGER     NOT NULL DEFAULT 0,
    num_running_workers     INTEGER     NOT NULL DEFAULT 0,
    num_required_workers    INTEGER     NOT NULL DEFAULT 0,

    behaviour               TEXT        NOT NULL DEFAULT 'STATIC', /* enum('STATIC', 'DYNAMIC') DEFAULT 'STATIC' NOT NULL, */
    input_capacity          INTEGER     NOT NULL DEFAULT 4,
    output_capacity         INTEGER     NOT NULL DEFAULT 4,

    avg_msec_per_job        INTEGER              DEFAULT NULL,
    avg_input_msec_per_job  INTEGER              DEFAULT NULL,
    avg_run_msec_per_job    INTEGER              DEFAULT NULL,
    avg_output_msec_per_job INTEGER              DEFAULT NULL,

    last_update             TIMESTAMP            DEFAULT NULL,
    sync_lock               SMALLINT    NOT NULL DEFAULT 0
);
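
/*
    For example, to trace how the job counts of one analysis evolved over time
    (hypothetical analysis_id):

        SELECT time, ready_job_count, done_job_count, failed_job_count
        FROM analysis_stats_monitor
        WHERE analysis_id = 1
        ORDER BY time;
*/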