/*
    This is the SQLite version of the EnsEMBL Hive database schema file.

    It has been annotated with @-tags.
    The following command is used to create HTML documentation:
        perl $ENSEMBL_CVS_ROOT_DIR/ensembl-production/scripts/sql2html.pl \
            -i $ENSEMBL_CVS_ROOT_DIR/ensembl-hive/sql/tables.sqlite -d Hive -intro $ENSEMBL_CVS_ROOT_DIR/ensembl-hive/docs/hive_schema.inc \
            -sort_headers 0 -sort_tables 0 -o $ENSEMBL_CVS_ROOT_DIR/ensembl-hive/docs/hive_schema.html


    Adding the following line into the header of the previous output will make it look prettier (valid in rel.75):
        <link rel="stylesheet" type="text/css" media="all" href="http://static.ensembl.org/minified/fb4f89868f04f558d1d4421999719047.css" />



LICENSE

    Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute

    Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

         http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software distributed under the License
    is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and limitations under the License.

CONTACT

    Please subscribe to the Hive mailing list:  http://listserver.ebi.ac.uk/mailman/listinfo/ehive-users  to discuss Hive-related questions or to be notified of our updates.

*/


/**
@header Pipeline structure
@colour #C70C09
*/

/**
@table  hive_meta

@colour #000000

@desc This table keeps several important hive-specific pipeline-wide key-value pairs
        such as hive_sql_schema_version, hive_use_triggers and hive_pipeline_name.

@column meta_key        the KEY of KEY-VALUE pairs (primary key)
@column meta_value      the VALUE of KEY-VALUE pairs
*/

CREATE TABLE hive_meta (
    meta_key                VARCHAR(255) NOT NULL PRIMARY KEY,
    meta_value              TEXT

);
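
/*
    Illustrative usage (a sketch, not executed as part of the schema): the
    schema version recorded here can be read back with a simple key lookup.

        SELECT meta_value FROM hive_meta WHERE meta_key = 'hive_sql_schema_version';
*/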


/**
@table  pipeline_wide_parameters

@colour #000000

@desc This table contains a simple hash between pipeline_wide_parameter names and their values.
      The same data used to live in the 'meta' table until both the schema and the API were finally separated from Ensembl Core.

@column param_name      the KEY of KEY-VALUE pairs (primary key)
@column param_value     the VALUE of KEY-VALUE pairs
*/

CREATE TABLE pipeline_wide_parameters (
    param_name              VARCHAR(255) NOT NULL PRIMARY KEY,
    param_value             TEXT

);
CREATE        INDEX pipeline_wide_parameters_value_idx     ON pipeline_wide_parameters (param_value);
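
/*
    Illustrative usage (a sketch): defining a parameter that becomes visible to
    all jobs of the pipeline; the name and value below are hypothetical.

        INSERT INTO pipeline_wide_parameters (param_name, param_value)
        VALUES ('take_time', '1');
*/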


/**
@table  analysis_base

@colour #C70C09

@desc   Each Analysis is a node of the pipeline diagram.
        It acts both as a "class" to which Jobs belong (and from which they inherit certain properties)
        and as a "container" for them (Jobs of an Analysis can be blocking all Jobs of another Analysis).

@column analysis_id             a unique ID that is also a foreign key to most of the other tables
@column logic_name              the name of the Analysis object
@column module                  the Perl module name that runs this Analysis
@column parameters              a stringified hash of parameters common to all jobs of the Analysis
@column resource_class_id       link to the resource_class table
@column failed_job_tolerance    % of tolerated failed Jobs
@column max_retry_count         how many times a job of this Analysis will be retried (unless there is no point)
@column can_be_empty            if TRUE, this Analysis will not be blocking if/while it doesn't have any jobs
@column priority                an Analysis with higher priority will be more likely chosen on Worker's specialization
@column meadow_type             if defined, forces this Analysis to be run only on the given Meadow
@column analysis_capacity       if defined, limits the number of Workers of this particular Analysis that are allowed to run in parallel
*/

CREATE TABLE analysis_base (
    analysis_id             INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    logic_name              VARCHAR(255) NOT NULL,
    module                  VARCHAR(255),
    parameters              TEXT,
    resource_class_id       INTEGER     NOT NULL,
    failed_job_tolerance    INTEGER     NOT NULL DEFAULT 0,
    max_retry_count         INTEGER     NOT NULL DEFAULT 3,
    can_be_empty            SMALLINT    NOT NULL DEFAULT 0,
    priority                SMALLINT    NOT NULL DEFAULT 0,
    meadow_type             VARCHAR(255)         DEFAULT NULL,
    analysis_capacity       INTEGER              DEFAULT NULL
);
CREATE UNIQUE INDEX analysis_base_logic_name_idx ON analysis_base (logic_name);
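
/*
    Illustrative usage (a sketch): registering an analysis node; the logic_name,
    module and resource_class_id values below are hypothetical.

        INSERT INTO analysis_base (logic_name, module, resource_class_id)
        VALUES ('blast_factory', 'Bio::EnsEMBL::Hive::RunnableDB::JobFactory', 1);
*/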


/**
@table  analysis_stats

@colour #C70C09

@desc   Parallel table to analysis_base which provides high level statistics on the
        state of an analysis and its jobs.  Used to provide a fast overview, and to
        provide final approval of 'DONE', which is used by the blocking rules to determine
        when to unblock other analyses.

@column analysis_id             foreign-keyed to the corresponding analysis_base entry
@column batch_size              how many jobs are claimed in one claiming operation before Worker starts executing them
@column hive_capacity           a reciprocal limiter on the number of Workers running at the same time (dependent on Workers of other Analyses)
@column status                  cached state of the Analysis

@column total_job_count         total number of Jobs of this Analysis
@column semaphored_job_count    number of Jobs of this Analysis that are in SEMAPHORED state
@column ready_job_count         number of Jobs of this Analysis that are in READY state
@column done_job_count          number of Jobs of this Analysis that are in DONE state
@column failed_job_count        number of Jobs of this Analysis that are in FAILED state

@column num_running_workers     number of running Workers of this Analysis
@column num_required_workers    the number of extra Workers of this Analysis needed to execute all READY jobs

@column behaviour               whether hive_capacity is set or is dynamically calculated based on timers
@column input_capacity          used to compute hive_capacity in DYNAMIC mode
@column output_capacity         used to compute hive_capacity in DYNAMIC mode

@column avg_msec_per_job        weighted average used to compute DYNAMIC hive_capacity
@column avg_input_msec_per_job  weighted average used to compute DYNAMIC hive_capacity
@column avg_run_msec_per_job    weighted average used to compute DYNAMIC hive_capacity
@column avg_output_msec_per_job weighted average used to compute DYNAMIC hive_capacity

@column last_update             when this entry was last updated
@column sync_lock               a binary lock flag to prevent simultaneous updates
*/

CREATE TABLE analysis_stats (
    analysis_id             INTEGER     NOT NULL,
    batch_size              INTEGER     NOT NULL DEFAULT 1,
    hive_capacity           INTEGER              DEFAULT NULL,
    status                  TEXT        NOT NULL DEFAULT 'EMPTY', /* enum('BLOCKED', 'LOADING', 'SYNCHING', 'EMPTY', 'READY', 'WORKING', 'ALL_CLAIMED', 'DONE', 'FAILED') DEFAULT 'EMPTY' NOT NULL, */

    total_job_count         INTEGER     NOT NULL DEFAULT 0,
    semaphored_job_count    INTEGER     NOT NULL DEFAULT 0,
    ready_job_count         INTEGER     NOT NULL DEFAULT 0,
    done_job_count          INTEGER     NOT NULL DEFAULT 0,
    failed_job_count        INTEGER     NOT NULL DEFAULT 0,
    num_running_workers     INTEGER     NOT NULL DEFAULT 0,
    num_required_workers    INTEGER     NOT NULL DEFAULT 0,

    behaviour               TEXT        NOT NULL DEFAULT 'STATIC', /* enum('STATIC', 'DYNAMIC') DEFAULT 'STATIC' NOT NULL, */
    input_capacity          INTEGER     NOT NULL DEFAULT 4,
    output_capacity         INTEGER     NOT NULL DEFAULT 4,

    avg_msec_per_job        INTEGER              DEFAULT NULL,
    avg_input_msec_per_job  INTEGER              DEFAULT NULL,
    avg_run_msec_per_job    INTEGER              DEFAULT NULL,
    avg_output_msec_per_job INTEGER              DEFAULT NULL,

    last_update             TIMESTAMP            DEFAULT NULL,
    sync_lock               SMALLINT    NOT NULL DEFAULT 0,

    PRIMARY KEY (analysis_id)
);
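
/*
    Illustrative usage (a sketch): a quick pipeline-wide progress overview built
    from the cached counters.

        SELECT ab.logic_name, st.status, st.done_job_count, st.failed_job_count, st.total_job_count
        FROM analysis_stats st JOIN analysis_base ab USING (analysis_id);
*/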


/**
@table  dataflow_rule

@colour #C70C09

@desc An extension of the simple_rule design, except that the goal(to) is now given in extended URL format, e.g.
        mysql://ensadmin:<pass>@ecs2:3361/compara_hive_test?analysis.logic_name='blast_NCBI34'
        (the full network address of an analysis).
        The only requirement is that there are rows in the job, analysis, dataflow_rule,
        and worker tables so that the following join works on the same database:
            WHERE analysis.analysis_id = dataflow_rule.from_analysis_id
            AND   analysis.analysis_id = job.analysis_id
            AND   analysis.analysis_id = worker.analysis_id
        These are the rules used to create entries in the job table, where the
        input_id (control data) is passed from one analysis to the next to define work.
        The analysis table will be extended so that it can specify different read and write
        databases, with the default being the database the analysis is on.

@column dataflow_rule_id        internal ID
@column from_analysis_id        foreign key to analysis table analysis_id
@column branch_code             branch_code of the fan
@column funnel_dataflow_rule_id dataflow_rule_id of the semaphored funnel (is NULL by default, which means dataflow is not semaphored)
@column to_analysis_url         foreign key to net distributed analysis logic_name reference
@column input_id_template       a template for generating a new input_id (not necessarily a hashref) in this dataflow; if undefined, the original input_id is kept
*/

CREATE TABLE dataflow_rule (
    dataflow_rule_id        INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    from_analysis_id        INTEGER     NOT NULL,
    branch_code             INTEGER     NOT NULL DEFAULT 1,
    funnel_dataflow_rule_id INTEGER              DEFAULT NULL,
    to_analysis_url         VARCHAR(255) NOT NULL DEFAULT '',
    input_id_template       TEXT                 DEFAULT NULL
);
CREATE UNIQUE INDEX from_to_branch_template_idx ON dataflow_rule (from_analysis_id, branch_code, funnel_dataflow_rule_id, to_analysis_url, input_id_template);
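
/*
    Illustrative usage (a sketch): listing where a given analysis flows its
    output, branch by branch (the analysis_id below is hypothetical).

        SELECT branch_code, to_analysis_url, input_id_template
        FROM dataflow_rule
        WHERE from_analysis_id = 1;
*/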


/**
@table  analysis_ctrl_rule

@colour #C70C09

@desc   These rules define a higher level of control.
        They are used to turn whole analysis nodes on/off (READY/BLOCKED).
        If any of the condition_analyses are not 'DONE', the ctrled_analysis is set to BLOCKED.
        When all conditions become 'DONE', the ctrled_analysis is set to READY.
        The workers then switch the analysis.status to 'WORKING' and 'DONE'.
        However, if at any moment a condition goes false, the analysis is reset to BLOCKED.

@column condition_analysis_url foreign key to net distributed analysis reference
@column ctrled_analysis_id     foreign key to analysis table analysis_id
*/

CREATE TABLE analysis_ctrl_rule (
    condition_analysis_url  VARCHAR(255) NOT NULL DEFAULT '',
    ctrled_analysis_id      INTEGER     NOT NULL
);
CREATE UNIQUE INDEX condition_ctrled_idx ON analysis_ctrl_rule (condition_analysis_url, ctrled_analysis_id);
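
/*
    Illustrative usage (a sketch): blocking one analysis until another is DONE;
    both the condition URL and the controlled analysis_id below are hypothetical.

        INSERT INTO analysis_ctrl_rule (condition_analysis_url, ctrled_analysis_id)
        VALUES ('first_step', 2);
*/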


/**
@header Resources
@colour #FF7504
*/

/**
@table  resource_class

@colour #FF7504

@desc   Maps between resource_class numeric IDs and unique names.

@column resource_class_id   unique ID of the ResourceClass
@column name                unique name of the ResourceClass
*/

CREATE TABLE resource_class (
    resource_class_id       INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    name                    VARCHAR(255) NOT NULL

);
CREATE UNIQUE INDEX resource_class_name_idx ON resource_class (name);


/**
@table  resource_description

@colour #FF7504

@desc   Maps (ResourceClass, MeadowType) pair to Meadow-specific resource lines.

@column resource_class_id   foreign-keyed to the ResourceClass entry
@column meadow_type         if the Worker is about to be executed on the given Meadow...
@column submission_cmd_args ... these are the resource arguments (queue, memory,...) to give to the submission command
@column worker_cmd_args     ... and these are the arguments that are given to the worker command being submitted
*/

CREATE TABLE resource_description (
    resource_class_id       INTEGER     NOT NULL,
    meadow_type             VARCHAR(255) NOT NULL,
    submission_cmd_args     VARCHAR(255) NOT NULL DEFAULT '',
    worker_cmd_args         VARCHAR(255) NOT NULL DEFAULT '',

    PRIMARY KEY(resource_class_id, meadow_type)
);
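
/*
    Illustrative usage (a sketch): pairing a new ResourceClass with Meadow-specific
    submission arguments; the class name, meadow type and arguments are hypothetical.

        INSERT INTO resource_class (name) VALUES ('2Gb_job');
        INSERT INTO resource_description (resource_class_id, meadow_type, submission_cmd_args)
        VALUES (last_insert_rowid(), 'LSF', '-M 2000 -R "rusage[mem=2000]"');
*/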


/**
@header Job-related
@colour #1D73DA
*/

/**
@table  job

@colour #1D73DA

@desc The job is the heart of this system.  It is the kiosk or blackboard
    where workers find things to do and then post work for other workers to do.
    These jobs are created prior to work being done, are claimed by workers,
    and are updated as the work is done, with a final update on completion.

@column job_id                  autoincrement id
@column prev_job_id             previous job which created this one
@column analysis_id             the analysis_id needed to accomplish this job.
@column input_id                input data passed into Analysis:RunnableDB to control the work
@column param_id_stack          a CSV of job_ids whose input_ids contribute to the stack of local variables for the job
@column accu_id_stack           a CSV of job_ids whose accu's contribute to the stack of local variables for the job
@column worker_id               link to worker table to define which worker claimed this job
@column status                  state the job is in
@column retry_count             the number of times the job had to be reset when a worker failed to run it
@column completed               when the job was completed
@column runtime_msec            how long did it take to execute the job (or until the moment it failed)
@column query_count             how many SQL queries were run during this job
@column semaphore_count         if this count is >0, the job is conditionally blocked (until this count drops to 0 or below). Default=0 means "nothing is blocking me by default".
@column semaphored_job_id       the job_id of job S that is waiting for this job to decrease S's semaphore_count. Default=NULL means "I'm not blocking anything by default".
*/

CREATE TABLE job (
    job_id                  INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    prev_job_id             INTEGER              DEFAULT NULL,  -- the job that created this one using a dataflow rule
    analysis_id             INTEGER     NOT NULL,
    input_id                CHAR(255)   NOT NULL,
    param_id_stack          CHAR(64)    NOT NULL DEFAULT '',
    accu_id_stack           CHAR(64)    NOT NULL DEFAULT '',
    worker_id               INTEGER              DEFAULT NULL,
    status                  TEXT        NOT NULL DEFAULT 'READY', /* enum('SEMAPHORED','READY','CLAIMED','COMPILATION','FETCH_INPUT','RUN','WRITE_OUTPUT','DONE','FAILED','PASSED_ON') DEFAULT 'READY' NOT NULL, */
    retry_count             INTEGER     NOT NULL DEFAULT 0,
    completed               TIMESTAMP            DEFAULT NULL,
    runtime_msec            INTEGER              DEFAULT NULL,
    query_count             INTEGER              DEFAULT NULL,

    semaphore_count         INTEGER     NOT NULL DEFAULT 0,
    semaphored_job_id       INTEGER              DEFAULT NULL
);
CREATE UNIQUE INDEX job_input_id_stacks_analysis_idx ON job (input_id, param_id_stack, accu_id_stack, analysis_id);
CREATE        INDEX job_analysis_status_retry_idx ON job (analysis_id, status, retry_count);
CREATE        INDEX job_worker_idx ON job (worker_id);
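
/*
    Illustrative usage (a sketch): counting the jobs of one analysis by status,
    the kind of summary that gets cached in analysis_stats (analysis_id hypothetical).

        SELECT status, COUNT(*) FROM job WHERE analysis_id = 1 GROUP BY status;
*/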


/**
@table  job_file

@colour #1D73DA

@desc   For testing/debugging purposes, both the STDOUT and STDERR streams of each Job
        can be redirected into a separate log file.
        This table holds filesystem paths to one or both of those files.
        There is at most one entry per job_id and retry.

@column job_id             foreign key
@column worker_id          link to worker table to define which worker claimed this job
@column retry              copy of retry_count of job as it was run
@column stdout_file        path to the job's STDOUT log
@column stderr_file        path to the job's STDERR log
*/

CREATE TABLE job_file (
    job_id                  INTEGER     NOT NULL,
    retry                   INTEGER     NOT NULL,
    worker_id               INTEGER     NOT NULL,
    stdout_file             VARCHAR(255),
    stderr_file             VARCHAR(255),

    PRIMARY KEY (job_id, retry)
);
CREATE        INDEX job_file_worker_idx ON job_file (worker_id);
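
/*
    Illustrative usage (a sketch): locating the STDERR log of the latest attempt
    of a given job (the job_id below is hypothetical).

        SELECT retry, stderr_file FROM job_file WHERE job_id = 42 ORDER BY retry DESC LIMIT 1;
*/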


/**
@table  accu

@colour #1D73DA

@desc   Accumulator for funneled dataflow.

@column sending_job_id     semaphoring job in the "box"
@column receiving_job_id   semaphored job outside the "box"
@column struct_name        name of the structured parameter
@column key_signature      locates the part of the structured parameter
@column value              value of the part
*/

CREATE TABLE accu (
    sending_job_id          INTEGER,
    receiving_job_id        INTEGER     NOT NULL,
    struct_name             VARCHAR(255) NOT NULL,
    key_signature           VARCHAR(255) NOT NULL,
    value                   TEXT
);
CREATE        INDEX accu_sending_idx   ON accu (sending_job_id);
CREATE        INDEX accu_receiving_idx ON accu (receiving_job_id);
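
/*
    Illustrative usage (a sketch): inspecting the values accumulated so far for
    a semaphored funnel job (the receiving_job_id below is hypothetical).

        SELECT struct_name, key_signature, value FROM accu WHERE receiving_job_id = 42;
*/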


/**
@table  analysis_data

@colour #1D73DA

@desc   A generic blob-storage hash.
        Currently the only legitimate use of this table is "overflow" of job.input_ids:
        when they grow longer than 254 characters the real data is stored in analysis_data instead,
        and the input_id contains the corresponding analysis_data_id.

@column analysis_data_id    primary id
@column data                text blob which holds the data
*/

CREATE TABLE analysis_data (
    analysis_data_id        INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    data                    TEXT
);
CREATE        INDEX analysis_data_idx ON analysis_data (data);
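
/*
    Illustrative usage (a sketch): resolving an "overflow" input_id, assuming the
    job row points at analysis_data_id 7 (a hypothetical value).

        SELECT data FROM analysis_data WHERE analysis_data_id = 7;
*/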


/**
@header Worker table
@colour #24DA06
*/

/**
@table  worker

@colour #24DA06

@desc Entries of this table correspond to Worker objects of the API.
        Workers are created by inserting into this table
        so that there is only one instance of a Worker object in the database.
        As Workers live and do work, they update this table, and when they die they update it one last time.

@column worker_id           unique ID of the Worker
@column meadow_type         type of the Meadow it is running on
@column meadow_name         name of the Meadow it is running on (for the 'LOCAL' type it is the same as host)
@column host                execution host name
@column process_id          identifies the Worker process on the Meadow (for 'LOCAL' it is the OS PID)
@column resource_class_id   links to Worker's resource class
@column analysis_id         the Analysis the Worker is specialized into
@column work_done           how many jobs the Worker has completed successfully
@column status              current status of the Worker
@column born                when the Worker process was started
@column last_check_in       when the Worker last checked into the database
@column died                if defined, when the Worker died (or its premature death was first detected by GC)
@column cause_of_death      if defined, why did the Worker exit (or why it was killed)
@column log_dir             if defined, a filesystem directory where this Worker's output is logged
*/

CREATE TABLE worker (
    worker_id               INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    meadow_type             VARCHAR(255) NOT NULL,
    meadow_name             VARCHAR(255) NOT NULL,
    host                    VARCHAR(255) NOT NULL,
    process_id              VARCHAR(255) NOT NULL,
    resource_class_id       INTEGER              DEFAULT NULL,

    analysis_id             INTEGER              DEFAULT NULL,
    work_done               INTEGER     NOT NULL DEFAULT 0,
    status                  TEXT        NOT NULL DEFAULT 'READY', /* enum('SPECIALIZATION','COMPILATION','READY','PRE_CLEANUP','FETCH_INPUT','RUN','WRITE_OUTPUT','POST_CLEANUP','DEAD') DEFAULT 'READY' NOT NULL */
    born                    TIMESTAMP   NOT NULL DEFAULT CURRENT_TIMESTAMP,
    last_check_in           TIMESTAMP   NOT NULL,
    died                    TIMESTAMP            DEFAULT NULL,
    cause_of_death          TEXT                 DEFAULT NULL, /* enum('NO_ROLE', 'NO_WORK', 'JOB_LIMIT', 'HIVE_OVERLOAD', 'LIFESPAN', 'CONTAMINATED', 'RELOCATED', 'KILLED_BY_USER', 'MEMLIMIT', 'RUNLIMIT', 'SEE_MSG', 'UNKNOWN') DEFAULT NULL */
    log_dir                 VARCHAR(255)         DEFAULT NULL
);
CREATE        INDEX worker_analysis_id_status_idx ON worker (analysis_id, status);
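
/*
    Illustrative usage (a sketch): reviewing Workers that were killed by resource
    limits on a given meadow type (the meadow type below is hypothetical).

        SELECT worker_id, host, process_id, died
        FROM worker
        WHERE cause_of_death IN ('MEMLIMIT', 'RUNLIMIT') AND meadow_type = 'LSF';
*/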


/**
@header Logging and monitoring
@colour #F4D20C
*/

/**
@table  log_message

@colour #08DAD8

@desc   When a Job or a job-less Worker (job_id=NULL) throws a "die" message
        for any reason, the message is recorded in this table.
        The is_error flag indicates whether or not the message reflects a failure.
        $self->warning("...") messages are also recorded here, with is_error=0.

@column log_message_id  an autoincremented primary id of the message
@column         job_id  the id of the job that threw the message (or NULL if the message was thrown outside of a job)
@column      worker_id  the 'current' worker
@column           time  when the message was thrown
@column          retry  retry_count of the job when the message was thrown (or NULL if no job)
@column         status  of the job or worker when the message was thrown
@column            msg  string that contains the message
@column       is_error  binary flag
*/

CREATE TABLE log_message (
    log_message_id          INTEGER     NOT NULL PRIMARY KEY AUTOINCREMENT,
    job_id                  INTEGER              DEFAULT NULL,
    worker_id               INTEGER              DEFAULT NULL,
    time                    TIMESTAMP   NOT NULL DEFAULT CURRENT_TIMESTAMP,
    retry                   INTEGER              DEFAULT NULL,
    status                  TEXT        NOT NULL DEFAULT 'UNKNOWN', /* enum('UNKNOWN','SPECIALIZATION','COMPILATION','CLAIMED','READY','PRE_CLEANUP','FETCH_INPUT','RUN','WRITE_OUTPUT','POST_CLEANUP','PASSED_ON') DEFAULT 'UNKNOWN' */
    msg                     TEXT,
    is_error                SMALLINT
);
CREATE        INDEX log_message_worker_idx ON log_message (worker_id);
CREATE        INDEX log_message_job_idx ON log_message (job_id);
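
/*
    Illustrative usage (a sketch): pulling up the error messages recorded for one
    job across all of its retries (the job_id below is hypothetical).

        SELECT time, retry, status, msg
        FROM log_message
        WHERE job_id = 42 AND is_error = 1
        ORDER BY time;
*/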


/**
@table  analysis_stats_monitor

@colour #F4D20C

@desc   A regular timestamped snapshot of the analysis_stats table.

@column time                    when this snapshot was taken

@column analysis_id             foreign-keyed to the corresponding analysis_base entry
@column batch_size              how many jobs are claimed in one claiming operation before Worker starts executing them
@column hive_capacity           a reciprocal limiter on the number of Workers running at the same time (dependent on Workers of other Analyses)
@column status                  cached state of the Analysis

@column total_job_count         total number of Jobs of this Analysis
@column semaphored_job_count    number of Jobs of this Analysis that are in SEMAPHORED state
@column ready_job_count         number of Jobs of this Analysis that are in READY state
@column done_job_count          number of Jobs of this Analysis that are in DONE state
@column failed_job_count        number of Jobs of this Analysis that are in FAILED state

@column num_running_workers     number of running Workers of this Analysis
@column num_required_workers    the number of extra Workers of this Analysis needed to execute all READY jobs

@column behaviour               whether hive_capacity is set or is dynamically calculated based on timers
@column input_capacity          used to compute hive_capacity in DYNAMIC mode
@column output_capacity         used to compute hive_capacity in DYNAMIC mode

@column avg_msec_per_job        weighted average used to compute DYNAMIC hive_capacity
@column avg_input_msec_per_job  weighted average used to compute DYNAMIC hive_capacity
@column avg_run_msec_per_job    weighted average used to compute DYNAMIC hive_capacity
@column avg_output_msec_per_job weighted average used to compute DYNAMIC hive_capacity

@column last_update             when this entry was last updated
@column sync_lock               a binary lock flag to prevent simultaneous updates
*/

CREATE TABLE analysis_stats_monitor (
    time                    TIMESTAMP   NOT NULL DEFAULT CURRENT_TIMESTAMP,

    analysis_id             INTEGER     NOT NULL,
    batch_size              INTEGER     NOT NULL DEFAULT 1,
    hive_capacity           INTEGER              DEFAULT NULL,
    status                  TEXT        NOT NULL DEFAULT 'EMPTY', /* enum('BLOCKED', 'LOADING', 'SYNCHING', 'EMPTY', 'READY', 'WORKING', 'ALL_CLAIMED', 'DONE', 'FAILED') DEFAULT 'EMPTY' NOT NULL, */

    total_job_count         INTEGER     NOT NULL DEFAULT 0,
    semaphored_job_count    INTEGER     NOT NULL DEFAULT 0,
    ready_job_count         INTEGER     NOT NULL DEFAULT 0,
    done_job_count          INTEGER     NOT NULL DEFAULT 0,
    failed_job_count        INTEGER     NOT NULL DEFAULT 0,
    num_running_workers     INTEGER     NOT NULL DEFAULT 0,
    num_required_workers    INTEGER     NOT NULL DEFAULT 0,

    behaviour               TEXT        NOT NULL DEFAULT 'STATIC', /* enum('STATIC', 'DYNAMIC') DEFAULT 'STATIC' NOT NULL, */
    input_capacity          INTEGER     NOT NULL DEFAULT 4,
    output_capacity         INTEGER     NOT NULL DEFAULT 4,

    avg_msec_per_job        INTEGER              DEFAULT NULL,
    avg_input_msec_per_job  INTEGER              DEFAULT NULL,
    avg_run_msec_per_job    INTEGER              DEFAULT NULL,
    avg_output_msec_per_job INTEGER              DEFAULT NULL,

    last_update             TIMESTAMP            DEFAULT NULL,
    sync_lock               SMALLINT    NOT NULL DEFAULT 0
);
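
/*
    Illustrative usage (a sketch): following how the READY and DONE counts of one
    analysis changed over time (the analysis_id below is hypothetical).

        SELECT time, ready_job_count, done_job_count
        FROM analysis_stats_monitor
        WHERE analysis_id = 1
        ORDER BY time;
*/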