#
# You may distribute this module under the same terms as perl itself

=pod 

=head1 NAME

  Bio::EnsEMBL::Hive::Queen

=head1 SYNOPSIS

  The Queen of the Hive based job control system

=head1 DESCRIPTION

  The Queen of the Hive based job control system is responsible for 'birthing' the
  correct number of workers of the right type so that they can find jobs to do.
  It will also free up jobs of Workers that died unexpectedly so that other workers
  can claim them.

  Hive based processing is a more controlled version of an autonomous agent type system.
  Rather than being told what to do by a centralized control system (like the current
  pipeline system), each worker queries a central database for jobs ("give me jobs").

  Each worker is linked to an analysis_id, registers itself on creation
  into the Hive, creates a RunnableDB instance of the Analysis->module,
  gets $analysis->stats->batch_size jobs from the job table, does its work,
  and creates the next layer of job entries by interfacing with
  the DataflowRuleAdaptor to determine the analyses it needs to pass its
  output data to, creating jobs on those next analyses.
  It repeats this cycle until it has lived its lifetime or until there are no
  more jobs left.
  The lifetime limit is just a safety limit to prevent workers from 'infecting'
  a system.

  The Queen's job is simply to birth Workers of the correct analysis_id to get the
  work done.  The only other thing the Queen does is free up jobs that were
  claimed by Workers that died unexpectedly so that other workers can take
  over the work.

  The Beekeeper is in charge of interfacing between the Queen and a compute resource
  or 'compute farm'.  Its job is to query Queens to see whether they need any workers and to
  send the requested number of workers to open machines via the runWorker.pl script.
  It is also responsible for interfacing with the Queen to identify workers which died
  unexpectedly.
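
  A minimal usage sketch (illustrative only: the database URL, the way the Queen is
  obtained and the $valley object are assumptions, not part of this module):

    my $hive_dba = Bio::EnsEMBL::Hive::DBSQL::DBAdaptor->new( -url => $hive_url );   # assumed entry point
    my $queen    = $hive_dba->get_Queen;

    $queen->synchronize_hive();                    # refresh the analysis_stats summaries
    $queen->check_for_dead_workers($valley, 1);    # bury lost Workers and release their jobs
    $queen->print_analysis_status();               # report per-analysis progress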

=head1 CONTACT

  Please contact the ehive-users@ebi.ac.uk mailing list with questions/suggestions.

=head1 APPENDIX

  The rest of the documentation details each of the object methods.
  Internal methods are usually preceded with a _

=cut

package Bio::EnsEMBL::Hive::Queen;

use strict;
use POSIX;
use File::Path 'make_path';

use Bio::EnsEMBL::Utils::Argument ('rearrange');

use Bio::EnsEMBL::Hive::Utils ('destringify', 'dir_revhash');  # NB: needed by invisible code
use Bio::EnsEMBL::Hive::AnalysisJob;
use Bio::EnsEMBL::Hive::Worker;

use base ('Bio::EnsEMBL::Hive::DBSQL::ObjectAdaptor');


sub default_table_name {
    return 'worker';
}


sub default_insertion_method {
    return 'INSERT';
}


sub object_class {
    return 'Bio::EnsEMBL::Hive::Worker';
}


############################
#
# PUBLIC API
#
############################


=head2 create_new_worker

  Description: Creates an entry in the worker table,
               populates some non-storable attributes
               and returns a Worker object based on that insert.
               This guarantees that each worker in this Queen's hive
               has been properly registered in the database.
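  Example    : # hedged sketch of the call made by runWorker.pl (argument values are placeholders):
               my $worker = $queen->create_new_worker(
                     -meadow_type => 'LSF',
                     -meadow_name => $meadow_name,
                     -process_id  => $process_id,
                     -exec_host   => $exec_host,
                     -job_limit   => $job_limit,
               );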
  Returntype : Bio::EnsEMBL::Hive::Worker
  Caller     : runWorker.pl

=cut

sub create_new_worker {
    my ($self, @args) = @_;

    my ($meadow_type, $meadow_name, $process_id, $exec_host, $resource_class_id, $resource_class_name,
        $no_write, $debug, $worker_log_dir, $hive_log_dir, $job_limit, $life_span, $no_cleanup, $retry_throwing_jobs, $can_respecialize) =
    rearrange([qw(meadow_type meadow_name process_id exec_host resource_class_id resource_class_name
                no_write debug worker_log_dir hive_log_dir job_limit life_span no_cleanup retry_throwing_jobs can_respecialize) ], @args);

    foreach my $prev_worker_incarnation (@{ $self->fetch_all( "status!='DEAD' AND meadow_type='$meadow_type' AND meadow_name='$meadow_name' AND process_id='$process_id'" ) }) {
            # so far 'RELOCATED' events have been detected on LSF 9.0 in response to sending signal #99 or #100
            # Since I don't know how to avoid them, I am trying to register them when they happen.
            # The following snippet buries the previous incarnation of the Worker before starting a new one.
            #
            # FIXME: if GarbageCollector (beekeeper -dead) gets to these processes first, it will register them as DEAD/UNKNOWN.
            #       LSF 9.0 does not report "rescheduling" events in the output of 'bacct', but does mention them in 'bhist'.
            #       So parsing 'bhist' output would probably yield the most accurate & confident registration of these events.
        $prev_worker_incarnation->cause_of_death( 'RELOCATED' );
        $self->register_worker_death( $prev_worker_incarnation );
    }

    if( defined($resource_class_name) ) {
        my $rc = $self->db->get_ResourceClassAdaptor->fetch_by_name($resource_class_name)
            or die "resource_class with name='$resource_class_name' could not be fetched from the database";

        $resource_class_id = $rc->dbID;
    }

    my $sql = q{INSERT INTO worker (born, last_check_in, meadow_type, meadow_name, host, process_id, resource_class_id)
              VALUES (CURRENT_TIMESTAMP, CURRENT_TIMESTAMP, ?, ?, ?, ?, ?)};

    my $sth = $self->prepare($sql);
    $sth->execute($meadow_type, $meadow_name, $exec_host, $process_id, $resource_class_id);
    my $worker_id = $self->dbc->db_handle->last_insert_id(undef, undef, 'worker', 'worker_id')
        or die "Could not insert a new worker";
    $sth->finish;

    if($hive_log_dir or $worker_log_dir) {
        my $dir_revhash = dir_revhash($worker_id);
        $worker_log_dir ||= $hive_log_dir .'/'. ($dir_revhash ? "$dir_revhash/" : '') .'worker_id_'.$worker_id;

        eval {
            make_path( $worker_log_dir );
            1;
        } or die "Could not create '$worker_log_dir' directory : $@";

        my $sth_add_log = $self->prepare( "UPDATE worker SET log_dir=? WHERE worker_id=?" );
        $sth_add_log->execute($worker_log_dir, $worker_id);
        $sth_add_log->finish;
    }

    my $worker = $self->fetch_by_dbID($worker_id)
        or die "Could not fetch worker with dbID=$worker_id";

    $worker->init;

    if(defined($job_limit)) {
      $worker->job_limiter($job_limit);
      $worker->life_span(0);
    }

    $worker->life_span($life_span * 60)                 if($life_span);

    $worker->execute_writes(0)                          if($no_write);

    $worker->perform_cleanup(0)                         if($no_cleanup);

    $worker->debug($debug)                              if($debug);

    $worker->retry_throwing_jobs($retry_throwing_jobs)  if(defined $retry_throwing_jobs);

    $worker->can_respecialize($can_respecialize)        if(defined $can_respecialize);

    return $worker;
}


=head2 specialize_new_worker

  Description: If analysis_id or logic_name is specified it will try to specialize the Worker into this analysis.
               If not specified, the Queen will analyze the hive and pick the most suitable analysis.
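  Example    : # hedged sketch (the logic_name value is a placeholder):
               $queen->specialize_new_worker( $worker, -logic_name => 'blast_factory' );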
  Caller     : Bio::EnsEMBL::Hive::Worker

=cut

sub specialize_new_worker {
    my ($self, $worker, @args) = @_;

    my ($analysis_id, $logic_name, $job_id, $force) =
        rearrange([qw(analysis_id logic_name job_id force) ], @args);

    if( scalar( grep {defined($_)} ($analysis_id, $logic_name, $job_id) ) > 1) {
        die "At most one of the options {-analysis_id, -logic_name, -job_id} can be set to pre-specialize a Worker";
    }

    my ($analysis, $stats, $special_batch);
    my $analysis_stats_adaptor = $self->db->get_AnalysisStatsAdaptor;

    if($job_id or $analysis_id or $logic_name) {    # probably pre-specialized from command-line

        if($job_id) {
            print "resetting and fetching job for job_id '$job_id'\n";

            my $job_adaptor = $self->db->get_AnalysisJobAdaptor;

            my $job = $job_adaptor->fetch_by_dbID( $job_id )
                or die "Could not fetch job with dbID='$job_id'";
            my $job_status = $job->status();

            if($job_status =~/(CLAIMED|PRE_CLEANUP|FETCH_INPUT|RUN|WRITE_OUTPUT|POST_CLEANUP)/ ) {
                die "Job with dbID='$job_id' is already in progress, cannot run";   # FIXME: try GC first, then complain
            } elsif($job_status =~/(DONE|SEMAPHORED)/ and !$force) {
                die "Job with dbID='$job_id' is $job_status, please use -force 1 to override";
            }

            if(($job_status eq 'DONE') and $job->semaphored_job_id) {
                warn "Increasing the semaphore count of the dependent job";
                $job_adaptor->increase_semaphore_count_for_jobid( $job->semaphored_job_id );
            }

            my $worker_id = $worker->dbID;
            if($job = $job_adaptor->reset_or_grab_job_by_dbID($job_id, $worker_id)) {
                $special_batch = [ $job ];
                $analysis_id = $job->analysis_id;
            } else {
                die "Could not claim job with dbID='$job_id' for worker with dbID='$worker_id'";
            }
        }

        if($logic_name) {
            $analysis = $self->db->get_AnalysisAdaptor->fetch_by_logic_name($logic_name)
                or die "analysis with name='$logic_name' could not be fetched from the database";

            $analysis_id = $analysis->dbID;

        } elsif($analysis_id) {
            $analysis = $self->db->get_AnalysisAdaptor->fetch_by_dbID($analysis_id)
                or die "analysis with dbID='$analysis_id' could not be fetched from the database";
        }

        if( $worker->resource_class_id
        and $worker->resource_class_id != $analysis->resource_class_id) {
                die "resource_class of analysis ".$analysis->logic_name." is incompatible with this Worker's resource_class";
        }

        $stats = $analysis_stats_adaptor->fetch_by_analysis_id($analysis_id);
        $self->safe_synchronize_AnalysisStats($stats);

        unless($special_batch or $force) {    # do we really need to run this analysis?
            if($self->get_hive_current_load() >= 1.1) {
                $worker->cause_of_death('HIVE_OVERLOAD');
                die "Hive is overloaded, can't specialize a worker";
            }
            if($stats->status eq 'BLOCKED') {
                die "Analysis is BLOCKED, can't specialize a worker";
            }
            if($stats->num_required_workers <= 0) {
                die "Analysis doesn't require extra workers at the moment";
            }
            if($stats->status eq 'DONE') {
                die "Analysis is DONE, and doesn't require workers";
            }
        }

            # probably scheduled by beekeeper.pl:
    } elsif( $stats = $self->suggest_analysis_to_specialize_by_rc_id_meadow_type($worker->resource_class_id, $worker->meadow_type) ) {

        $worker->analysis( undef ); # make sure we reset anything that was there before
        $analysis_id = $stats->analysis_id;

    } else {
        $worker->cause_of_death('NO_ROLE');
        die "No analysis suitable for the worker was found\n";
    }

        # now set it in the $worker:
    $worker->analysis_id( $analysis_id );

    my $sth_update_analysis_id = $self->prepare( "UPDATE worker SET analysis_id=? WHERE worker_id=?" );
    $sth_update_analysis_id->execute($worker->analysis_id, $worker->dbID);
    $sth_update_analysis_id->finish;

    if($special_batch) {
        $worker->special_batch( $special_batch );
    } else {    # count it as autonomous worker sharing the load of that analysis:

        $stats->update_status('WORKING');

        $analysis_stats_adaptor->decrease_required_workers($worker->analysis_id);
    }

        # The following increment used to be done only when no specific task was given to the worker,
        # thereby excluding such "special task" workers from being counted in num_running_workers.
        #
        # However this may be tricky to emulate by triggers that know nothing about "special tasks",
        # so I am (temporarily?) simplifying the accounting algorithm.
        #
    unless( $self->db->hive_use_triggers() ) {
        $analysis_stats_adaptor->increase_running_workers($worker->analysis_id);
    }
}

sub register_worker_death {
    my ($self, $worker, $self_burial) = @_;

    return unless($worker);

    my $cod = $worker->cause_of_death() || 'UNKNOWN';    # make sure we do not attempt to insert a void

                    # FIXME: make it possible to set the 'died' timestamp if we have detected it from logs:
    my $sql = qq{UPDATE worker SET died=CURRENT_TIMESTAMP
    } . ( $self_burial ? ',last_check_in=CURRENT_TIMESTAMP ' : '') . qq{
                    ,status='DEAD'
                    ,work_done='}. $worker->work_done . qq{'
                    ,cause_of_death='$cod'
                WHERE worker_id='}. $worker->dbID . qq{'};
    $self->dbc->do( $sql );

    if(my $analysis_id = $worker->analysis_id) {
        my $analysis_stats_adaptor = $self->db->get_AnalysisStatsAdaptor;

        unless( $self->db->hive_use_triggers() ) {
            $analysis_stats_adaptor->decrease_running_workers($worker->analysis_id);
        }

        unless( $cod eq 'NO_WORK'
            or  $cod eq 'JOB_LIMIT'
            or  $cod eq 'HIVE_OVERLOAD'
            or  $cod eq 'LIFESPAN'
        ) {
                $self->db->get_AnalysisJobAdaptor->release_undone_jobs_from_worker($worker);
        }

            # re-sync the analysis_stats when a worker dies as part of dynamic sync system
        if($self->safe_synchronize_AnalysisStats($worker->analysis->stats)->status ne 'DONE') {
            # since I'm dying I should make sure there is someone to take my place after I'm gone ...
            # above synch still sees me as a 'living worker' so I need to compensate for that
            $analysis_stats_adaptor->increase_required_workers($worker->analysis_id);
        }
    }
}


sub check_for_dead_workers {    # scans the whole Valley for lost Workers (but ignores unreachable ones)
    my ($self, $valley, $check_buried_in_haste) = @_;

    warn "GarbageCollector:\tChecking for lost Workers...\n";

    my $last_few_seconds            = 5;    # FIXME: It is probably a good idea to expose this parameter for easier tuning.
    my $queen_overdue_workers       = $self->fetch_overdue_workers( $last_few_seconds );    # check the workers we have not seen active during the $last_few_seconds
    my %mt_and_pid_to_worker_status = ();
    my %worker_status_counts        = ();
    my %mt_and_pid_to_lost_worker   = ();

    warn "GarbageCollector:\t[Queen:] out of ".scalar(@$queen_overdue_workers)." Workers that haven't checked in during the last $last_few_seconds seconds...\n";

    foreach my $worker (@$queen_overdue_workers) {

        my $meadow_type = $worker->meadow_type;
        if(my $meadow = $valley->find_available_meadow_responsible_for_worker($worker)) {
            $mt_and_pid_to_worker_status{$meadow_type} ||= $meadow->status_of_all_our_workers;
        } else {
            $worker_status_counts{$meadow_type}{'UNREACHABLE'}++;

            next;   # Worker is unreachable from this Valley
        }

        my $process_id = $worker->process_id;
        if(my $status = $mt_and_pid_to_worker_status{$meadow_type}{$process_id}) { # can be RUN|PEND|xSUSP
            $worker_status_counts{$meadow_type}{$status}++;
        } else {
            $worker_status_counts{$meadow_type}{'LOST'}++;

            $mt_and_pid_to_lost_worker{$meadow_type}{$process_id} = $worker;
        }
    }

        # just a quick summary report:
    foreach my $meadow_type (keys %worker_status_counts) {
        warn "GarbageCollector:\t[$meadow_type Meadow:]\t".join(', ', map { "$_:$worker_status_counts{$meadow_type}{$_}" } keys %{$worker_status_counts{$meadow_type}})."\n\n";
    }

    while(my ($meadow_type, $pid_to_lost_worker) = each %mt_and_pid_to_lost_worker) {
        my $this_meadow = $valley->available_meadow_hash->{$meadow_type};

        if(my $lost_this_meadow = scalar(keys %$pid_to_lost_worker) ) {
            warn "GarbageCollector:\tDiscovered $lost_this_meadow lost $meadow_type Workers\n";

            my $wpid_to_cod = {};
            if($this_meadow->can('find_out_causes')) {
                $wpid_to_cod = $this_meadow->find_out_causes( keys %$pid_to_lost_worker );
                my $lost_with_known_cod = scalar(keys %$wpid_to_cod);
                warn "GarbageCollector:\tFound why $lost_with_known_cod of $meadow_type Workers died\n";
            } else {
                warn "GarbageCollector:\t$meadow_type meadow does not support post-mortem examination\n";
            }

            warn "GarbageCollector:\tReleasing the jobs\n";
            while(my ($process_id, $worker) = each %$pid_to_lost_worker) {
                $worker->cause_of_death( $wpid_to_cod->{$process_id} || 'UNKNOWN');
                $self->register_worker_death($worker);
            }
        }
    }

        # the following bit is completely Meadow-agnostic and only restores database integrity:
    if($check_buried_in_haste) {
        warn "GarbageCollector:\tChecking for Workers buried in haste...\n";
        my $buried_in_haste_list = $self->fetch_all_dead_workers_with_jobs();
        if(my $bih_number = scalar(@$buried_in_haste_list)) {
            warn "GarbageCollector:\tfound $bih_number such Workers, reclaiming their jobs.\n\n";
            if($bih_number) {
                my $job_adaptor = $self->db->get_AnalysisJobAdaptor();
                foreach my $worker (@$buried_in_haste_list) {
                    $job_adaptor->release_undone_jobs_from_worker($worker);
                }
            }
        } else {
            warn "GarbageCollector:\tfound none\n";
        }
    }
}


    # a new version that both checks in and updates the status
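    # A hedged usage sketch: presumably called periodically by the Worker itself, e.g.
    #
    #   $queen->check_in_worker( $worker );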
sub check_in_worker {
    my ($self, $worker) = @_;

    $self->dbc->do("UPDATE worker SET last_check_in=CURRENT_TIMESTAMP, status='".$worker->status."', work_done='".$worker->work_done."' WHERE worker_id='".$worker->dbID."'");
}


=head2 reset_job_by_dbID_and_sync

  Arg [1]    : int $job_id
  Example    : my $job = $queen->reset_job_by_dbID_and_sync($job_id);
  Description: For the specified job_id it will fetch just that job,
               reset it completely as if it had never run, and return it.
               Specifying a particular job bypasses the safety checks,
               thus multiple workers could end up running the
               same job simultaneously (use only for debugging).
  Returntype : none
  Exceptions :
  Caller     : beekeeper.pl

=cut

sub reset_job_by_dbID_and_sync {
    my ($self, $job_id) = @_;

    my $job_adaptor = $self->db->get_AnalysisJobAdaptor;
    my $job = $job_adaptor->reset_or_grab_job_by_dbID($job_id);

    my $stats = $self->db->get_AnalysisStatsAdaptor->fetch_by_analysis_id($job->analysis_id);
    $self->synchronize_AnalysisStats($stats);
}


######################################
#
# Public API interface for beekeeper
#
######################################

    # Note: asking for Queen->fetch_overdue_workers(0) essentially means
    #       "fetch all workers known to the Queen not to be officially dead"
    #
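    #       A hedged usage sketch (the 3600-second threshold is just an illustration):
    #
    #           my $overdue_workers = $queen->fetch_overdue_workers( 3600 );    # not seen for an hour
    #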
sub fetch_overdue_workers {
    my ($self,$overdue_secs) = @_;

    $overdue_secs = 3600 unless(defined($overdue_secs));

    my $constraint = "status!='DEAD' AND ".{
            'mysql'     =>  "(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(last_check_in)) > $overdue_secs",
            'sqlite'    =>  "(strftime('%s','now')-strftime('%s',last_check_in)) > $overdue_secs",
            'pgsql'     =>  "EXTRACT(EPOCH FROM CURRENT_TIMESTAMP - last_check_in) > $overdue_secs",
        }->{ $self->dbc->driver };

    return $self->fetch_all( $constraint );
}


sub fetch_all_dead_workers_with_jobs {
    my $self = shift;

    return $self->fetch_all( "JOIN job j USING(worker_id) WHERE worker.status='DEAD' AND j.status NOT IN ('DONE', 'READY', 'FAILED', 'PASSED_ON') GROUP BY worker_id" );
}


=head2 synchronize_hive

  Arg [1]    : $filter_analysis (optional)
  Example    : $queen->synchronize_hive();
  Description: Runs through all analyses in the system and synchronizes
              the analysis_stats summary with the states in the job
              and worker tables.  It then checks all the blocking rules
              and blocks/unblocks analyses as needed.
  Exceptions : none
  Caller     : general

=cut

sub synchronize_hive {
  my $self          = shift;
  my $filter_analysis = shift; # optional parameter

  my $start_time = time();

  my $list_of_analyses = $filter_analysis ? [$filter_analysis] : $self->db->get_AnalysisAdaptor->fetch_all;

  print STDERR "\nSynchronizing the hive (".scalar(@$list_of_analyses)." analyses this time):\n";
  foreach my $analysis (@$list_of_analyses) {
    $self->synchronize_AnalysisStats($analysis->stats);
    print STDERR ( ($analysis->stats()->status eq 'BLOCKED') ? 'x' : 'o');
  }
  print STDERR "\n";

  print STDERR ''.((time() - $start_time))." seconds to synchronize_hive\n\n";
}


=head2 safe_synchronize_AnalysisStats

  Arg [1]    : Bio::EnsEMBL::Hive::AnalysisStats object
  Example    : $self->safe_synchronize_AnalysisStats($stats);
  Description: A wrapper around synchronize_AnalysisStats that performs
               extra checks and grabs the sync_lock before proceeding with the sync.
               Used by the distributed worker sync system to avoid contention.
  Exceptions : none
  Caller     : general

=cut

sub safe_synchronize_AnalysisStats {
    my ($self, $stats) = @_;

    my $max_refresh_attempts = 5;
    while($stats->sync_lock and $max_refresh_attempts--) {   # another Worker/Beekeeper is synching this analysis right now
        sleep(1);
        $stats->refresh();  # just try to avoid collision
    }

    return $stats if($stats->status eq 'DONE');
    return $stats if(($stats->status eq 'WORKING') and
                   ($stats->seconds_since_last_update < 3*60));

        # try to claim the sync_lock
    my $sql = "UPDATE analysis_stats SET status='SYNCHING', sync_lock=1 ".
              "WHERE sync_lock=0 and analysis_id=" . $stats->analysis_id;
    my $row_count = $self->dbc->do($sql);  
    return $stats unless($row_count == 1);        # return the un-updated status if locked

        # if we managed to obtain the lock, let's go and perform the sync:
    $self->synchronize_AnalysisStats($stats);

    return $stats;
}


=head2 synchronize_AnalysisStats

  Arg [1]    : Bio::EnsEMBL::Hive::AnalysisStats object
  Example    : $self->synchronize_AnalysisStats($analysisStats);
  Description: Queries the job and worker tables to get summary counts
               and rebuilds the AnalysisStats object.  Then updates the
               analysis_stats table with the new summary info
  Returntype : newly synced Bio::EnsEMBL::Hive::AnalysisStats object
  Exceptions : none
  Caller     : general

=cut

sub synchronize_AnalysisStats {
  my $self = shift;
  my $analysisStats = shift;

  return $analysisStats unless($analysisStats);
  return $analysisStats unless($analysisStats->analysis_id);

  $analysisStats->refresh(); ## Need to get the new hive_capacity for dynamic analyses

  unless($self->db->hive_use_triggers()) {
      $analysisStats->total_job_count(0);
      $analysisStats->semaphored_job_count(0);
      $analysisStats->ready_job_count(0);
      $analysisStats->done_job_count(0);
      $analysisStats->failed_job_count(0);

            # ask for analysis_id to force MySQL to use existing index on (analysis_id, status)
      my $sql = "SELECT analysis_id, status, count(*) FROM job WHERE analysis_id=? GROUP BY analysis_id, status";
      my $sth = $self->prepare($sql);
      $sth->execute($analysisStats->analysis_id);

      my $done_here       = 0;
      my $done_elsewhere  = 0;
      my $total_job_count = 0;
      while (my ($dummy_analysis_id, $status, $job_count)=$sth->fetchrow_array()) {
    # print STDERR "$status: $job_count\n";

        $total_job_count += $job_count;

        if($status eq 'READY') {
            $analysisStats->ready_job_count($job_count);
        } elsif($status eq 'SEMAPHORED') {
            $analysisStats->semaphored_job_count($job_count);
        } elsif($status eq 'DONE') {
            $done_here = $job_count;
        } elsif($status eq 'PASSED_ON') {
            $done_elsewhere = $job_count;
        } elsif ($status eq 'FAILED') {
            $analysisStats->failed_job_count($job_count);
        }
      } # /while
      $sth->finish;

      $analysisStats->total_job_count( $total_job_count );
      $analysisStats->done_job_count( $done_here + $done_elsewhere );
  } # unless($self->db->hive_use_triggers())

        # compute the number of total required workers for this analysis (taking into account the jobs that are already running)
    my $analysis              = $analysisStats->get_analysis();
    my $scheduling_allowed    =  ( !defined( $analysisStats->hive_capacity ) or $analysisStats->hive_capacity )
                              && ( !defined( $analysis->analysis_capacity  ) or $analysis->analysis_capacity  );
    my $required_workers    = $scheduling_allowed
                            && POSIX::ceil( $analysisStats->ready_job_count() / $analysisStats->get_or_estimate_batch_size() );
    $analysisStats->num_required_workers( $required_workers );


    $analysisStats->check_blocking_control_rules();

    if($analysisStats->status ne 'BLOCKED') {
        $analysisStats->determine_status();
    }

    # $analysisStats->sync_lock(0); ## do we perhaps need it here?
    $analysisStats->update;  #update and release sync_lock

    return $analysisStats;
}


=head2 get_num_failed_analyses

  Arg [1]    : Bio::EnsEMBL::Hive::Analysis object (optional)
  Example    : if( $self->get_num_failed_analyses( $my_analysis )) { do_something; }
  Example    : my $num_failed_analyses = $self->get_num_failed_analyses();
  Description: Reports all failed analyses and returns
                either the number of total failed (if no $filter_analysis was provided)
                or 1/0, depending on whether $filter_analysis failed or not.
  Returntype : int
  Exceptions : none
  Caller     : general

=cut

sub get_num_failed_analyses {
    my ($self, $filter_analysis) = @_;

    my $failed_analyses = $self->db->get_AnalysisAdaptor->fetch_all_failed_analyses();

    my $filter_analysis_failed = 0;

    foreach my $failed_analysis (@$failed_analyses) {
        print "\t##########################################################\n";
        print "\t# Too many jobs in analysis '".$failed_analysis->logic_name."' FAILED #\n";
        print "\t##########################################################\n\n";
        if($filter_analysis and ($filter_analysis->dbID == $failed_analysis->dbID)) {
            $filter_analysis_failed = 1;
        }
    }

    return $filter_analysis ? $filter_analysis_failed : scalar(@$failed_analyses);
}


sub get_hive_current_load {
    my $self = shift;
    my $sql = qq{
        SELECT sum(1/hive_capacity)
        FROM worker w
        JOIN analysis_stats USING(analysis_id)
        WHERE w.status!='DEAD'
        AND hive_capacity IS NOT NULL
        AND hive_capacity>0
    };
    my $sth = $self->prepare($sql);
    $sth->execute();
    my ($load)=$sth->fetchrow_array();
    $sth->finish;
    return ($load || 0);
}


sub count_running_workers {
    my ($self, $analysis_id) = @_;

    return $self->count_all( "status!='DEAD'".($analysis_id ? " AND analysis_id=$analysis_id" : '') );
}


sub get_workers_rank {
    my ($self, $worker) = @_;

    return $self->count_all( "status!='DEAD' AND analysis_id=".$worker->analysis_id." AND worker_id<".$worker->dbID );
}


sub get_remaining_jobs_show_hive_progress {
  my $self = shift;
  my $sql = "SELECT sum(done_job_count), sum(failed_job_count), sum(total_job_count), ".
            "sum(ready_job_count * analysis_stats.avg_msec_per_job)/1000/60/60 ".
            "FROM analysis_stats";
  my $sth = $self->prepare($sql);
  $sth->execute();
  my ($done, $failed, $total, $cpuhrs) = $sth->fetchrow_array();
  $sth->finish;

  $done   ||= 0;
  $failed ||= 0;
  $total  ||= 0;
  my $completed = $total
    ? ((100.0 * ($done+$failed))/$total)
    : 0.0;
  my $remaining = $total - $done - $failed;
  printf("hive %1.3f%% complete (< %1.3f CPU_hrs) (%d todo + %d done + %d failed = %d total)\n", 
          $completed, $cpuhrs, $remaining, $done, $failed, $total);
  return $remaining;
}


sub print_analysis_status {
    my ($self, $filter_analysis) = @_;

    my $list_of_analyses = $filter_analysis ? [$filter_analysis] : $self->db->get_AnalysisAdaptor->fetch_all;
    foreach my $analysis (sort {$a->dbID <=> $b->dbID} @$list_of_analyses) {
        $analysis->stats->print_stats();
    }
}


sub print_running_worker_counts {
    my $self = shift;

    my $sql = qq{
        SELECT logic_name, count(*)
        FROM worker w
        JOIN analysis_base a USING(analysis_id)
        WHERE w.status!='DEAD'
        GROUP BY a.analysis_id
    };

    my $total_workers = 0;
    my $sth = $self->prepare($sql);
    $sth->execute();

    print "\n===== Stats of live Workers according to the Queen: ======\n";
    while((my $logic_name, my $worker_count)=$sth->fetchrow_array()) {
        printf("%30s : %d workers\n", $logic_name, $worker_count);
        $total_workers += $worker_count;
    }
    $sth->finish;
    printf("%30s : %d workers\n\n", '======= TOTAL =======', $total_workers);
}


=head2 monitor

  Arg[1]     : --none--
  Example    : $queen->monitor();
  Description: Monitors current throughput and stores the result in the monitor
               table
  Exceptions : none
  Caller     : beekeepers and other external processes

=cut

sub monitor {
    my $self = shift;
    my $sql = qq{
        INSERT INTO monitor
        SELECT CURRENT_TIMESTAMP, count(*),
    } . {
        'mysql'     =>  qq{ sum(work_done/(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(born))),
                            sum(work_done/(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(born)))/count(*), },
        'sqlite'    =>  qq{ sum(work_done/(strftime('%s','now')-strftime('%s',born))),
                            sum(work_done/(strftime('%s','now')-strftime('%s',born)))/count(*), },
        'pgsql'     =>  qq{ sum(work_done/(EXTRACT(EPOCH FROM CURRENT_TIMESTAMP - born))),
                            sum(work_done/(EXTRACT(EPOCH FROM CURRENT_TIMESTAMP - born)))/count(*), },
    }->{ $self->dbc->driver }. qq{
        group_concat(DISTINCT logic_name)
        FROM worker w
        LEFT JOIN analysis_base USING (analysis_id)
        WHERE w.status!='DEAD'
    };
    my $sth = $self->prepare($sql);
    $sth->execute();
}


=head2 register_all_workers_dead

  Example    : $queen->register_all_workers_dead();
  Description: Registers all workers as dead
  Exceptions : none
  Caller     : beekeepers and other external processes

=cut

sub register_all_workers_dead {
    my $self = shift;

    my $all_workers_considered_alive = $self->fetch_all( "status!='DEAD'" );
    foreach my $worker (@{$all_workers_considered_alive}) {
        $worker->cause_of_death( 'UNKNOWN' );  # well, maybe we could have investigated further...
        $self->register_worker_death($worker);
    }
}


sub suggest_analysis_to_specialize_by_rc_id_meadow_type {
    my $self                = shift;
    my $rc_id               = shift;
    my $meadow_type         = shift;

    my @suitable_analyses = @{ $self->db->get_AnalysisStatsAdaptor->fetch_all_by_suitability_rc_id_meadow_type( $rc_id, $meadow_type ) };

    foreach my $stats (@suitable_analyses) {

            #synchronize and double check that it can be run:
        $self->safe_synchronize_AnalysisStats($stats);
        return $stats if( ($stats->status ne 'BLOCKED') and ($stats->status ne 'SYNCHING') and ($stats->num_required_workers > 0) );
    }

    return undef;
}


1;