#
# You may distribute this module under the same terms as perl itself

=pod 

=head1 NAME

  Bio::EnsEMBL::Hive::Queen

=head1 SYNOPSIS

  The Queen of the Hive based job control system
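  A minimal, illustrative sketch of how a beekeeper-style script might drive the Queen.
  The method names below are taken from this module; the get_Queen() accessor on the hive
  DBAdaptor and the $valley object are assumptions that depend on your setup:

    my $queen = $hive_dba->get_Queen;

        # create, specialize and use one worker (roughly what runWorker.pl does):
    my $worker = $queen->create_new_worker( -meadow_type => 'LOCAL', -process_id => $$, -exec_host => 'localhost' );
    $queen->specialize_new_worker( $worker, -logic_name => 'my_analysis' );

        # housekeeping normally performed by beekeeper.pl:
    $queen->synchronize_hive();                     # refresh the analysis_stats summaries
    $queen->check_for_dead_workers( $valley, 1 );   # garbage-collect lost workers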

=head1 DESCRIPTION

  The Queen of the Hive based job control system is responsible for 'birthing' the
  correct number of workers of the right type so that they can find jobs to do.
  It will also free up jobs of Workers that died unexpectedly so that other workers
  can claim them.

  Hive based processing is a concept based on a more controlled version
  of an autonomous agent type system.  Each worker is not told what to do
  (as in a centralized control system, like the current pipeline system)
  but rather queries a central database for jobs ('give me jobs').

  Each worker is linked to an analysis_id, registers itself on creation
  into the Hive, creates a RunnableDB instance of the Analysis->module,
  gets $analysis->stats->batch_size jobs from the job table, does its work,
  creates the next layer of job entries by interfacing with
  the DataflowRuleAdaptor to determine the analyses it needs to pass its
  output data to, and creates jobs on the next analysis database.
  It repeats this cycle until it has lived its lifetime or until there are no
  more jobs left.
  The lifetime limit is just a safety limit to prevent these workers from 'infecting'
  a system.

  The Queen's job is simply to birth Workers of the correct analysis_id to get the
  work done.  The only other thing the Queen does is free up jobs that were
  claimed by Workers that died unexpectedly so that other workers can take
  over the work.

  The Beekeeper is in charge of interfacing between the Queen and a compute resource
  or 'compute farm'.  Its job is to query Queens to see if they need any workers and to
  send the requested number of workers to open machines via the runWorker.pl script.
  It is also responsible for interfacing with the Queen to identify workers which died
  unexpectedly.

=head1 CONTACT

  Please contact the ehive-users@ebi.ac.uk mailing list with questions/suggestions.

=head1 APPENDIX

  The rest of the documentation details each of the object methods.
  Internal methods are usually preceded with a _

=cut

package Bio::EnsEMBL::Hive::Queen;

use strict;
use POSIX;
use Clone 'clone';
use Bio::EnsEMBL::Utils::Argument;
use Bio::EnsEMBL::Utils::Exception;

use Bio::EnsEMBL::Hive::Utils ('destringify', 'dir_revhash');  # import 'destringify()' and 'dir_revhash()'
use Bio::EnsEMBL::Hive::AnalysisJob;
use Bio::EnsEMBL::Hive::Worker;

use base ('Bio::EnsEMBL::Hive::DBSQL::ObjectAdaptor');


sub default_table_name {
    return 'worker';
}


sub default_insertion_method {
    return 'INSERT';
}


sub object_class {
    return 'Bio::EnsEMBL::Hive::Worker';
}

############################
#
# PUBLIC API
#
############################


=head2 create_new_worker

  Description: Creates an entry in the worker table,
               populates some non-storable attributes
               and returns a Worker object based on that insert.
               This guarantees that each worker started in this Queen's hive is properly registered.
  Returntype : Bio::EnsEMBL::Hive::Worker
  Caller     : runWorker.pl
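  Example    : # illustrative only -- in practice the arguments are assembled by runWorker.pl from its command line:
               my $worker = $queen->create_new_worker( -meadow_type => 'LSF', -process_id => $process_id, -exec_host => $hostname, -resource_class_name => 'default' );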

=cut

sub create_new_worker {
    my ($self, @args) = @_;

    my ($meadow_type, $meadow_name, $process_id, $exec_host, $resource_class_id, $resource_class_name,
        $no_write, $debug, $worker_log_dir, $hive_log_dir, $job_limit, $life_span, $no_cleanup, $retry_throwing_jobs, $can_respecialize) =
    rearrange([qw(meadow_type meadow_name process_id exec_host resource_class_id resource_class_name
                no_write debug worker_log_dir hive_log_dir job_limit life_span no_cleanup retry_throwing_jobs can_respecialize) ], @args);

    if( defined($resource_class_name) ) {
        my $rc = $self->db->get_ResourceClassAdaptor->fetch_by_name($resource_class_name)
            or die "resource_class with name='$resource_class_name' could not be fetched from the database";

        $resource_class_id = $rc->dbID;
    }

    my $sql = q{INSERT INTO worker (born, last_check_in, meadow_type, meadow_name, host, process_id, resource_class_id)
              VALUES (CURRENT_TIMESTAMP, CURRENT_TIMESTAMP, ?, ?, ?, ?, ?)};

    my $sth = $self->prepare($sql);
    $sth->execute($meadow_type, $meadow_name, $exec_host, $process_id, $resource_class_id);
    my $worker_id = $self->dbc->db_handle->last_insert_id(undef, undef, 'worker', 'worker_id')
        or die "Could not create a new worker";
    $sth->finish;

    if($hive_log_dir or $worker_log_dir) {
        my $dir_revhash = dir_revhash($worker_id);
        $worker_log_dir ||= $hive_log_dir .'/'. ($dir_revhash ? "$dir_revhash/" : '') .'worker_id_'.$worker_id;

            # Note: the following die-message will not reach the log files for circular reasons!
        system("mkdir -p $worker_log_dir") && die "Could not create '$worker_log_dir' because: $!";

        my $sth_add_log = $self->prepare( "UPDATE worker SET log_dir=? WHERE worker_id=?" );
        $sth_add_log->execute($worker_log_dir, $worker_id);
        $sth_add_log->finish;
    }

    my $worker = $self->fetch_by_dbID($worker_id)
        or die "Could not fetch worker with dbID=$worker_id";

    $worker->init;

    if(defined($job_limit)) {
      $worker->job_limiter($job_limit);
      $worker->life_span(0);
    }

    $worker->life_span($life_span * 60)                 if($life_span);

    $worker->execute_writes(0)                          if($no_write);

    $worker->perform_cleanup(0)                         if($no_cleanup);

    $worker->debug($debug)                              if($debug);

    $worker->retry_throwing_jobs($retry_throwing_jobs)  if(defined $retry_throwing_jobs);

    $worker->can_respecialize($can_respecialize)        if(defined $can_respecialize);

    return $worker;
}


=head2 specialize_new_worker

  Description: If analysis_id or logic_name is specified it will try to specialize the Worker into this analysis.
               If not specified the Queen will analyze the hive and pick the most suitable analysis.
  Caller     : Bio::EnsEMBL::Hive::Worker
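  Example    : # illustrative only; see Caller above for the normal entry point:
               $queen->specialize_new_worker( $worker, -logic_name => 'my_analysis' );
               $queen->specialize_new_worker( $worker, -job_id => $job_id, -force => 1 );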

=cut

sub specialize_new_worker {
    my ($self, $worker, @args) = @_;

    my ($analysis_id, $logic_name, $job_id, $force) =
        rearrange([qw(analysis_id logic_name job_id force) ], @args);

    if( scalar( grep {defined($_)} ($analysis_id, $logic_name, $job_id) ) > 1) {
        die "At most one of the options {-analysis_id, -logic_name, -job_id} can be set to pre-specialize a Worker";
    }

    my ($analysis, $stats, $special_batch);
    my $analysis_stats_adaptor = $self->db->get_AnalysisStatsAdaptor;

    if($job_id or $analysis_id or $logic_name) {    # probably pre-specialized from command-line

        if($job_id) {
            print "resetting and fetching job for job_id '$job_id'\n";

            my $job_adaptor = $self->db->get_AnalysisJobAdaptor;

            my $job = $job_adaptor->fetch_by_dbID( $job_id )
                or die "Could not fetch job with dbID='$job_id'";
            my $job_status = $job->status();

            if($job_status =~/(CLAIMED|PRE_CLEANUP|FETCH_INPUT|RUN|WRITE_OUTPUT|POST_CLEANUP)/ ) {
                die "Job with dbID='$job_id' is already in progress, cannot run";   # FIXME: try GC first, then complain
            } elsif($job_status =~/(DONE|SEMAPHORED)/ and !$force) {
                die "Job with dbID='$job_id' is $job_status, please use -force 1 to override";
            }

            if(($job_status eq 'DONE') and $job->semaphored_job_id) {
                warn "Increasing the semaphore count of the dependent job";
                $job_adaptor->increase_semaphore_count_for_jobid( $job->semaphored_job_id );
            }

            my $worker_id = $worker->dbID;
            if($job = $job_adaptor->reset_or_grab_job_by_dbID($job_id, $worker_id)) {
                $special_batch = [ $job ];
                $analysis_id = $job->analysis_id;
            } else {
                die "Could not claim job with dbID='$job_id' for worker with dbID='$worker_id'";
            }
        }

        if($logic_name) {
            $analysis = $self->db->get_AnalysisAdaptor->fetch_by_logic_name($logic_name)
                or die "analysis with name='$logic_name' could not be fetched from the database";

            $analysis_id = $analysis->dbID;

        } elsif($analysis_id) {
            $analysis = $self->db->get_AnalysisAdaptor->fetch_by_dbID($analysis_id)
                or die "analysis with dbID='$analysis_id' could not be fetched from the database";
        }

        if( $worker->resource_class_id
        and $worker->resource_class_id != $analysis->resource_class_id) {
                die "resource_class of analysis ".$analysis->logic_name." is incompatible with this Worker's resource_class";
        }

        $stats = $analysis_stats_adaptor->fetch_by_analysis_id($analysis_id);
        $self->safe_synchronize_AnalysisStats($stats);

        unless($special_batch or $force) {    # do we really need to run this analysis?
            if($self->get_hive_current_load() >= 1.1) {
                $worker->cause_of_death('HIVE_OVERLOAD');
                die "Hive is overloaded, can't specialize a worker";
            }
            if($stats->status eq 'BLOCKED') {
                die "Analysis is BLOCKED, can't specialize a worker";
            }
            if($stats->num_running_workers >= $stats->num_required_workers) {
                die "Analysis doesn't require extra workers at the moment";
            }
            if($stats->status eq 'DONE') {
                die "Analysis is DONE, and doesn't require workers";
            }
        }
            # probably scheduled by beekeeper.pl:
    } elsif( $stats = $self->suggest_analysis_to_specialize_by_rc_id_meadow_type($worker->resource_class_id, $worker->meadow_type) ) {

        print "Queen picked analysis with dbID=".$stats->analysis_id." for the worker\n";

        $worker->analysis( undef ); # make sure we reset anything that was there before
        $analysis_id = $stats->analysis_id;

    } else {
        $worker->cause_of_death('NO_ROLE');
        die "No analysis suitable for the worker was found\n";
    }

        # now set it in the $worker:
    $worker->analysis_id( $analysis_id );

    my $sth_update_analysis_id = $self->prepare( "UPDATE worker SET analysis_id=? WHERE worker_id=?" );
    $sth_update_analysis_id->execute($worker->analysis_id, $worker->dbID);
    $sth_update_analysis_id->finish;

    if($special_batch) {
        $worker->special_batch( $special_batch );
    } else {    # count it as an autonomous worker sharing the load of that analysis:

        $stats->update_status('WORKING');

        $analysis_stats_adaptor->decrease_required_workers($worker->analysis_id);
    }

        # The following increment used to be done only when no specific task was given to the worker,
        # thereby excluding such "special task" workers from being counted in num_running_workers.
        #
        # However this may be tricky to emulate by triggers that know nothing about "special tasks",
        # so I am (temporarily?) simplifying the accounting algorithm.
        #
    unless( $self->db->hive_use_triggers() ) {
        $analysis_stats_adaptor->increase_running_workers($worker->analysis_id);
    }
}


sub register_worker_death {
    my ($self, $worker) = @_;

    return unless($worker);

    my $cod = $worker->cause_of_death() || 'UNKNOWN';    # make sure we do not attempt to insert a void

    my $sql = qq{UPDATE worker SET died=CURRENT_TIMESTAMP
                    ,last_check_in=CURRENT_TIMESTAMP
                    ,status='DEAD'
                    ,work_done='}. $worker->work_done . qq{'
                    ,cause_of_death='$cod'
                WHERE worker_id='}. $worker->dbID . qq{'};
    $self->dbc->do( $sql );

    if(my $analysis_id = $worker->analysis_id) {
        my $analysis_stats_adaptor = $self->db->get_AnalysisStatsAdaptor;

        unless( $self->db->hive_use_triggers() ) {
            $analysis_stats_adaptor->decrease_running_workers($worker->analysis_id);
        }

        unless( $cod eq 'NO_WORK'
            or  $cod eq 'JOB_LIMIT'
            or  $cod eq 'HIVE_OVERLOAD'
            or  $cod eq 'LIFESPAN'
        ) {
                $self->db->get_AnalysisJobAdaptor->release_undone_jobs_from_worker($worker);
        }

            # re-sync the analysis_stats when a worker dies as part of dynamic sync system
        if($self->safe_synchronize_AnalysisStats($worker->analysis->stats)->status ne 'DONE') {
            # since I'm dying I should make sure there is someone to take my place after I'm gone ...
            # above synch still sees me as a 'living worker' so I need to compensate for that
            $analysis_stats_adaptor->increase_required_workers($worker->analysis_id);
        }
    }
}


sub check_for_dead_workers {    # scans the whole Valley for lost Workers (but ignores unreachable ones)
    my ($self, $valley, $check_buried_in_haste) = @_;

    warn "GarbageCollector:\tChecking for lost Workers...\n";

    my $queen_worker_list           = $self->fetch_overdue_workers(0);
    my %mt_and_pid_to_worker_status = ();
    my %worker_status_counts        = ();
    my %mt_and_pid_to_lost_worker   = ();

    warn "GarbageCollector:\t[Queen:] we have ".scalar(@$queen_worker_list)." Workers alive.\n";

    foreach my $worker (@$queen_worker_list) {

        my $meadow_type = $worker->meadow_type;
        if(my $meadow = $valley->find_available_meadow_responsible_for_worker($worker)) {
            $mt_and_pid_to_worker_status{$meadow_type} ||= $meadow->status_of_all_our_workers;
        } else {
            $worker_status_counts{$meadow_type}{'UNREACHABLE'}++;

            next;   # Worker is unreachable from this Valley
        }

        my $process_id = $worker->process_id;
        if(my $status = $mt_and_pid_to_worker_status{$meadow_type}{$process_id}) { # can be RUN|PEND|xSUSP
            $worker_status_counts{$meadow_type}{$status}++;
        } else {
            $worker_status_counts{$meadow_type}{'LOST'}++;

            $mt_and_pid_to_lost_worker{$meadow_type}{$process_id} = $worker;
        }
    }

        # just a quick summary report:
    foreach my $meadow_type (keys %worker_status_counts) {
        warn "GarbageCollector:\t[$meadow_type Meadow:]\t".join(', ', map { "$_:$worker_status_counts{$meadow_type}{$_}" } keys %{$worker_status_counts{$meadow_type}})."\n\n";
    }

    while(my ($meadow_type, $pid_to_lost_worker) = each %mt_and_pid_to_lost_worker) {
        my $this_meadow = $valley->available_meadow_hash->{$meadow_type};

        if(my $lost_this_meadow = scalar(keys %$pid_to_lost_worker) ) {
            warn "GarbageCollector:\tDiscovered $lost_this_meadow lost $meadow_type Workers\n";

            my $wpid_to_cod = {};
            if($this_meadow->can('find_out_causes')) {
                $wpid_to_cod = $this_meadow->find_out_causes( keys %$pid_to_lost_worker );
                my $lost_with_known_cod = scalar(keys %$wpid_to_cod);
                warn "GarbageCollector:\tFound why $lost_with_known_cod of $meadow_type Workers died\n";
            } else {
                warn "GarbageCollector:\t$meadow_type meadow does not support post-mortem examination\n";
            }

            warn "GarbageCollector:\tReleasing the jobs\n";
            while(my ($process_id, $worker) = each %$pid_to_lost_worker) {
                $worker->cause_of_death( $wpid_to_cod->{$process_id} || 'UNKNOWN');
                $self->register_worker_death($worker);
            }
        }
    }

        # the following bit is completely Meadow-agnostic and only restores database integrity:
    if($check_buried_in_haste) {
        warn "GarbageCollector:\tChecking for Workers buried in haste...\n";
        my $buried_in_haste_list = $self->fetch_all_dead_workers_with_jobs();
        if(my $bih_number = scalar(@$buried_in_haste_list)) {
            warn "GarbageCollector:\tfound $bih_number jobs, reclaiming.\n\n";
            if($bih_number) {
                my $job_adaptor = $self->db->get_AnalysisJobAdaptor();
                foreach my $worker (@$buried_in_haste_list) {
                    $job_adaptor->release_undone_jobs_from_worker($worker);
                }
            }
        } else {
            warn "GarbageCollector:\tfound none\n";
        }
    }
}


    # a new version that both checks in and updates the status
sub check_in_worker {
    my ($self, $worker) = @_;

    $self->dbc->do("UPDATE worker SET last_check_in=CURRENT_TIMESTAMP, status='".$worker->status."', work_done='".$worker->work_done."' WHERE worker_id='".$worker->dbID."'");
}


=head2 reset_job_by_dbID_and_sync

  Arg [1]: int $job_id
  Example: 
    $queen->reset_job_by_dbID_and_sync($job_id);
  Description: 
    For the specified job_id it will fetch just that job,
    reset it completely as if it had never run, and re-sync the analysis_stats.
    Specifying a specific job bypasses the safety checks,
    thus multiple workers could be running the
    same job simultaneously (use only for debugging).
  Returntype : none
  Exceptions :
  Caller     : beekeeper.pl

=cut

sub reset_job_by_dbID_and_sync {
    my ($self, $job_id) = @_;

    my $job_adaptor = $self->db->get_AnalysisJobAdaptor;
    my $job = $job_adaptor->reset_or_grab_job_by_dbID($job_id); 

    my $stats = $self->db->get_AnalysisStatsAdaptor->fetch_by_analysis_id($job->analysis_id);
    $self->synchronize_AnalysisStats($stats);
}


######################################
#
# Public API interface for beekeeper
#
######################################


    # Note: asking for Queen->fetch_overdue_workers(0) essentially means
    #       "fetch all workers known to the Queen not to be officially dead"
    #
sub fetch_overdue_workers {
    my ($self,$overdue_secs) = @_;

    $overdue_secs = 3600 unless(defined($overdue_secs));

    my $constraint = "status!='DEAD' AND ".
                    ( ($self->dbc->driver eq 'sqlite')
                        ? "(strftime('%s','now')-strftime('%s',last_check_in))>$overdue_secs"
                        : "(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(last_check_in))>$overdue_secs");
    return $self->fetch_all( $constraint );
}


sub fetch_all_dead_workers_with_jobs {
    my $self = shift;

    return $self->fetch_all( "JOIN job j USING(worker_id) WHERE worker.status='DEAD' AND j.status NOT IN ('DONE', 'READY', 'FAILED', 'PASSED_ON') GROUP BY worker_id" );
}


=head2 synchronize_hive

  Arg [1]    : $filter_analysis (optional)
  Example    : $queen->synchronize_hive();
  Description: Runs through all analyses in the system and synchronizes
               the analysis_stats summary with the states in the job
               and worker tables.  It then checks all the blocking rules
               and blocks/unblocks analyses as needed.
  Exceptions : none
  Caller     : general

=cut

sub synchronize_hive {
  my $self          = shift;
  my $filter_analysis = shift; # optional parameter

  my $start_time = time();

  my $list_of_analyses = $filter_analysis ? [$filter_analysis] : $self->db->get_AnalysisAdaptor->fetch_all;

  print STDERR "\nSynchronizing the hive (".scalar(@$list_of_analyses)." analyses this time):\n";
  foreach my $analysis (@$list_of_analyses) {
    $self->synchronize_AnalysisStats($analysis->stats);
    print STDERR ( ($analysis->stats()->status eq 'BLOCKED') ? 'x' : 'o');
  }
  print STDERR "\n";

  print STDERR ''.((time() - $start_time))." seconds to synchronize_hive\n\n";
}


=head2 safe_synchronize_AnalysisStats

  Arg [1]    : Bio::EnsEMBL::Hive::AnalysisStats object
  Example    : $self->safe_synchronize_AnalysisStats($stats);
  Description: A wrapper around synchronize_AnalysisStats that performs
               checks and grabs the sync_lock before proceeding with the sync.
               Used by the distributed worker sync system to avoid contention.
  Exceptions : none
  Caller     : general

=cut

sub safe_synchronize_AnalysisStats {
  my $self = shift;
  my $stats = shift;

  return $stats unless($stats->analysis_id);
  return $stats if($stats->status eq 'SYNCHING');
  return $stats if($stats->status eq 'DONE');
  return $stats if($stats->sync_lock);
  return $stats if(($stats->status eq 'WORKING') and
                   ($stats->seconds_since_last_update < 3*60));

  # OK try to claim the sync_lock
  my $sql = "UPDATE analysis_stats SET status='SYNCHING', sync_lock=1 ".
            "WHERE sync_lock=0 and analysis_id=" . $stats->analysis_id;
  #print("$sql\n");
  my $row_count = $self->dbc->do($sql);
  return $stats unless($row_count == 1);        # return the un-updated status if locked
  #printf("got sync_lock on analysis_stats(%d)\n", $stats->analysis_id);

      # since we managed to obtain the lock, let's go and perform the sync:
  $self->synchronize_AnalysisStats($stats);

  return $stats;
}


=head2 synchronize_AnalysisStats

  Arg [1]    : Bio::EnsEMBL::Hive::AnalysisStats object
  Example    : $self->synchronize_AnalysisStats($analysisStats);
  Description: Queries the job and worker tables to get summary counts
               and rebuilds the AnalysisStats object.  Then updates the
               analysis_stats table with the new summary info.
  Returntype : newly synced Bio::EnsEMBL::Hive::AnalysisStats object
  Exceptions : none
  Caller     : general

=cut

sub synchronize_AnalysisStats {
  my $self = shift;
  my $analysisStats = shift;

  return $analysisStats unless($analysisStats);
  return $analysisStats unless($analysisStats->analysis_id);

  $analysisStats->refresh(); ## Need to get the new hive_capacity for dynamic analyses

  unless($self->db->hive_use_triggers()) {
      $analysisStats->total_job_count(0);
      $analysisStats->semaphored_job_count(0);
      $analysisStats->ready_job_count(0);
      $analysisStats->done_job_count(0);
      $analysisStats->failed_job_count(0);

            # ask for analysis_id to force MySQL to use existing index on (analysis_id, status)
      my $sql = "SELECT analysis_id, status, count(*) FROM job WHERE analysis_id=? GROUP BY analysis_id, status";
      my $sth = $self->prepare($sql);
      $sth->execute($analysisStats->analysis_id);

      my $done_here       = 0;
      my $done_elsewhere  = 0;
      my $total_job_count = 0;
      while (my ($dummy_analysis_id, $status, $job_count)=$sth->fetchrow_array()) {
    # print STDERR "$status: $job_count\n";

        $total_job_count += $job_count;

        if($status eq 'READY') {
            $analysisStats->ready_job_count($job_count);
        } elsif($status eq 'SEMAPHORED') {
            $analysisStats->semaphored_job_count($job_count);
        } elsif($status eq 'DONE') {
            $done_here = $job_count;
        } elsif($status eq 'PASSED_ON') {
            $done_elsewhere = $job_count;
        } elsif ($status eq 'FAILED') {
            $analysisStats->failed_job_count($job_count);
        }
      } # /while
      $sth->finish;

      $analysisStats->total_job_count( $total_job_count );
      $analysisStats->done_job_count( $done_here + $done_elsewhere );
  } # unless($self->db->hive_use_triggers())

        # compute the number of total required workers for this analysis (taking into account the jobs that are already running)
    my $analysis              = $analysisStats->get_analysis();
    my $scheduling_allowed    =  ( !defined( $analysisStats->hive_capacity ) or $analysisStats->hive_capacity )
                              && ( !defined( $analysis->analysis_capacity  ) or $analysis->analysis_capacity  );
    my $required_workers    = $scheduling_allowed
                            && POSIX::ceil( ($analysisStats->ready_job_count() + $analysisStats->inprogress_job_count())
                                            / $analysisStats->get_or_estimate_batch_size() );
    $analysisStats->num_required_workers( $required_workers );


    $analysisStats->check_blocking_control_rules();

    if($analysisStats->status ne 'BLOCKED') {
        $analysisStats->determine_status();
    }

    # $analysisStats->sync_lock(0); ## do we perhaps need it here?
    $analysisStats->update;  #update and release sync_lock

    return $analysisStats;
}


=head2 get_num_failed_analyses

  Arg [1]    : $filter_analysis (optional)
  Example    : if( $self->get_num_failed_analyses( $my_analysis )) { do_something; }
  Example    : my $num_failed_analyses = $self->get_num_failed_analyses();
  Description: Reports all failed analyses and returns
                either the number of total failed (if no $filter_analysis was provided)
                or 1/0, depending on whether $filter_analysis failed or not.
  Returntype : int
  Exceptions : none
  Caller     : general

=cut

sub get_num_failed_analyses {
    my ($self, $filter_analysis) = @_;

    my $failed_analyses = $self->db->get_AnalysisAdaptor->fetch_all_failed_analyses();

    my $filter_analysis_failed = 0;

    foreach my $failed_analysis (@$failed_analyses) {
        print "\t##########################################################\n";
        print "\t# Too many jobs in analysis '".$failed_analysis->logic_name."' FAILED #\n";
        print "\t##########################################################\n\n";
        if($filter_analysis and ($filter_analysis->dbID == $failed_analysis->dbID)) {
            $filter_analysis_failed = 1;
        }
    }

    return $filter_analysis ? $filter_analysis_failed : scalar(@$failed_analyses);
}


sub get_hive_current_load {
    my $self = shift;
    my $sql = qq{
        SELECT sum(1/hive_capacity)
        FROM worker w
        JOIN analysis_stats USING(analysis_id)
        WHERE w.status!='DEAD'
        AND hive_capacity IS NOT NULL
        AND hive_capacity>0
    };
    my $sth = $self->prepare($sql);
    $sth->execute();
    my ($load)=$sth->fetchrow_array();
    $sth->finish;
    return ($load || 0);
}


sub count_running_workers {
    my ($self, $analysis_id) = @_;

    my $sql = qq{
            SELECT count(*)
            FROM worker
            WHERE status!='DEAD'
        } . ($analysis_id ? " AND analysis_id='$analysis_id'" : '');

    my $sth = $self->prepare($sql);
    $sth->execute();
    (my $running_workers_count)=$sth->fetchrow_array();
    $sth->finish();

    return $running_workers_count || 0;
}


sub get_remaining_jobs_show_hive_progress {
  my $self = shift;
  my $sql = "SELECT sum(done_job_count), sum(failed_job_count), sum(total_job_count), ".
            "sum(ready_job_count * analysis_stats.avg_msec_per_job)/1000/60/60 ".
            "FROM analysis_stats";
  my $sth = $self->prepare($sql);
  $sth->execute();
  my ($done, $failed, $total, $cpuhrs) = $sth->fetchrow_array();
  $sth->finish;

  $done   ||= 0;
  $failed ||= 0;
  $total  ||= 0;
  my $completed = $total
    ? ((100.0 * ($done+$failed))/$total)
    : 0.0;
  my $remaining = $total - $done - $failed;
  printf("hive %1.3f%% complete (< %1.3f CPU_hrs) (%d todo + %d done + %d failed = %d total)\n",
          $completed, $cpuhrs, $remaining, $done, $failed, $total);
  return $remaining;
}


sub print_analysis_status {
    my ($self, $filter_analysis) = @_;

    my $list_of_analyses = $filter_analysis ? [$filter_analysis] : $self->db->get_AnalysisAdaptor->fetch_all;
    foreach my $analysis (sort {$a->dbID <=> $b->dbID} @$list_of_analyses) {
        $analysis->stats->print_stats();
    }
}


sub print_running_worker_counts {
    my $self = shift;

    my $sql = qq{
        SELECT logic_name, count(*)
        FROM worker w
        JOIN analysis_base USING(analysis_id)
        WHERE w.status!='DEAD'
        GROUP BY analysis_id
    };

    my $total_workers = 0;
    my $sth = $self->prepare($sql);
    $sth->execute();

    print "\n===== Stats of live Workers according to the Queen: ======\n";
    while((my $logic_name, my $worker_count)=$sth->fetchrow_array()) {
        printf("%30s : %d workers\n", $logic_name, $worker_count);
        $total_workers += $worker_count;
    }
    $sth->finish;
    printf("%30s : %d workers\n\n", '======= TOTAL =======', $total_workers);
}


=head2 monitor

  Arg[1]     : --none--
  Example    : $queen->monitor();
  Description: Monitors current throughput and stores the result in the monitor
               table.
  Exceptions : none
  Caller     : beekeepers and other external processes

=cut

sub monitor {
  my $self = shift;
  my $sql = qq{
      INSERT INTO monitor
      SELECT
          CURRENT_TIMESTAMP,
          count(*),
  }. ( ($self->dbc->driver eq 'sqlite')
        ? qq{ sum(work_done/(strftime('%s','now')-strftime('%s',born))),
              sum(work_done/(strftime('%s','now')-strftime('%s',born)))/count(*), }
        : qq{ sum(work_done/(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(born))),
              sum(work_done/(UNIX_TIMESTAMP()-UNIX_TIMESTAMP(born)))/count(*), }
  ). qq{
          group_concat(DISTINCT logic_name)
      FROM worker w
      LEFT JOIN analysis_base USING (analysis_id)
      WHERE w.status!='DEAD'
  };

  my $sth = $self->prepare($sql);
  $sth->execute();
}


=head2 register_all_workers_dead

  Example    : $queen->register_all_workers_dead();
  Description: Registers all workers as dead
  Exceptions : none
  Caller     : beekeepers and other external processes

=cut

sub register_all_workers_dead {
    my $self = shift;

    my $overdueWorkers = $self->fetch_overdue_workers(0);
    foreach my $worker (@{$overdueWorkers}) {
        $worker->cause_of_death( 'UNKNOWN' );  # well, maybe we could have investigated further...
        $self->register_worker_death($worker);
    }
}


sub suggest_analysis_to_specialize_by_rc_id_meadow_type {
    my $self                = shift;
    my $rc_id               = shift;
    my $meadow_type         = shift;

    my @suitable_analyses = @{ $self->db->get_AnalysisStatsAdaptor->fetch_all_by_suitability_rc_id_meadow_type( $rc_id, $meadow_type ) };

    foreach my $stats (@suitable_analyses) {

            #synchronize and double check that it can be run:
        $self->safe_synchronize_AnalysisStats($stats);
        return $stats if( ($stats->status ne 'BLOCKED') and ($stats->status ne 'SYNCHING') and ($stats->num_running_workers < $stats->num_required_workers) );
    }

    return undef;
}


1;