Commit c848696d authored by Matthieu Muffato's avatar Matthieu Muffato
Browse files

POD coverage is currently not an objective

parent ea1fcec0
use strict;
use warnings;

use Test::More;
use lib 't/lib';
use Hive::Config;
use File::Spec::Functions qw{catdir};

diag "Testing for Plain Old Documentation (POD) validity...";

# Test::Pod is an optional developer dependency: skip the whole test
# file gracefully when it is not installed.
eval "use Test::Pod 1.00";
if ($@) {
    plan skip_all => "Test::Pod 1.00 required for testing POD";
}

# Scan the built tree ('blib') rather than the raw sources, since the
# build step may filter or transform them.
my @directories_to_scan = map { catdir($ENV{EHIVE_ROOT_DIR}, 'blib', $_) } qw{lib script};

all_pod_files_ok( all_pod_files(@directories_to_scan) );
use strict;
use warnings;

use Test::More;

diag( "Testing for Plain Old Documentation (POD) coverage..." );

# Test::Pod::Coverage is an optional developer dependency: skip the
# whole test file gracefully when it is not installed.
eval "use Test::Pod::Coverage 1.00";
if ($@) {
    plan skip_all => "Test::Pod::Coverage 1.00 required for testing POD coverage";
}
# Method names that are "trusted" by Test::Pod::Coverage, i.e. allowed
# to remain undocumented without failing the test.  These are joined
# into a single trustme regex below.  NOTE(review): presumably these are
# existing accessors/helpers grandfathered in because, per the commit
# message, POD coverage is currently not an objective — new methods
# should be documented rather than added here.
my @allowed = qw{
accu_hash
accu_id_stack
accumulated
add
add_new_or_update
analysis_capacity
attempted_jobs
autoinc_id
available_capacity
available_meadow_hash
avg_input_msec_per_job
avg_msec_per_job
avg_output_msec_per_job
avg_run_msec_per_job
batch_size
behaviour
born
cached_name
can_be_empty
can_respecialize
cause_of_death
check_blocking_control_rules
check_for_dead_workers
check_in_worker
check_object_present_in_db_by_content
check_worker_is_alive_and_mine
cluster_2_nodes
collection
colour_offset
colour_scheme
column_set
complete_early
completed
config
config_get
config_hash
config_set
context
continue
control_rules_collection
count_active_roles
count_all
count_pending_workers_by_rc_name
count_running_workers
create_cached_dba
csvq
current_role
dataflow
dataflow_output_id
dataflow_rules
dataflow_rules_collection
db
dbc
debug
decrease_required_workers
decrease_running_workers
decrease_semaphore_count_for_jobid
default_config_files
default_input_column_mapping
default_insertion_method
default_options
default_overflow_limit
default_table_name
description
determine_status
died
died_somewhere
disconnect_count
display_name
display_subgraph
done_job_count
done_jobs
enter_status
execute_writes
failed_job_count
failed_job_tolerance
fan_cache
fetch_all
fetch_all_finished_roles_with_unfinished_jobs
fetch_all_incomplete_jobs_by_role_id
fetch_by_dbID
fetch_by_url_query
fetch_input
fetch_input_ids_for_job_ids
fetch_job_counts_hashed_by_status
fetch_last_unfinished_by_worker_id
fetch_overdue_workers
fetch_some_by_analysis_id_limit
fetch_structures_for_job_ids
final_decision
finalize_role
find_all_by
find_all_sql_schema_patches
find_available_meadow_responsible_for_worker
find_one_by
get
get_adaptor
get_available_adaptors
get_available_meadow_list
get_code_sql_schema_version
get_code_version
get_compiled_module_name
get_current_worker_process_id
get_default_meadow
get_elapsed
get_hive_current_load
get_meadow_capacity_hash_by_meadow_type
get_or_estimate_batch_size
get_pending_worker_counts_by_meadow_type_rc_name
get_report_entries_for_process_ids
get_report_entries_for_time_interval
get_role_rank
get_row_count
get_sql_schema_patches
get_stderr_redirector
get_stdout_redirector
get_value_by_key
go_figure_dbc
grab_memory
hash_leaves
hive_auto_rebalance_semaphores
hive_capacity
hive_meta_table
hive_use_param_stack
hive_use_triggers
host
incomplete
increase_required_workers
increase_running_workers
increase_semaphore_count_for_jobid
init
init_collections
inprogress_job_count
input_capacity
input_column_mapping
input_id
insertion_method
interval_workers_with_unknown_usage
is_counting
is_fully_substituted_string
is_fully_substituted_structure
job_array_common_name
job_count_breakout
job_name_prefix
jobs_collection
keys_to_columns
kill_worker
last_check_in
last_update
lethal_for_worker
life_cycle
life_span_limit_reached
lifespan_stopwatch
list
listref
load_cmdline_options
load_collections
load_from_json
logic_name
mark_stored
max_retry_count
meadow_class_path
meadow_name
meadow_type
merge
merge_from_rules
min_batch_time
module
more_work_done
multiplier
mysql_conn
mysql_conn_from_dbc
mysql_dbname
name
new
num_required_workers
num_running_workers
o
object_class
objectify
original_capacity
output_capacity
overflow_limit
overridable_pipeline_create_commands
param
param_defaults
param_id_stack
param_is_defined
param_required
param_substitute
parameters
parse
parse_report_source_line
parse_underscored_id_name
pause
perform_cleanup
pipeline_analyses
pipeline_create_commands
pipeline_name
pipeline_url
pop
pre_options
preliminary_offer
prepare
prev_job_error
primary_key
primary_key_constraint
print_active_role_counts
priority
process_id
process_options
protected_prepare_execute
push
query_count
reached
ready_job_count
recalculate_from_job_counts
refresh
register_attempt
register_worker_death
release_and_age_job
remove
remove_all
report_versions
resource_classes
responsible_for_worker
restart
retry_count
retry_throwing_jobs
role_id
root
run
run_one_batch
run_pipeline_create_commands
runnable_object
runtime_msec
save_collections
say_with_header
schedule_workers
schedule_workers_resync_if_necessary
scheduler_say
seconds_since_last_update
semaphore_count
semaphored_job_count
semaphored_job_id
set
set_and_update_status
set_default_meadow_type
signature
signature_template
slicer
sort_stats_by_suitability
special_batch
specialize_and_compile_wrapper
sql
start_job_output_redirection
status
status_of_all_our_workers
stderr_file
stdout_file
stop_job_output_redirection
store
store_if_needed
store_job_message
store_or_update_one
store_resource_usage
store_worker_message
strict_hash_format
struct_name
submission_cmd_args
submit_workers
substitute
suggest_analysis_to_specialize_a_worker
sync_lock
table_name
throw
timeout
toString
total_job_count
transient_error
type
unikey
updatable_column_list
update
update_status
url
url2dbconn_hash
use_cases
useful_commands_legend
warning
when_finished
when_started
whereami
work_done
worker_cmd_args
worker_say
worker_temp_directory_name
};
# Collapse the trusted names into one alternation and hand it to
# Test::Pod::Coverage as a 'trustme' pattern: matching symbols may stay
# undocumented.  quotemeta is defensive (the names are plain \w today);
# (?:...) avoids a useless capture, and \A/\z anchor the full string —
# unlike $, \z does not tolerate a trailing newline.
my $regex = join '|', map { quotemeta } @allowed;
all_pod_coverage_ok({ trustme => [qr/\A(?:$regex)\z/] }, 'documentation coverage');
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment