#! /usr/bin/perl -w

# vim:syntax=perl

use strict;

use vars qw/ %reports @report_order %inputs %reports_notavail $cfg
	     $dlf_count $start_time $end_time $time_field_idx
	     %fields_unavail $keep_temp_dlf /;

use lib '/usr/share/perl5';

use Lire::ReportConfig;
use Lire::Program qw( :msg tempfile $PROG $LR_ID );
use Lire::DataTypes qw( :basic :special :misc );
use Lire::AsciiDlf::AsciiDlfFactory;

use POSIX qw( strftime );
use Symbol;
use Fcntl;

use constant DEFAULT_MAX_MEMORY => "32Megs";

BEGIN {
    # Honour the LR_KEEP_TEMP_DLF environment variable: when it holds a
    # true boolean, temporary DLF files for secondary schemas are created
    # as named files (and thus kept around for inspection) instead of
    # anonymous temp files.  Defaults to "no".
    $keep_temp_dlf = 0;
    $ENV{LR_KEEP_TEMP_DLF} ||= "no";

    if ( check_bool( $ENV{LR_KEEP_TEMP_DLF}) ) {
	$keep_temp_dlf = eval_bool( $ENV{LR_KEEP_TEMP_DLF} );
    } else {
	# Invalid value: warn and keep the default (0) rather than dying.
	lr_warn( "invalid boolean value in LR_KEEP_TEMP_DLF: ",
		 $ENV{LR_KEEP_TEMP_DLF} );
    }
}

# Set to a true value to enable debug tracing through lr_debug().
my $debug = 0;

# Emit a debug message via Lire's lr_debug(), but only when debugging is
# turned on; a no-op otherwise.
sub debug {
    lr_debug( $_[0] ) if $debug;
}

# Parse the report configuration file for $superservice into the global
# $cfg, and fill %reports and @report_order with the report specifications
# it declares.  Section-level filters are merged into each report's own
# filter specification.  Aborts via lr_err() on a parse error.
sub load_report_cfg {
    my ( $superservice, $report_cfg ) = @_;

    # Direct method-call syntax; the old indirect-object form
    # ("new Class args") is ambiguous and deprecated.
    my $factory = Lire::AsciiDlf::AsciiDlfFactory->new();
    debug( "created Lire::AsciiDlf::AsciiDlfFactory object" );

    $cfg = eval { Lire::ReportConfig->new_from_file( $superservice,
						     $report_cfg, $factory ) };
    if ($@) {
        debug( "troubles when executing new_from_file Lire::ReportConfig " .
            "'$superservice', '$report_cfg', '$factory'" );
        lr_err( $@ );
    }
    debug( "load_report_cfg: executed new_from_file Lire::ReportConfig on " .
        "'$superservice', '$report_cfg' and a factory" );

    # Fill %reports and @report_order based on the config file
    foreach my $section ( $cfg->sections ) {
	# Section-level filters apply to every report in the section.
	my @filters = map { $_->filter_spec } $section->filters;
	my @reports = $section->reports;

        debug( "load_report_cfg: processing reports in section from cfg" );
	foreach my $r ( @reports ) {
	    if ( @filters ) {
		my $expr;
		if ( $r->filter_spec || @filters > 1) {
		    # Combine the section filters (plus the report's own
		    # filter, when present) into a single AND expression.
		    $expr = $factory->create_and_expr( container => $r );
		    if ( $r->filter_spec ) {
			$expr->expr( [ @filters, $r->filter_spec ] );
		    } else {
			$expr->expr( [@filters] );
		    }
		} else {
		    # One section filter and no report filter: use it as-is.
		    $expr = $filters[0];
		}
		$r->filter_spec( $expr );
	    }

	    push @report_order, $r->key;
	    $reports{$r->key} = $r;
	}
    }
}

# Scan the whole DLF file once to set the globals $dlf_count, $start_time,
# $end_time and %fields_unavail, then rewind $fh so the records can be
# read again by the report computation.
sub count_dlf_records {
    my ( $schema, $fh ) = @_;

    # Count the number of DLF records in that file. This adds insignificant
    # overhead compare to the time it take to compute the report
    $dlf_count = 0;
    my $last_dlf;
    while (<$fh>) {
	$last_dlf = $_;
	unless ($dlf_count) {
	    # First record, save the time stamp
	    chomp;
	    my $dlf = [split /\s+/];
	    $start_time = $dlf->[$time_field_idx];

            if (@$dlf != $schema->field_count) {
                debug( "count_dlf_records: choking on line '$_'" );
	        lr_crit( "DLF record has only ", scalar @$dlf, 
                     " fields when it should have ", $schema->field_count );
            }

	    # Record which fields are unavailable.  Only the first record is
	    # inspected — the code assumes a field's availability is uniform
	    # across the whole file (see check_report_fields's INVARIANT).
	    my @unavail = ();
	    for ( my $i = 0; $i < @{$schema->fields}; $i++) {
		push @unavail, $schema->field_by_pos( $i )->name
		  if ($dlf->[$i] eq 'LIRE_NOTAVAIL' );
	    }
	    %fields_unavail = map { $_ => 1 } @unavail;
	}
	$dlf_count++;
    }
    # Rewind the DLF file
    seek $fh, 0, 0
      or lr_err( "seek: $!" );

    # The last record supplies the end time — assumes records are in
    # chronological order.
    if (defined $last_dlf) {
        chomp $last_dlf;
	my $dlf = [split /\s+/, $last_dlf];
	lr_crit( "DLF record has only ", scalar @$dlf, " fields when it ",
		 "should have ", $schema->field_count )
	  if @$dlf != $schema->field_count;
        $end_time = $dlf->[$time_field_idx];
    } else {
        # DLF was probably empty
        $start_time = 'LIRE_NOTAVAIL';
        $end_time = 'LIRE_NOTAVAIL';
    }

    if ( $start_time eq 'LIRE_NOTAVAIL' || $end_time eq 'LIRE_NOTAVAIL' ) {
	# Invariant check: either both timestamps are available or neither is.
	lr_crit( "start_time and end_time should be LIRE_NOTAVAIL. Something is seriously broken" )
	  if $start_time ne $end_time;
	$start_time = 0;
	$end_time   = 0;
	lr_info( "DLF contains $dlf_count records; start and end time are unavailable" );
    } else {
	lr_info ( "DLF contains $dlf_count records; starts on ", 
		  strftime( "%Y-%m-%d %H:%M:%S", localtime $start_time ),
		  "; ends on ", 
		  strftime( "%Y-%m-%d %H:%M:%S", localtime $end_time ),
		);
    }
    lr_info( "unavailable DLF fields: ", join( ", ", keys %fields_unavail) )
      if keys %fields_unavail;
}

# Initialize every report in @report_order: register its schema in %inputs,
# attach the report to either its input or its filter entry, then compute
# the secondary (extended/derived) DLF sources for the primary schema.
# Reports whose initialization fails are dropped from %reports.
sub init_reports {
    my ($superservice) = @_;
    # Maps schema id to its input description hash; rebuilt on each call.
    %inputs = ();

    debug ("init_reports: initializing reports '@report_order'");

    # Initialize the reports
    foreach my $report ( @report_order) {
	lr_info( "initializing report '$report'");
	my $report_spec = $reports{$report};
	eval {
	    $report_spec->init_report();
	    # Reports with a filter hang off the filter entry; unfiltered
	    # ones hang off the input itself.
	    my $input  = add_schema_to_input( $report_spec->schema );
	    my $filter_spec = $report_spec->filter_spec;
	    if ( defined $filter_spec ) {
		my $filter = add_filter_to_input( $input, $filter_spec );
		push @{$filter->{reports}}, $report;
	    } else {
		push @{$input->{reports}}, $report;
	    }
	};
	if ( $@ ) {
	    # A failed initialization skips only that report.
	    lr_warn( "$@\n$report will be skipped");
	    delete $reports{$report};
	}
    }
    eval {
        create_secondary_inputs( $inputs{$superservice} ) 
    };

    if ( $@ ) {
        lr_warn( "trouble executing create_secondary_inputs: $@" );
    }
}

# Register $schema in the global %inputs and return its input entry.
# Secondary schemas (derived/extended) get a temporary DLF file and are
# linked to their base schema's entry; the primary DLF schema reads from
# the main DLF handle.  Idempotent: an already-registered schema returns
# its existing entry.
sub add_schema_to_input {
    my ( $schema ) = @_;

    my $id = $schema->id;
    return $inputs{$id} if exists $inputs{$id};

    my ( $fh, $filename, $count );
    # Check Derived first: it takes precedence over Extended, as in the
    # original cascade of isa() tests.
    my $is_derived  = $schema->isa( "Lire::DerivedSchema" );
    my $is_extended = !$is_derived && $schema->isa( "Lire::ExtendedSchema" );
    if ( $is_derived || $is_extended ) {
	# Secondary schema: computed from its base schema into a temp DLF
	# file.  Register the base first so this schema can link to it.
	my $base = $schema->base;
	add_schema_to_input( $base );

	if ( $keep_temp_dlf ) {
	    # Named temp file, kept for post-mortem inspection.
	    ($fh, $filename) = tempfile( "$PROG.$LR_ID.$id.XXXXXX",
					 SUFFIX => '.dlf' );
	} else {
	    $fh = tempfile();
	}
	$count = 0;
	push @{$inputs{$base->id}{$is_derived ? 'derived' : 'extensions'}},
	  $id;
    } elsif ( $schema->isa( "Lire::DlfSchema" ) ) {
	# Primary schema: read straight from the main DLF file handle.
	$fh = \*DLF;
	$count = $dlf_count;
    }

    return $inputs{$id} =
      { name       => $id,
	fh	   => $fh,
	filename   => $filename,
	reports    => [],
	filters    => {},
	record_count => $count,
	extensions => [], # Input sources that are derived using
	                  # the Lire::AsciiDlf::ExtendedFieldsCreator interface
	derived	   => [], # Input sources that are derived using
	                  # the Lire::AsciiDlf::DerivedRecordsCreator interface
      };
}

# Register $filter_spec on $input and return its filter entry.  Each
# distinct filter id is compiled once; subsequent calls with the same id
# return the cached entry.
sub add_filter_to_input {
    my ( $input, $filter_spec ) = @_;

    my $id = $filter_spec->id();
    unless ( exists $input->{filters}{$id} ) {
	$input->{filters}{$id} = {
	    name    => $id,
	    filter  => $filter_spec->compile(),
	    reports => [],
	};
    }
    return $input->{filters}{$id};
}

# Remove $input from %inputs and drop every report that reads from it,
# directly or through a filter.  Recursively cancels the secondary inputs
# (extended and derived schemas) computed from this one.
sub cancel_input {
    my ($input) = @_;

    foreach my $r ( @{$input->{reports}} ) {
	lr_notice( "report '$r' will be skipped because schema '$input->{name}' isn't available");
	delete $reports{$r};
    }

    foreach my $f ( values %{$input->{filters}} ) {
	foreach my $r ( @{$f->{reports}} ) {
	    lr_notice( "report '$r' will be skipped because schema '$input->{name}' isn't available");
	    delete $reports{$r};
	}
    }
    delete $inputs{$input->{name}};

    # BUGFIX: {extensions} and {derived} hold schema *ids* (strings), not
    # input hash refs — they must be looked up in %inputs before recursing
    # (compare compute_reports_parallel).  The old code recursed on the
    # bare id string.
    foreach my $i ( (@{$input->{extensions}}, @{$input->{derived}} ) ) 
    {
	cancel_input( $inputs{$i} ) if exists $inputs{$i};
    }
}

# Compute the secondary (extended and derived) DLF sources of $input by
# streaming its records through each schema's computation module, then
# recurse into the successfully computed sources.  Any secondary input
# whose computation fails is cancelled (its reports are skipped).
sub create_secondary_inputs {
    my ( $input ) = @_;

    my $schema = Lire::DlfSchema::load_schema( $input->{name} );
    # Keep only the secondary schemas whose required fields are available.
    my @extended = grep { check_schema_fields( $_ ) } 
      map { Lire::DlfSchema::load_schema( $_ ) } @{$input->{extensions}};
    my @derived  = grep { check_schema_fields( $_ ) } 
      map { Lire::DlfSchema::load_schema( $_ ) } @{$input->{derived}};

    return unless @extended || @derived;

    lr_info( "creating secondary DLF sources for '$input->{name}' schema:" );
    lr_info( "  extended schemas :  ", join( ", ", map { $_->id } @extended) )
      if @extended;

    foreach my $e ( @extended ) {
	eval {
	    # BUGFIX: report the temp file's name; the old code contained a
	    # garbled subscript here.
	    debug( "will keep temporary DLF for schema ", $e->id,
		   " in file ", $inputs{$e->id}{filename} )
	      if ($keep_temp_dlf);

	    $e->module->init_computation;
	};
	if ( $@ ) {
	    lr_warn( $@ );
	    cancel_input( $inputs{$e->id});
	}
    }

    lr_info( "  derived schemas :  ", join( ", ", map { $_->id } @derived) )
      if @derived;
    foreach my $d ( @derived ) {
	my $derived_input = $inputs{$d->id};

	# Derived records are piped through sort(1) on the timestamp field
	# so the resulting DLF source is in chronological order.
	my $fh = create_sort_filter( $d, $derived_input );
	$derived_input->{writer_cb} = sub {
	    foreach my $f ( @{$_[0]} ) {
		Lire::DlfSchema::ascii_dlf_escape_field( $f )
	    }
	    print $fh join( " ", @{$_[0]} ), "\n";
	    $derived_input->{record_count}++;
	};
	eval {
	    # BUGFIX: was 'writer_db' — a typo that always passed undef to
	    # the module (compare dlf_record/end_computation below).
	    $d->module->init_computation( $derived_input->{writer_cb} );
	};
	if ( $@ ) {
	    lr_warn( $@ );
	    cancel_input( $derived_input );
	}
    }

    my $line;
    my $count = 0;
    my $fh = $input->{fh};
    my $field_count = $schema->field_count;
    while (defined($line = <$fh>)) {
	chomp $line;
	my $dlf = [split /\s+/, $line];
        if (@$dlf != $field_count) {
            debug( "create_secondary_inputs: choking on line '$line'" );
            lr_crit( "DLF record has only ", scalar @$dlf, " fields when it ",
		 "should have ", $field_count );
        }

	foreach my $e ( @extended ) {
	    my $extended_input = $inputs{$e->id};
	    next unless $extended_input;
	    my $fields = eval {
		$e->module->create_extended_fields( $dlf );
	    };
	    if ( $@ ) {
		lr_warn( $@ );
		cancel_input( $inputs{$e->id} );
		# BUGFIX: don't fall through and dereference undef $fields
		# for a cancelled input.
		next;
	    }

	    # $out avoids shadowing the outer $fh read loop handle.
	    my $out = $extended_input->{fh};
	    foreach my $f ( @$fields ) {
		Lire::DlfSchema::ascii_dlf_escape_field( $f )
	    }
	    print $out join( " ", @$dlf, @$fields ), "\n";
	}

	foreach my $d ( @derived ) {
	    my $derived_input = $inputs{$d->id};
	    next unless $derived_input;
	    eval {
		$d->module->dlf_record( $dlf, $derived_input->{writer_cb} );
	    };
	    if ( $@ ) {
		lr_warn( $@ );
		cancel_input( $inputs{$d->id} );
	    }

	}

	$count++;
	# Progress report every 10,000 records.
	lr_info( sprintf( "%.2f%%", $count*100 / $input->{record_count} ),
		 " of DLF secondary sources of '$input->{name}' completed" )
	  unless $count % 10_000;
    }

    foreach my $e ( @extended ) {
	my $ext_input = $inputs{$e->id};
	next unless $ext_input;
	eval {
	    $e->module->end_computation;
	    # Extended sources have exactly one record per base record.
	    $ext_input->{record_count} = $input->{record_count};
	    lr_info( "$ext_input->{record_count} records in '$ext_input->{name}' extended schema" );
	};
	if ( $@ ) {
	    lr_warn( $@ );
	    cancel_input( $inputs{$e->id});
	}
    }
    foreach my $d ( @derived ) {
	my $derived_input = $inputs{$d->id};
	next unless $derived_input;
	eval {
	    $d->module->end_computation( $derived_input->{writer_cb} );
	    lr_info( "$derived_input->{record_count} records in '$derived_input->{name}' derived schema" );
	    delete $derived_input->{writer_cb}; # Close sort fh
	    # BUGFIX: waitpid returns -1 (a true value) on error, so the
	    # old "waitpid ... or croak" test could never fire.
	    waitpid( $derived_input->{sort_pid}, 0 ) != -1
	      or croak( "waitpid failed: $!");
	};
	if ( $@ ) {
	    lr_warn( $@ );
	    cancel_input( $derived_input );
	}
    }

    # Rewind all file handles
    seek $input->{fh}, 0, 0
      or lr_err( "seek: $!" );

    foreach my $s ( (@extended, @derived) ) {
	my $i = $inputs{$s->id};
	next unless $i;

	seek $i->{fh}, 0, 0
	  or lr_err( "seek: $!" );
    }

    # Recurse
    foreach my $s ( (@extended, @derived) ) {
	my $i = $inputs{$s->id};
	next unless $i;

	create_secondary_inputs( $i );
    }
}

# Return 1 when every field that $schema requires is available in the DLF
# data.  Otherwise log the missing fields, cancel the schema's input (so
# its reports are skipped) and return 0.
sub check_schema_fields {
    my ( $schema ) = @_;

    # Pick out the required fields flagged unavailable by count_dlf_records.
    my @unavailable =
      grep { $fields_unavail{$_} } @{$schema->required_fields};

    return 1 unless @unavailable;

    lr_notice( "can't compute schema '", $schema->id,
	       "' because some fields are unavailable: ",
	       join( ", ", @unavailable ) );
    cancel_input( $inputs{$schema->id} );
    return 0;
}

# Fork a child running sort(1) on $schema's timestamp field, with its
# output connected to $input->{fh}.  Returns the write end of the pipe:
# records printed to it come back time-sorted in $input->{fh}.  The
# child's pid is stored in $input->{sort_pid}; the caller must close the
# returned handle, then waitpid() on that pid.
sub create_sort_filter {
    my ( $schema, $input ) = @_;

    my ( $reader, $writer ) = (gensym,gensym);
    pipe $reader, $writer
      or croak( "pipe failed: $!" );
    my $pid = fork;
    croak( "fork failed: $!")
      unless defined $pid;
    
    if ($pid == 0 ) {
	# Child
	close $writer;

	# Clear close-on-exec so the descriptor survives the exec below.
	fcntl( $reader, F_SETFD, 0 )
	  or croak( "error removing close on exec flag: $!");

	open ( STDIN, "<&" . fileno $reader )
	  or lr_err( "error redirecting stdin: $!" );
	open ( STDOUT, ">&" . fileno $input->{fh} )
	  or lr_err( "error redirecting stdout: $!" );
	# "+Nn" is the historical sort(1) key syntax: numeric sort on the
	# zero-based field N.
	exec ( "sort", "+" . $schema->timestamp_field->pos . "n" )
	  or lr_err( "exec failed: $!" );
    } else {
	# Parent: keep only the write end of the pipe.
	close $reader;
	
	$input->{sort_pid} = $pid;

	return $writer;
    }
}

# The sequential algorithm has a lower memory footprint
# at the cost of longer processing time. Here are some
# stats comparing the two algorithms.
#
# TIME = USER+SYSTEM
#		RECORDS    TIME       RSS
# Parallel	  62672   95.67    20404K
# Sequential      62672  123.48    12068K
#
# Parallel       178748  266.83    37532K
# Sequential     178748  345.54    21768K

# Improvements	    RSS	      TIME
#  62672	  40.9%     -29.1%
# 178748	  42.0%     -29.5%
#
# Speed trade-off is probably constant. Memory savings
# will probably depend on the kind of report generated.
# Reports that group a lot of keys will gain more, whereas
# reports that have a limited number of keys (i.e. http_method, os)
# won't see any improvements.
# Compute every report in @report_order one at a time, re-reading the
# report's DLF source for each report.  Slower than the parallel strategy
# but only one report's state is in memory at a time (see stats above).
sub compute_reports_sequential {

    my $report_count = @report_order;
    my $report_no = 1;
  REPORT:
    foreach my $report ( @report_order ) {
	lr_info( "computing report '$report' ($report_no / $report_count)" );
	my $report_spec = $reports{$report};
	next unless defined $report_spec; # Skipped failed reports

	my ($input, $filter);
	# Find the input of this report_spec
      INPUT:
	foreach my $i ( values %inputs ) {
	    # Look into reports
	    foreach my $r ( @{$i->{reports}}) {
		if ($r eq $report) {
		    $input = $i;
		    last INPUT;
		}
	    }

	    # Look in filters
	    foreach my $f ( values %{$i->{filters}} ) {
		foreach my $r ( @{$f->{reports}}) {
		    if ( $r eq $report ) {
			$filter = $f->{filter};
			$input = $i;
			last INPUT;
		    }
		}
	    }
	}

	unless (defined $input) {
	    lr_warn( "can't find '$report' report in \%inputs" );
	    delete $reports{$report};
	    next REPORT;
	}
	  
	my $fh = $input->{fh};
	my $line;
	my $first_record = 1;
	my $count = 0;
	my $schema = Lire::DlfSchema::load_schema( $input->{name} );
	my $field_count = $schema->field_count;
      LINE:
	while (defined($line = <$fh>)) {
	    chomp $line;

	    $count++;
	    # Progress report every 10,000 records.
	    lr_info( sprintf( "%.2f%%", $count*100 / $input->{record_count} ),
		     " of report '$report' computed" )
	      unless $count % 10_000;

	    my $dlf = [split /\s+/, $line];
            if (@$dlf != $field_count) {
                debug( "compute_reports_sequential: choking on line '$line'" );
                lr_crit( "DLF record has only ", scalar @$dlf,
                    " fields when it should have ", $field_count );
            }

	    # Field availability is uniform across records, so inspecting
	    # the first record is enough to decide whether to skip.
	    if ( $first_record ) {
		next REPORT
		  unless check_report_fields( $report, $dlf );
		$first_record = 0;
	    }

	    # Skip unwanted record
	    if (defined $filter ) {
		next unless $filter->( $dlf );
	    }

	    eval {
		$report_spec->update_report( $dlf );
	    };
	    if ( $@ ) {
		lr_warn( "$@\n$report will be skipped");
		delete $reports{$report};
		next REPORT;
	    }
	}
	# Rewind the input so the next report can re-read it
	seek $fh, 0, 0
	  or lr_err( "seek: $!" );

	lr_info( "completing report '$report'" );
	eval {
	    $report_spec->end_report();
	};
	if ($@) {
	    lr_warn( "$@\n$report failed");
	    delete $reports{$report};
	}

	$report_no++;
    }
}

# Return 1 when every field needed by $report is available in $dlf (an
# array ref holding the first DLF record).  Otherwise record the missing
# fields in %reports_notavail, drop the report from %reports and return 0.
sub check_report_fields {
    my ( $report, $dlf ) = @_;

    my $spec = $reports{$report};
    return 0 unless $spec;

    # INVARIANT: if a field isn't LIRE_NOTAVAIL in the first record, no
    # record will have LIRE_NOTAVAIL as a value for this field — so one
    # record is enough to decide.
    my @missing;
    foreach my $field ( $spec->needed_fields() ) {
	push @missing, $field->name
	  if $dlf->[$field->pos] eq "LIRE_NOTAVAIL";
    }

    return 1 unless @missing;

    lr_notice( "report '$report' will be skipped because some ",
	       "required fields are unavailable: ",
	       join( ", ", @missing ) );
    delete $reports{$report};
    $reports_notavail{$report} = join ", ", @missing;
    return 0;
}

# Compute all reports reading from $input in a single pass over its DLF
# records, then recurse into the input's extended and derived sources.
# Faster than the sequential strategy but keeps every report's state in
# memory at once.
sub compute_reports_parallel {
    my ($input) = @_;

    # Input may have been cancelled (or never created).
    return unless $input;

    my $fh	= $input->{fh};
    my $reports = $input->{reports};
    my $filters = $input->{filters};
    my $record_count = $input->{record_count};

    lr_info( "computing reports from DLF source '$input->{name}'" );

    my $line;
    my $first_record = 1;
    my $count = 0;
    my $schema = Lire::DlfSchema::load_schema( $input->{name} );
    my $field_count = $schema->field_count;
    while (defined($line = <$fh>)) {
	chomp $line;

	$count++;

	# Progress report every 10,000 records.
	lr_info( sprintf( "%.2f%%", $count*100 / $record_count ),
		 " of DLF source '$input->{name}' processed" )
	  unless $count % 10_000;
	my $dlf = [split /\s+/, $line];
        if (@$dlf != $field_count) {
            debug ( "compute_reports_parallel: choking on line '$line'" );
            lr_crit( "DLF record has only ", scalar @$dlf, " fields when it ",
		 "should have ", $field_count );
        }
        
	# Field availability is uniform across records; the first record
	# decides which reports must be skipped.
	if ( $first_record ) {
	    foreach my $report ( (@$reports,
				  map { @{$_->{reports}} } values %$filters ))
	      {
		  check_report_fields( $report, $dlf );
	      }
	    $first_record = 0;
	}

	# Compute reports which aren't using a filter
	foreach my $report ( @$reports ) {
	    my $report_spec = $reports{$report};
	    next unless defined $report_spec; # Skipped failed reports
	    eval {
		$report_spec->update_report( $dlf );
	    };
	    if ( $@ ) {
		lr_warn( "$@\n$report will be skipped");
		delete $reports{$report};
	    }
	}

	# Compute each reports using a filter
	foreach my $f ( values %$filters) {
	    next unless $f->{filter}->( $dlf );

	    foreach my $report ( @{$f->{reports}} ) {
		my $report_spec = $reports{$report};
		next unless defined $report_spec; # Skipped failed reports
		eval {
		    $report_spec->update_report( $dlf );
		};
		if ( $@ ) {
		    lr_warn( "$@\n$report will be skipped");
		    delete $reports{$report};
		}
	    }
	}
    }
    close $fh;
    lr_info( "processed $count records in DLF source '$input->{name}'" );

    # Finalize the reports
    foreach my $report ( (@$reports,
			  map { @{$_->{reports}} } values %$filters )) 
    {
	my $report_spec = $reports{$report};
	next unless defined $report_spec; # Skipped failed reports

	lr_info( "completing report '$report'" );
	eval {
	    $report_spec->end_report();
	    $reports{$report} = $report_spec;
	};
	if ($@) {
	    lr_warn( "$@\n$report will be skipped");
	    delete $reports{$report};
	}
    }

    # {extensions} and {derived} hold schema ids, hence the %inputs lookup
    # before recursing.
    foreach my $i ( (@{$input->{extensions}}, @{$input->{derived}}) )
    {
	compute_reports_parallel( $inputs{$i} );
    }
}

# Write the full LRML report document to STDOUT: XML header, one
# lire:section per configured section, and the XML of every successfully
# computed report.  Reports skipped for missing fields are noted in an
# XML comment; reports that failed are silently omitted.
sub write_reports {
    # Output the reports
    my $time  = strftime '%Y-%m-%d %H:%M:%S %Z', localtime;
    my $stime = strftime '%Y-%m-%d %H:%M:%S %Z', localtime $start_time;
    my $etime = strftime '%Y-%m-%d %H:%M:%S %Z', localtime $end_time;

    # FIXME: We explicitely set the encoding to us-ascii because
    # it won't work otherwise. Parsers return data in UTF-8. We need
    # to handle the conversion.
    print  <<EOF;
<?xml version="1.0" encoding="us-ascii"?>
<!DOCTYPE lire:report PUBLIC
  "-//LogReport.ORG//DTD Lire Report Markup Language V1.0//EN"
  "http://www.logreport.org/LRML/1.0/lire.dtd">
<lire:report xmlns:lire="http://www.logreport.org/LRML/"
 date="$time">
<!-- generated by lr_dlf2xml(1) -->
 <lire:timespan>$stime - $etime</lire:timespan>
EOF

    foreach my $section ( $cfg->sections ) {
	print " <lire:section>\n";
	print "  <lire:title>", $section->title, "</lire:title>\n";
	my $desc = $section->expanded_display_description;
	print "  <lire:description>\n   ", $desc, "  </lire:description>\n"
	  if ( $desc );

	foreach my $report ( map { $_->key } $section->reports ) {
	    my $report_spec = $reports{$report};
	    if ( exists $reports_notavail{$report} ) {
		print qq{  <!-- report "$report" was skipped because of missing fields : $reports_notavail{$report} -->\n\n};
	    } else {
		next unless defined $report_spec; # Skipped failed reports
		lr_info( "generating XML for report '$report'" );
		$report_spec->write_report( \*STDOUT, 2 );
	    }
	}
	print " </lire:section>\n";
    }
    print "</lire:report>\n";
}

lr_err( "Usage: $PROG <superservice> <report_cfg_file> <dlf_file>" )
  unless @ARGV == 3;

my ( $superservice, $report_cfg, $dlf_file ) = @ARGV;

# Open the DLF file.  Three-argument open prevents a filename starting
# with '>' or '|' from being interpreted as an open mode.
open DLF, '<', $dlf_file
  or lr_err( "can't open $dlf_file: $!" );

my $compute_strategy;
# Determine if we are going to process that report in parallel or
# sequentially: DLF files larger than LR_MAX_MEMORY (default 32M) are
# processed one report at a time to limit the memory footprint.
my $max_size = defined $ENV{LR_MAX_MEMORY} ? size2bytes( $ENV{LR_MAX_MEMORY} )
  : size2bytes( DEFAULT_MAX_MEMORY );
if ( -s DLF > $max_size ) {
    $compute_strategy = \&compute_reports_sequential;
    lr_info( "will compute reports sequentially");
} else {
    $compute_strategy = \&compute_reports_parallel;
    lr_info( "will compute reports in parallel");
}
my $schema = eval { Lire::DlfSchema::load_schema( $superservice ) };
if ($@) {
    debug( "troubles executing Lire::DlfSchema::load_schema on " .
        "'$superservice'" );
    lr_err( $@ );
}
debug( "executed Lire::DlfSchema::load_schema on '$superservice'" );

$time_field_idx = $schema->timestamp_field->pos;
debug( "inspected time_stamp_field" );

load_report_cfg( $superservice, $report_cfg );
debug( "executed load_report_cfg on '$superservice', '$report_cfg'" );

count_dlf_records( $schema, \*DLF );
debug( "executed count_dlf_records" );

init_reports( $superservice );
debug( "executed init_reports on '$superservice'" );

$compute_strategy->( $inputs{$superservice} );
write_reports();

if ( $ENV{ARCHIVE} ) {
    # Save the timespan of the DLF data in the archive.  BUGFIX: the span
    # runs from $start_time to $end_time; the old code used $end_time for
    # both halves.
    my $lr_time = strftime( "%Y%m%d%H%M%S", localtime $start_time ) . "-" .
      strftime( "%Y%m%d%H%M%S", localtime $end_time );
    lr_info( "gonna run lr_db_store $LR_ID time_span $lr_time" );
    system( "lr_db_store", $LR_ID, "time_span", $lr_time );
    # $? is numeric — compare numerically, not with 'ne'.
    lr_err "lr_db_store failed"
      if ( $? != 0 );
}
exit 0;

# Local Variables:
# mode: cperl
# End:

__END__

=pod

=head1 NAME

lr_dlf2xml - generate an XML report from a dlf file

=head1 SYNOPSIS 

B<lr_dlf2xml> I<superservice> I<report_cfg_file> I<dlffile>

=head1 DESCRIPTION

B<lr_dlf2xml> reads a dlf file, and prints a generated XML report to stdout.

It stores the dlf file's timespan in the Lire database, by running
lr_db_store(1).

It inspects the I<report_cfg_file> (e.g. .../etc/lire/email.cfg) to find
names of reports and associated settings.

The environment variable LR_ID is used in debug messages printed to stderr.
The directory stored in the environment variable TMPDIR, as set in defaults,
is used to create temporary files in.

This script is called by lr_log2report(1).

=head1 SEE ALSO

lr_log2report(1), documentation in the Lire User Manual

=head1 VERSION

$Id: lr_dlf2xml.in,v 1.70 2002/02/03 21:57:05 flacoste Exp $

=head1 COPYRIGHT

Copyright (C) 2001 Stichting LogReport Foundation LogReport@LogReport.org
 
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
 
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with this program (see COPYING); if not, check with
http://www.gnu.org/copyleft/gpl.html or write to the Free Software 
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111, USA.

=head1 AUTHOR

Francis J. Lacoste <flacoste@logreport.org>

=cut


