Commit d9e38f1b authored by Olav Kvittem's avatar Olav Kvittem
Browse files

added autostart to remote scripts

parent 46354fab
......@@ -16,6 +16,8 @@ crontab dragonlab/etc/crontab.cfg
##on server
rsync -rvt dragonlab user@machine:
Enter node in config files
* /dynga/dragonlab/script/mp-names.txt
* /dynga/dragonlab/script/rude-list.conf
......@@ -29,6 +31,6 @@ cd tmp
# get password-free code out there
rsync /dynga/dragonlab/script/authorized_keys oak@104.196.241.36:.ssh
../server/script/rude-config-maker --list /dynga/dragonlab/script/mp-names.txt/dynga/dragonlab/script/rude-list.conf
../server/script/rude-config-maker --list /dynga/dragonlab/script/mp-names.txt /dynga/dragonlab/script/rude-list.conf
../server/script/rude-config-updater --list /dynga/dragonlab/script/mp-names.txt | sh -x
~~~~~
#!/bin/bash
# start measurement jobs if not running
# Invoked (presumably from cron) to (re)start the crude/rude/trace/vmstat
# measurement processes.  Any command-line argument means "stop mode":
# kill the running jobs before restarting them.
# NOTE(review): $logpath, $date, $crude, $crude_port, $rude, $rudecfg,
# $trace, $tcptrace and $vmstat are assumed to be set by start.cfg
# (sourced below) -- TODO confirm against dragonlab/etc/start.cfg.
export USER=$(whoami)
source $HOME/dragonlab/etc/start.cfg
# stop=yes when any argument is given, forcing a kill/restart cycle.
stop=no
if test $# -gt 0; then
stop=yes
fi
# Timestamps are taken in the Oslo timezone so the daily rollover
# happens at local midnight regardless of the host's TZ.
T=$(env TZ=Europe/Oslo date +%T)
Tm=$(env TZ=Europe/Oslo date +%H:%M)
mkdir -p $logpath/$date
# At midnight (or in stop mode) terminate yesterday's measurement jobs.
if test $Tm = "00:00" -o $stop = yes; then
pkill -u $USER -f $crude || echo crude kill failed
pkill -u $USER -f $rude || echo rude kill failed
pkill -u $USER -f $trace || echo trace kill failed
pkill -u $USER -f $tcptrace || echo tcptrace kill failed
# pkill -u $USER -f $trace6 || echo trace6 kill failed
pkill -u $USER -f $vmstat || echo vmstat kill failed
sleep 3 # allow previous day's processes to die
fi
# Restart each collector only if it is not already running
# (pgrep -f matches against the full command line).
if ! pgrep -u $USER -f $crude >/dev/null; then
$crude -k -p $crude_port | gzip -c > $logpath/$date/crude-$T.gz 2> /dev/null&
fi
if ! pgrep -u $USER -f $rude >/dev/null; then
$rude -s $rudecfg > /dev/null 2> /dev/null &
fi
if ! pgrep -u $USER -f $trace >/dev/null; then
$HOME/dragonlab/bin/start-trace.sh
fi
if ! pgrep -u $USER -f $tcptrace >/dev/null; then
$HOME/dragonlab/bin/start-tcptrace.sh
fi
if ! pgrep -u $USER -f $vmstat >/dev/null; then
$vmstat 2> /dev/null > /dev/null&
fi
#!/bin/bash
# Terminate the mp-rude measurement processes at midnight CET.
# The process names ($crude, $rude, $trace, $vmstat) come from start.cfg.
source $HOME/dragonlab/etc/start.cfg

# Sample the clock in the Oslo timezone and act only at exactly 00:00:00.
now=$(env TZ=Europe/Oslo date +%T)
if [ $now = "00:00:00" ]; then
    pkill -f $crude
    pkill -f $rude
    pkill -f $trace
    pkill -f $vmstat
fi
#!/usr/bin/perl
# Analyse gaps in a crude packet log: emits per-gap QoS statistics as
# text or JSON, and can graph the delay trend around each gap.
# use PDL;
# use PDL::Ops;
# use PDL::Fit::Polynomial;
use Socket;
use Statistics::LineFit;

# Sanity window for transmit timestamps (epoch seconds):
# date --date 'jan 1 2000' +%s
$min_tx=946681200;   # 2000-01-01
$max_tx=1893452400;  # 2030-01-01
$maxseqreorder=1000; # larger backward seq jumps are treated as a counter reset
$max_small_gap=10;   # the max size (missing packets) of a small gap
$max_small_graphs=20;
$max_big_graphs=20;
$late_delay=3;       # seconds to doom a packet late

require "newgetopt.pl";
$usage="$0 '[-title text] [-minloss n] [-win n] [-graph file] [max-small-graphs n] [-outdir dir] [-head|-rhead] [-id id] [-names file] [-json file] [-v] [file]...
Analyse gaps in a crude packet log
- output a list of statistical qos parameters as text or json
- make linear regression to see the delay trend around a gap
- make curves to show the delay change before and after a gap
Parameters
-rhead n - output headers so that R can make the headings in the text tables
-max-small-graphs n - limit number(20) of graphs to output from small few packet losses
-slep n - number(1000) of crude lines in the circular buffer
-json file - filename to store json event documents (intended for logstash ?)
";
&NGetOpt( 'h', 'help', 'id=s', 'slep=s', 'minloss=s', 'win=s', 'max-small-graphs=s', 'head', 'rhead', 'graph=s', 'outdir=s', 'title=s', 'names=s', 'json=s', 'v') || die "$!" . $usage ."\n";
if ( $opt_h || $opt_help) {
    printf "$usage\n";
    exit(0);
}
# Column headings for the text report (x1..x4 are spacer columns;
# h_* describe the pre-gap head, t_* the post-gap tail).
my @heads= qw/id date time tunix x1 nloss tloss seqloss x2 seqtail overlap x3 h_n h_jit h_ddelay h_delay h_min_d h_slope_10 h_slope_20 h_slope_30 h_slope_40 h_slope_50 x4 t_n t_jit t_ddelay t_delay t_min_d t_slope_10 t_slope_20 t_slope_30 t_slope_40 t_slope_50/;
if ($opt_rhead){
    # Print the headings as an R vector, e.g. head<-c("id", "date", ...),
    # then exit.  (Removed a dead "my @a=split(' ', $head)" that read an
    # undefined variable and was never used.)
    my $h="";
    foreach my $col (@heads){
        $h.='"'.$col.'", ';
    }
    chop($h); chop($h); # remove trailing ', '
    printf 'head<-c(' . $h . ")\n";
    exit 0;
}
my %hix=(); # hash on name to index in @heads
foreach $i(0..$#heads){
    $hix{$heads[$i]}=$i;
}
my $coder; # json coder
if ( $opt_json){
    require JSON::XS;
    my $json=$opt_json;
    # BUG FIX: was  open JSON, ">$json" || die ...  -- '||' bound to the
    # filename string (always true), so a failed open was never fatal.
    # Three-arg open with low-precedence 'or' reports failures correctly.
    # The bareword JSON handle is kept: it is closed (and presumably
    # written) elsewhere in this script.
    open JSON, '>', $json or die "Could not open $json ; $!";
    $coder = JSON::XS->new->ascii->pretty->allow_nonref;
    $encoder=$coder->canonical([1]); # stable key order in emitted JSON
}
# Lazily load the (heavy) charting modules only when graphs are requested.
if ($opt_graph){
    require Chart::Clicker;
    require Chart::Clicker::Data::Series;
    require Chart::Clicker::Data::DataSet;
    require Chart::Clicker::Renderer::Point;
}
if ( $opt_names){
    get_names($opt_names); # populate the global %hostname from the names file
}

# Tunables, overridable from the command line.
$maxslep=$opt_slep || 1000 ;     # crude lines kept in the circular buffer
$maxhead=$opt_win || 10;         # packets to keep before
$maxtail=$opt_win || 10;         # packets to keep after
$min_slopes=5;                   # slopes to report on text report
$minloss= $opt_minloss || 1;     # loss threshold separating big from small gaps
$minrecover = $opt_recover || 5; # consecutive packets needed to end a gap
$outdir=$opt_outdir || ".";
$title=$opt_title || 'Delay';
$bv_fmt='^([\d]+)\s+([\d\.\:]+)\s+([\d\.]+)\s+([\d\.]+)'; # BV's condensed format for crude
$id= $opt_id || "ukjent" ;       # "ukjent" is Norwegian for "unknown"

# These lexicals are declared before the subs below, so read_crude() and
# emit_stats() close over them -- keep the declarations ahead of the call.
my %npackets=(); # keep track of all ids
my $print_line;
my %duration; # seconds per id

read_crude();

if ( $opt_v ){
    # Verbose per-flow summary on STDERR.
    foreach $id ( sort keys %dupl){
        printf STDERR "%-30s %d duplicates\n", $id, $dupl{$id};
    }
    foreach $id ( sort keys %reorder){
        # BUG FIX: was $reorder{$id}*10^6/$npackets{$id}.  In Perl '^' is
        # bitwise XOR and binds looser than '*' and '/', so the printed
        # "ppm" value was garbage.  Use 10**6, matching the summary below.
        printf STDERR "%-30s %d reordered (%d ppm)\n", $id, $reorder{$id}, 10**6 * $reorder{$id} / $npackets{$id};
    }
    foreach $id ( sort keys %npackets){
        printf STDERR "%-30s lasted %02d:%02d:%02d ( %d seconds ) and has %d small and %d big gaps and lost %.3f small and %.3f big seconds %d resets %d late %d ppm.\n", $id,
        $duration{$id}/3600, $duration{$id}%3600/60, $duration{$id}%60, $duration{$id},
        $nsmall_gaps{$id}, $nbig_gaps{$id},
        $small_time{$id}, $big_time{$id}, $resets{$id}, $late_n{$id},
        10**6 * ($small_time{$id} + $big_time{$id}) / ($duration{$id} || 1); # ppm; "|| 1" guards a zero-length flow from a fatal division by zero
    }
    printf STDERR "Big gap limit %d packets.\n", $minloss;
}
# Flows whose Tx timestamps fell outside the [$min_tx,$max_tx] sanity window.
foreach $id (keys %small_tx){
    print STDERR "ID $id has Tx to small in $small_tx{$id} packets\n";
}
close JSON if $opt_json;
exit(0);
################################################################################
#name id dns ip
# Read a whitespace-separated mapping file ("name id dns ip" per line)
# and populate the global %hostname table keyed on IP address.
# Comment lines (first non-blank character '#') are skipped.  A missing
# or unreadable file is silently ignored (best-effort, as before).
sub get_names {
    my $file = shift;
    # Three-arg open with a lexical handle (was 2-arg bareword open).
    if ( open my $fh, '<', $file ){
        while ( my $line = <$fh> ){
            # BUG FIX: was  next if /\s*#/;  -- \s* can match the empty
            # string, so ANY line containing '#' anywhere was skipped.
            # Anchor to skip only leading-comment lines.
            next if $line =~ /^\s*#/;
            my ($name, $user, $dns, $ip) = split ' ', $line;
            $hostname{$ip} = $name if defined $ip; # skip short/blank lines
        }
        close $fh;
    }
}
################################################################################
# read_crude -- main parsing loop.  Reads crude log lines from <> (the
# files on the command line, or STDIN), tracks per-flow sequence state
# keyed on $id, classifies each packet as normal / duplicate / reordered
# / late / lost, buffers lines around loss gaps, and triggers the
# per-gap head/tail reports via emit_break_head() and emit_stats().
# All state lives in package-global per-id hashes (%pseq, %slep,
# %gap_slep, %in_gap, %ntail_seq, %lost, %npackets, %duration, ...).
sub read_crude {
my $tx; # current transmit time
while(<>){
if ( /crude version 0.9.0/){
# die "### version with a bug in Rx : $_";  (translated from Norwegian; check deliberately disabled)
}
my $seq;
if ( /crude version/){ # new file restart sequence control
# NOTE(review): undef takes ONE argument -- this statement clears only
# %pseq; ", %selp, %gap_slep" is just a comma list and does nothing.
# %selp also looks like a typo for %slep.  Confirm intent.
undef %pseq, %selp, %gap_slep;
} elsif ( ( ($rudeid,$seq, $src, $tx, $rx, $size) = /^ID=(\d+) SEQ=(\d+) SRC=([\w\:\.]+):\d+ .*Tx=([\d\.]+) .*Rx=([\d\.]+) .*SIZE=(\d+)/ )
|| ( ($seq, $src, $tx, $rx) = /$bv_fmt/ ) ){
# NOTE(review): $size is only assigned by the ID= format above; for
# $bv_fmt lines it keeps its previous (or undef) value, so this filter
# may misbehave on BV-format input -- confirm.
next if $size < 50; # just a sanity filter for the packets in harstad-mp that have SIZE=4
# Resolve the flow id: explicit -id option wins, then the names file /
# previous lookups in %hostname, then a reverse DNS lookup, then the
# raw source address.
if ( $opt_id){
# use that
} elsif ($hostname{$src}){
$id=$hostname{$src};
} else {
if ( $hostname = gethostbyaddr(inet_aton($src), AF_INET) ){
$id=$hostname{$src}=$hostname;
} else {
$id=$hostname{$src}=$src;
}
}
# Discard packets whose Tx timestamp is outside the sane epoch window.
if ($tx < $min_tx || $tx > $max_tx){
$small_tx{$id}++;
next; #########################
}
$npackets{$id}++;
my $dt=0;
if (defined($pseq{$id})){
my $dseq=$seq - $pseq{$id}; # sequence-number step since last packet
$dt=$rx-$tx;                # one-way delay of this packet
$ids{$id}++;
if ( $dt > $late_delay ){ # late packet
$late_n{$id}++;
$late_sum{$id}+=$dt;
$late_ss{$id}+=$dt*$dt;
} elsif ( $dseq == 1 ){ # normal packet
if ( $ntail_seq{$id} > 0 ){ # is recovering
$ntail_seq{$id}++;
# Enough consecutive packets after a gap: the gap is over.
if ( $ntail_seq{$id} >= $minrecover && $in_gap{$id} ){
$emit_graph{$id}=1;
my $missing= $gap_end_seq{$id} - $head_seq{$id};
# Rate-limit graph output separately for small and big gaps.
if ( $missing <= $max_small_gap ){
if ($n_small_graphs{$id} > $max_small_graphs){
$emit_graph{$id}=0;
} else {
$n_small_graphs{$id}++;
}
} else {
if ( $n_big_graphs{$id} > $max_big_graphs){
$emit_graph{$id}=0;
} else {
$n_big_graphs{$id}++;
}
}
emit_break_head($id, $missing );
$ntail_seq{$id}=0; #
# add the ok part of the postgap tail
# my $bad=$#{$gap_slep{$id}} - $ntail_seq{$id};
# $bad = 0 if $bad < 0 ;
for ($lno=0; $lno <= $#{$gap_slep{$id}}; $lno++){
push( @{$slep{$id}}, $gap_slep{$id}[$lno] );
}
$gap_slep{$id}=[]; # copied - blank it.
}
}
$pseq{$id}= $seq;
$ptx{$id}=$tx;
} elsif ($dseq == 0 ){ # duplicate packet
$dupl{$id}++;
} elsif ($dseq < 0 ) { # reordered
if ( $dseq > (-$maxseqreorder) ){ # reordered
$reorder{$id}++;
# NOTE(review): undef leaves the key in %lost with an undef value, so
# keys %{$lost{$id}} still counts it -- 'delete' may have been meant.
undef $lost{$id}{$seq};
} else { # reset: sender restarted its sequence counter
$resets{$id}++;
$seq0{$id}=$seq;
# $pseq{$id}= $seq;
undef $pseq{$id};
$ptx{$id}=$tx;
}
} elsif ( ( $dseq > 1 ) && ( $nslep{$id} > 0 ) ){ # some packets lost and we got started
if ( $dseq > $minloss && ! ( $in_gap{$id} > 0 ) ){ # is a new big gap
if ( $ntail_seq{$id} < 1 ){ # start of new gap
# Remember the window of buffered lines leading into the gap.
my $start=$#{$slep{$id}} - $maxhead;
$start=0 if $start < 0 ; # has too few packets in buffer
$head_start{$id} = $slep{$id}[$start];
$head_end{$id} = $slep{$id}[$#{$slep{$id}}] ; # last valid record before outage
$head_seq{$id} = $pseq{$id};
$in_gap{$id}=1;
}
$ntail_seq{$id} = 1; # restart this if there are more holes
$gap_slep{$id}=[];
$nbig_gaps{$id}++;
$big_gaps{$id}+=$dseq-1;
$big_time{$id}+=$tx-$ptx{$id};
} else {
$nsmall_gaps{$id}++;
$small_gaps{$id}+=$dseq-1;
$small_time{$id}+=$tx-$ptx{$id};
}
# note which packets are lost
foreach $lost ( $pseq{$id}+1 .. $seq-1 ){
$lost{$id}{$lost}=1;
}
$gap_end{$id}=$_;
$gap_end_seq{$id}=$seq;
$ptx{$id}=$tx;
$pseq{$id}= $seq;
}
} else {
# First packet seen for this id (or first after a reset).
$seq0{$id}=$seq;
$pseq{$id}=$seq;
$ptx{$id}=$tx;
$t0{$id}=$tx if !$t0{$id};
}
if ( $dt == 0 || $dt <= $late_delay ){ # buffer lines
if ( $in_gap{$id} > 0 ){ # during gap
push(@{$gap_slep{$id}}, $_);
} else {
push(@{$slep{$id}},$_); # circular buffer capped at $maxslep lines
if ($nslep{$id} < $maxslep){ # buffer packets
$nslep{$id}++;
} else {
# my $a=
shift @{$slep{$id}};
# undef $a;
}
}
}
$in_gap{$id}=0 if $ntail_seq{$id} < 1;
# handle tail
# count up multiple possibly overlapping tails
# if ($dseq > $minloss && $nslep{$id} > 0 ){
# if ( $ntail_seq{$id} > 0 ){
foreach $i (0 .. $#{$ntail{$id}}){
if ($ntail{$id}[$i] < $maxtail){
$ntail{$id}[$i]++;
}
}
# }
#
# foreach $i (0 .. $#{$ntail{$id}}){
# The oldest pending break has collected a full tail: report it.
if ($nbreak{$id} > 0 && ($ntail{$id}[0] >= $maxtail)){
my $head=shift(@{$head1{$id}});
push(@{$tail{$id}}, report_delay( $id, 'tail', @slep{$id}), 0, 0);
$print_line.= sprintf "%s overlap %8d %2d ", $head, $head_seq{$id}, $#{$ntail{$id}}+1;
shift(@{$ntail{$id}});
&emit_stats($id);
$nbreak{$id}--;
}
# }
# }
# }
}
} # while read
# Flow duration = last Tx seen (any flow) minus this flow's first Tx.
foreach $id ( keys %t0 ) {
$duration{$id}=$tx-$t0{$id};
}
}
##################################################################################
# emit_break_head -- record the start of a reported gap for flow $id.
# $dseq is the number of sequence numbers spanned by the gap.  Formats a
# one-line gap header (timestamp, loss count, gap length in ms, offset
# from the flow's first seq) onto @{$head1{$id}}, registers the break in
# @{$ntail{$id}}, and pushes the pre-gap delay report onto @{$head{$id}}.
sub emit_break_head {
my ($id, $dseq) = @_;
$nbreak{$id}++;
# $tx1=&tx($slep{$id}[$#{$slep{$id}}]);
my $tx1=&tx($head_end{$id}); # last packet before the outage
my $rx1=&rx($head_end{$id});
# my $tx2=&tx( $slep{$id}[$#{$slep{$id}} - $ntail_seq{$id} + 2] );
my $tx2=&tx( $gap_end{$id} ); # first packet after the outage
my $rx2=&rx( $gap_end{$id} );
# my $dt= $rx2 - $tx1 - $min_delay;
# Gap length: Tx-to-Tx difference minus the nominal packet interval,
# so both timestamps come from the sender's clock.
my $dt= $tx2 - $tx1 - &p_interval($slep{$id}); # clock from same side more accurate diff
($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) =
localtime($tx1);
push( @{$head1{$id}}, sprintf "%-25s %4d-%02d-%02d %02d:%02d:%02d %s gap %5d %5.1f %6d ",
$id, $year+1900,$mon+1,$mday, $hour, $min, $sec, $tx1,
$dseq-1, $dt*1000, $head_seq{$id}-$seq0{$id} );
push(@{$ntail{$id}}, 0); # remember this break
# NOTE(review): @slep{$id} is a one-element hash slice yielding the
# buffered-lines array ref for $id -- presumably intentional; confirm.
push(@{$head{$id}}, report_delay($id, 'head', @slep{$id}, $tx1, $dt));
# @slep{$id}=();
# $nslep{$id}=0;
}
################################################################################
# guess packet interval by median transmit interval
# Estimate the nominal packet interval for a buffer of crude log lines:
# the (upper) median of the successive transmit-time differences.
sub p_interval{
    my $buffered = shift;        # array ref of raw log lines
    my ($prev_tx, @intervals);
    for my $line (@$buffered){
        my $cur_tx = &tx($line);
        push @intervals, $cur_tx - $prev_tx if $prev_tx;
        $prev_tx = $cur_tx;
    }
    my @ordered = sort { $a <=> $b } @intervals;
    # Pick the element at index int(n/2) of the sorted differences.
    return $ordered[ int( scalar(@ordered) / 2 ) ];
}
# Extract the transmit timestamp from a single crude log line.
# Understands both the native "Tx=<sec.frac>" field and BV's condensed
# format ($bv_fmt); returns -1 when neither format matches.
sub tx{
    my $line = shift;
    my $seq;
    return $1 if $line =~ /Tx=(\d+\.\d+)/;
    if ( ($seq, $src, $tx, $rx) = $line =~ /$bv_fmt/ ){
        return $tx;
    }
    return -1;
}
# Extract the receive timestamp from a single crude log line.
# Understands both the native "Rx=<sec.frac>" field and BV's condensed
# format ($bv_fmt); returns -1 when neither format matches.
sub rx{
    my $line = shift;
    my $seq;
    return $1 if $line =~ /Rx=(\d+\.\d+)/;
    if ( ($seq, $src, $tx, $rx) = $line =~ /$bv_fmt/ ){
        return $rx;
    }
    return -1;
}
# emit_stats -- flush the accumulated report line for one completed gap
# of flow $id.  If any sequence numbers recorded in %lost are still
# outstanding (not cancelled by a reordered arrival), the head and tail
# reports are appended and the line is printed; otherwise the gap turned
# out to be pure reordering and the pending line is dropped.
sub emit_stats{
my $id=shift;
my @lostseq= ();
# NOTE(review): reordered packets only undef the value, leaving the key,
# so keys here still counts them -- confirm this is the intended test.
@lostseq= keys %{$lost{$id}}; # if %lost{$id};
# print "head###########\n",@{$head{$id}}, "tail ####\n",@{$tail{$id}};
# printf "head %3d %s tail %3d %s\n", $#{$head{$id}}+1, &report_delay($head{$id}),
if ( $#lostseq >= 0 ){ # still lost packets later (reorder)
$print_line .= sprintf "head %s tail %s\n", shift(@{$head{$id}}), shift(@{$tail{$id}});
print_line(\$print_line);
} else {
$print_line='';
# print stderr "reordering fixed : $print_line\n";
}
# $get_tail{$id}=0;
# $ntail{$id}=0;
# NOTE(review): assigning an empty list to a scalar hash element yields
# undef (it does not reset an array) -- presumably "reset" is intended.
$tail{$id}=();
$lost{$id}=();
}
sub report_delay{ # jitter for one delay
my $id=shift;
my $type=shift; # head, tail
my $refl=shift; # array of lines
my $txgap=shift;
my $dt=shift;
# my @l=@$refl;
my $ptx=0, $prx=0, $sumjit=0, $njit=0, $ndelay=0, $sumdd=0, $sumdelay=0;
my $start=$#$refl-$maxhead;
my $mindelay, $tail_delay;
my @rdelay=(), @rtx=(), $tx0=0;
my @rrx=(), $rx0=0;
my $rudeid, $seq, $src, $dst, $tx, $rx, $size;
foreach $i( 0 .. ($#$refl - 0) ){ # skip the last one which might be after the gap
my $rline=\@$refl[$i];
if ( ( ($rudeid, $seq, $src, $dst, $tx, $rx, $size)=
$$rline =~ /ID=(\d+)\s+SEQ=(\d+)\s+SRC=([\d.:]+)\s+DST=([\d.:]+)\s+Tx=([\d.,]+)\s+Rx=([\d.,]+).+SIZE=(\d+)/)
|| ( ($seq, $src, $tx, $rx) = $$rline =~ /$bv_fmt/ )
){
$delay=$rx-$tx;
if ( $i < $start ) {
if ( !$mindelay || $delay < $mindelay){
$mindelay = $delay;
}
} else {
if ( !$taildelay || $delay < $taildelay){
$taildelay = $delay;
}
if ( $tx0 == 0 ) {
$tx0=$tx;
$rx0=$rx;
if ( $type eq "head" ){ # use start of head for tail also
push(@{$txgap{$id}}, $txgap); # stack head if nested gaps
} elsif ( $type eq "tail"){ # pull from stacked head
$txgap=shift( @{$txgap{$id}} );
}
}
$sumdelay+=$delay;
$ndelay++;
if ($ptx && ($seq - $pseq) == 1){
$dtx=$tx-$ptx;
$drx=$rx-$prx;
$jit=$drx-$dtx;
$sumjit+=$jit;
$njit++;
$sumdd+=$delay;
push(@rtx, ($tx-$tx0)*1000); #ms
push(@rrx, ($rx-$rx0)*1000); #ms
push(@rdelay, $delay);
}
$ptx=$tx;
$prx=$rx;
$pdelay=$delay;
$pseq=$seq;
}
}
}
for ($i=0; $i<=$#rdelay; $i++){ # relative delay in ms
$rdelay[$i]=($rdelay[$i]-$mindelay)*1000;
}
$sumdd=$sumdd-$njit*$mindelay; # sum differences from minimum
if($njit> 0 && $ndelay > 0){
$lineFit = Statistics::LineFit->new();
my @slope=(), $slopes=""; my $lr_start; my $lr_a; my $lr_b;
my $cc; # chart object
my $ctx; # chart context
my $ctx_lr;
if ( $opt_graph && $emit_graph{$id}){
$cc = Chart::Clicker->new( width=>800, height=>600);
if ( $dt > 0) {
$cc->title->text(sprintf "$title $type $id %.3fs", $dt);
} else {
$cc->title->text("$title $type $id");
}
$ctx = $cc->get_context('default');
$ctx->renderer(Chart::Clicker::Renderer::Point->new);
# $ctx->renderer(Chart::Clicker::Renderer::Line->new);
$ctx->domain_axis->label('Time(ms)');
$ctx->range_axis->label('d-delay(ms)');
my $series = Chart::Clicker::Data::Series->new( name => 'delay',
keys => \@rtx, values => \@rdelay);
my $ds = Chart::Clicker::Data::DataSet->new(series => [ $series ]);
$cc->add_to_datasets($ds);
$ctx_lr=Chart::Clicker::Context->new( name => 'LR' );
$ctx_lr->renderer(Chart::Clicker::Renderer::Line->new);
$ctx_lr->share_axes_with($ctx);
$cc->add_to_contexts($ctx_lr);
}
foreach ( $i=0; $i < $#rtx-4; $i+=10){
my $lr_start, $lr_end;
if ( $type eq "head"){ # analyze head from end and tail from start
$lr_start=$i; $lr_end=$#rtx;
} else {
$lr_start=0; $lr_end=min($i+10,$#rtx);
}
my @drtx=@rtx[$lr_start..$lr_end];
my @drdelay=@rdelay[$lr_start..$lr_end];
# virker ikke
# ($yfit, $coeffs) = fitpoly1d \@drtx, \@drdelay, 4; # Fit a cubi