Commit 3bf8768d authored by Cedric Roux

Merge branch 'develop_integration_w50' into 'develop'

Develop integration w50

See merge request !61
parents 01c50449 5ebdbf9f
......@@ -119,6 +119,7 @@ endmacro(add_list_string_option)
####################################################
# compilation flags
#############################################
#set(CMAKE_BUILD_TYPE "Debug")
if (CMAKE_BUILD_TYPE STREQUAL "")
set(CMAKE_BUILD_TYPE "RelWithDebInfo")
endif()
......@@ -235,6 +236,7 @@ add_boolean_option(DEBUG_OMG False "???")
add_boolean_option(XFORMS False "This adds the possibility to see the signal oscilloscope")
add_boolean_option(PRINT_STATS False "This adds the possibility to see the status")
add_boolean_option(T_TRACER False "Activate the T tracer, a debugging/monitoring framework" )
add_boolean_option(UE_AUTOTEST_TRACE False "Activate UE autotest specific logs")
add_boolean_option(DEBUG_CONSOLE False "makes debugging easier, disables stdout/stderr buffering")
......@@ -475,7 +477,7 @@ include_directories ("${X2AP_DIR}")
# Hardware dependent options
###################################
add_list1_option(NB_ANTENNAS_RX "2" "Number of antennas in reception" "1" "2" "4")
add_list1_option(NB_ANTENNAS_TX "2" "Number of antennas in transmission" "1" "2" "4")
add_list1_option(NB_ANTENNAS_TX "4" "Number of antennas in transmission" "1" "2" "4")
add_list1_option(NB_ANTENNAS_TXRX "2" "Number of antennas in ????" "1" "2" "4")
add_list2_option(RF_BOARD "EXMIMO" "RF head type" "None" "EXMIMO" "OAI_USRP" "OAI_BLADERF" "CPRIGW" "OAI_LMSSDR")
......@@ -1069,11 +1071,14 @@ set(PHY_SRC
${OPENAIR1_DIR}/PHY/MODULATION/slot_fep_mbsfn.c
${OPENAIR1_DIR}/PHY/MODULATION/slot_fep_ul.c
${OPENAIR1_DIR}/PHY/MODULATION/ul_7_5_kHz.c
${OPENAIR1_DIR}/PHY/MODULATION/beamforming.c
${OPENAIR1_DIR}/PHY/MODULATION/compute_bf_weights.c
${OPENAIR1_DIR}/PHY/LTE_ESTIMATION/freq_equalization.c
${OPENAIR1_DIR}/PHY/LTE_ESTIMATION/lte_sync_time.c
${OPENAIR1_DIR}/PHY/LTE_ESTIMATION/lte_sync_timefreq.c
${OPENAIR1_DIR}/PHY/LTE_ESTIMATION/lte_adjust_sync.c
${OPENAIR1_DIR}/PHY/LTE_ESTIMATION/lte_dl_channel_estimation.c
${OPENAIR1_DIR}/PHY/LTE_ESTIMATION/lte_dl_bf_channel_estimation.c
${OPENAIR1_DIR}/PHY/LTE_ESTIMATION/lte_dl_mbsfn_channel_estimation.c
${OPENAIR1_DIR}/PHY/LTE_ESTIMATION/lte_ul_channel_estimation.c
${OPENAIR1_DIR}/PHY/LTE_ESTIMATION/lte_est_freq_offset.c
......@@ -1966,7 +1971,7 @@ target_link_libraries (oaisim_nos1 ${T_LIB})
# Unitary tests for each piece of L1: for example, mbmssim is the MBMS L1 simulator
#####################################
foreach(myExe dlsim ulsim pbchsim scansim mbmssim pdcchsim pucchsim prachsim syncsim)
foreach(myExe dlsim dlsim_tm7 ulsim pbchsim scansim mbmssim pdcchsim pucchsim prachsim syncsim)
add_executable(${myExe}
${OPENAIR_BIN_DIR}/messages_xml.h
${OPENAIR1_DIR}/SIMULATION/LTE_PHY/${myExe}.c
......@@ -2048,6 +2053,7 @@ if (${T_TRACER})
add_dependencies(oaisim generate_T)
add_dependencies(oaisim_nos1 generate_T)
add_dependencies(dlsim generate_T)
add_dependencies(dlsim_tm7 generate_T)
add_dependencies(ulsim generate_T)
add_dependencies(pbchsim generate_T)
add_dependencies(scansim generate_T)
......
......@@ -75,7 +75,7 @@ class openair(core):
return (stdout, stderr)
def connect(self, username, password, prompt='PEXPECT_OAI'):
max_retries=100
max_retries=10
i=0
while i <= max_retries:
self.prompt1 = prompt
......@@ -97,9 +97,9 @@ class openair(core):
# need to look for the prompt string twice
self.oai.prompt()
self.oai.prompt()
self.oai.sendline('uptime')
self.oai.prompt()
print self.oai.before
# self.oai.sendline('uptime')
# self.oai.prompt()
# print self.oai.before
break
except Exception, e:
error=''
......@@ -171,7 +171,7 @@ class openair(core):
sys.exit(1)
def disconnect(self):
print 'disconnecting the ssh connection to ' + self.address + '\n'
# print 'disconnecting the ssh connection to ' + self.address + '\n'
self.oai.send('exit')
# self.cancel()
......
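For reference, a minimal sketch of the retry/connect/command pattern shown in the diff above, written with pexpect's pxssh helper. The host, username, password and retry count are placeholders; this is not the project's actual connect() implementation.

import sys
from pexpect import pxssh

def connect_with_retries(address, username, password, max_retries=10):
    # try the ssh login a bounded number of times, as in openair.connect()
    for attempt in range(max_retries):
        try:
            session = pxssh.pxssh()
            session.login(address, username, password)
            return session
        except pxssh.ExceptionPxssh as e:
            print('ssh attempt %d failed: %s' % (attempt + 1, e))
    sys.exit(1)

# usage sketch (placeholder credentials): run a command, read its output, disconnect
# s = connect_with_retries('192.168.1.10', 'oai', 'secret')
# s.sendline('uptime')
# s.prompt()            # wait for the shell prompt again
# print(s.before)       # everything printed before the prompt
# s.logout()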
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>OAI5G UE Autotest Report</title>
<script type="text/javascript">
function showhide(id) {
var e = document.getElementById(id);
e.style.display = (e.style.display == 'block') ? 'none' : 'block';
}
</script>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
padding: 5px;
}
</style>
</head>
<body>
<center>
<h2>OAI5G UE Autotest Report</h2>
</center>
<p>
<table border>
<caption>Test session configuration</caption>
<tr><td>Start time</td><td>{{test_session_start_time}}</td></tr>
<tr><td>Stop time</td><td>{{test_session_stop_time}}</td></tr>
<tr><td>Duration</td><td>{{test_session_duration}}</td></tr>
<tr><td>MTC host</td><td>{{mtc_host}}</td></tr>
<tr><td>User</td><td>{{user}}</td></tr>
<tr><td>Password</td><td>{{password}}</td></tr>
</table>
</p>
<h3>Test Setup</h3>
To be completed
<br/>
<h3>UE phy-test performance test results</h3>
<h4>Objectives</h4>
<p>Checks that the OAI UE can achieve at least 75 percent of the theoretical throughput.</p>
<p>Tests are run for all MCS values (0 to 28) at 5 MHz and 10 MHz bandwidths.</p>
<h4>Results</h4>
<table>
<TR><TH>ID</TH><TH>TAG</TH><TH>VERDICT</TH><TH>NB RUNS</TH><TH>PASS</TH><TH>FAILED</TH><TH>INCON</TH><TH>SKIPPED</TH><TH>SEG FAULT</TH><TH>TC Timeout</TH><TH>Start</TH><TH>Stop</TH><TH>Duration</TH><TH>Details</TH></TR>
{% for result in test_results|sort(attribute='testcase_name') %}
<TR>
<TD >{{result.testcase_name}}</TD>
<TD align="right">{{result.tags}}</TD>
{% if result.testcase_verdict == "PASS" %}
<TD align="center" style="background-color:green">{{result.testcase_verdict}}</TD>
{% elif result.testcase_verdict == "FAIL" %}
<TD align="center" style="background-color:red">{{result.testcase_verdict}}</TD>
{% else %}
<TD align="center" style="background-color:orange">{{result.testcase_verdict}}</TD>
{% endif %}
<TD align='center'>{{result.nruns}}</TD>
<TD align='center'>{{result.nb_run_pass}}</TD>
<TD align='center'>{{result.nb_run_failed}}</TD>
<TD align='center'>{{result.nb_run_inc}}</TD>
<TD align='center'>{{result.nb_run_skip}}</TD>
<TD align='center'>{{result.nb_seg_fault}}</TD>
<TD >{{result.testcase_timeout}}</TD>
<TD >{{result.testcase_time_start.strftime('%Y-%m-%d %H:%M:%S')}}</TD>
<TD >{{result.testcase_time_stop.strftime('%Y-%m-%d %H:%M:%S')}}</TD>
<TD >{{result.testcase_duration}}</TD>
<TD ><a href="{{ result.testcase_name }}/{{ result.testcase_name }}_report.html">{{ result.testcase_name }}_report.html</a></TD>
</TR>
{% endfor %}
</table>
<br/>
<h3>UE phy-test stability test results</h3>
<h4>Objectives</h4>
<p>To be completed</p>
<h4>Results</h4>
To be completed
</body>
</html>
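A hedged sketch of the Jinja2 context this session-report template expects; the keys are taken from the variables referenced above, while the values and the single result entry are placeholders, not output from a real test session.

import datetime

session_context = {
    'test_session_start_time': '2016-12-12 10:00:00',   # placeholder values
    'test_session_stop_time':  '2016-12-12 12:30:00',
    'test_session_duration':   '2:30:00',
    'mtc_host': 'localhost',
    'user': 'oai',
    'password': '********',
    'test_results': [
        # each entry feeds one row of the results table
        {'testcase_name': '032800', 'tags': 'phy-test', 'testcase_verdict': 'PASS',
         'nruns': 2, 'nb_run_pass': 2, 'nb_run_failed': 0, 'nb_run_inc': 0,
         'nb_run_skip': 0, 'nb_seg_fault': 0, 'testcase_timeout': 600,
         'testcase_time_start': datetime.datetime(2016, 12, 12, 10, 0),
         'testcase_time_stop':  datetime.datetime(2016, 12, 12, 10, 5),
         'testcase_duration': '0:05:00'},
    ],
}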
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>OAI5G UE test case report</title>
<script type="text/javascript">
function showhide(id) {
var e = document.getElementById(id);
e.style.display = (e.style.display == 'block') ? 'none' : 'block';
}
</script>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
padding: 5px;
}
</style>
</head>
<body>
<center>
<h2>OAI5G UE test case report details</h2>
</center>
<h3>Test Case description</h3>
<p>
<table border>
<tr><td>ID</td><td>{{testcase_name}}</td></tr>
<tr><td>TAG</td><td>{{tags}}</td></tr>
<tr><td>class</td><td>{{testcaseclass}}</td></tr>
<tr><td>description</td><td></td></tr>
<tr><td>timeout</td><td>{{testcase_timeout}}</td></tr>
<tr><td>number of runs</td><td>{{nruns}}</td></tr>
<tr><td>eNB machine</td><td>{{testcase_eNBMachine}}</td></tr>
<tr><td>UE machine</td><td>{{testcase_UEMachine}}</td></tr>
</table>
</p>
<h3>Test Case execution</h3>
<p>
<table border>
<tr><td>testcase_time_start</td><td>{{testcase_time_start}}</td></tr>
<tr><td>testcase_time_stop</td><td>{{testcase_time_stop}}</td></tr>
<tr><td>testcase_duration</td><td>{{testcase_duration}}</td></tr>
<tr><td>Nb runs</td><td>{{nruns}}</td></tr>
<tr><td>Nb PASS</td><td>{{nb_run_pass}}</td></tr>
<tr><td>Nb FAILED</td><td>{{nb_run_failed}}</td></tr>
<tr><td>Nb INCONCLUSIVE</td><td>{{nb_run_inc}}</td></tr>
<tr>
<td>testcase_verdict</td>
{% if testcase_verdict == "PASS" %}
<TD align="center" style="background-color:green">{{testcase_verdict}}</TD>
{% elif testcase_verdict == "FAIL" %}
<TD align="center" style="background-color:red">{{testcase_verdict}}</TD>
{% else %}
<TD align="center" style="background-color:orange">{{testcase_verdict}}</TD>
{% endif %}
</tr>
<tr><td>Nb Seg Fault</td><td>{{nb_seg_fault}}</td></tr>
</table>
</p>
<h3>Test Case runs results</h3>
{% for run_results in runs_results|sort(attribute='run_id') %}
<h4>RUN {{run_results.run_id}} </h4>
<table border>
<tr><td>run_start_time </td><td>{{run_results.run_start_time}}</td></tr>
<tr><td>run_stop_time</td><td>{{run_results.run_stop_time}}</td></tr>
<tr><td>run_duration</td><td>{{run_results.run_duration}}</td></tr>
<tr>
<td>run_verdict</td>
{% if run_results.run_verdict == "PASS" %}
<TD align="center" style="background-color:green">{{run_results.run_verdict}}</TD>
{% elif run_results.run_verdict == "FAIL" %}
<TD align="center" style="background-color:red">{{run_results.run_verdict}}</TD>
{% else %}
<TD align="center" style="background-color:orange">{{run_results.run_verdict}}</TD>
{% endif %}
</tr>
<tr><td>Seg Fault Status</td>
{% if run_results.ue_seg_fault_status == "NO_SEG_FAULT" %}
<TD align="center" style="background-color:green">{{run_results.ue_seg_fault_status}}</TD>
{% elif run_results.ue_seg_fault_status == "SEG_FAULT" %}
<TD align="center" style="background-color:red">{{run_results.ue_seg_fault_status}}</TD>
{% else %}
<TD align="center" style="background-color:orange">unknown</TD>
{% endif %}
</tr>
</table>
{% for run_metrics in run_results.runs_metrics %}
<br/>
<table border>
<tr><td>metric_id</td><td>{{run_metrics.metric_id}}</td></tr>
<tr><td>Description</td><td>{{run_metrics.metric_desc}}</td></tr>
<tr><td>Unit of measure</td><td>{{run_metrics.metric_uom}}</td></tr>
<tr><td>metric_min</td><td>{{run_metrics.metric_min}}</td></tr>
<tr><td>metric_max</td><td>{{run_metrics.metric_max}}</td></tr>
<tr><td>metric_mean</td><td>{{run_metrics.metric_mean}}</td></tr>
<tr><td>metric_median</td><td>{{run_metrics.metric_median}}</td></tr>
<tr><td colspan="2"></td></tr>
{% if run_metrics.pass_fail_stat is defined %}
<tr><td>Pass/fail stat</td><td>{{run_metrics.pass_fail_stat}}</td></tr>
{% endif %}
{% if run_metrics.pass_fail_min_limit is defined %}
<tr><td>Pass/fail min limit</td><td>{{run_metrics.pass_fail_min_limit}}</td></tr>
{% endif %}
{% if run_metrics.pass_fail_max_limit is defined %}
<tr><td>Pass/fail max limit</td><td>{{run_metrics.pass_fail_max_limit}}</td></tr>
{% endif %}
<tr><td colspan="2"></td></tr>
<tr><td>metric_fig</td><td><IMG src="{{run_metrics.metric_fig}}"></td></tr>
</table>
{% endfor %}
{% if run_results.run_traffic.traffic_count != 0 %}
<br/>
<table border>
<TR><TH>Iperf metric</TH><TH>min</TH><TH>max</TH><TH>mean</TH><TH>median</TH></TR>
<TR><td>Bandwidth</td><td>{{run_results.run_traffic.bw_min}}</td><td>{{run_results.run_traffic.bw_max}}</td><td>{{run_results.run_traffic.bw_mean}}</td><td>{{run_results.run_traffic.bw_median}}</td></TR>
<TR><td>Jitter</td><td>{{run_results.run_traffic.jitter_min}}</td><td>{{run_results.run_traffic.jitter_max}}</td><td>{{run_results.run_traffic.jitter_mean}}</td><td>{{run_results.run_traffic.jitter_median}}</td></TR>
<TR><td>Loss rate</td><td>{{run_results.run_traffic.rl_min}}</td><td>{{run_results.run_traffic.rl_max}}</td><td>{{run_results.run_traffic.rl_mean}}</td><td>{{run_results.run_traffic.rl_median}}</td></TR>
<TR><td colspan="5"></td></TR>
<TR><td>Iperf duration</td><td>{{run_results.run_traffic.iperf_duration}}</td><td></td><td>Pass/Fail criteria (min duration)</td><td>{{run_results.run_traffic.dur_pass_fail_crit}}</td></TR>
<TR><td colspan="5"></td></TR>
<tr><td>traffic_fig</td><td colspan="4"><IMG src="{{run_results.run_traffic.traffic_fig}}"></td></tr>
</table>
{% endif %}
{% endfor %}
</body>
</html>
......@@ -35,12 +35,15 @@ import getopt
import sys
from subprocess import call
import encoder
sys.path.append(os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/tools/'))
#test_cases = ('030001', '030901', '031001', '031601', '031701', '031801', '031901', '032001', '032101', '032201', '032301', '032501', '032601', '032801')
test_cases = ('030030' , '030030' )
test_cases = ('032800' , '032730' )
nb_run = 3
nb_run = 2
def error_opt(msg):
print("Option error: " + msg)
......@@ -58,14 +61,27 @@ def main(args):
# metric = {}
# metric['id'] = 'UE_DLSCH_BITRATE'
# metric['description'] = 'UE downlink physical throughput'
# metric['regex'] = '(UE_DLSCH_BITRATE) =\s+(\d+\.\d+) kbps.+frame = (\d+)\)'
# metric['unit_of_meas'] = 'kbps'
# metric['min_limit'] = 14668.8
#AUTOTEST Metric : RRC Measurments RSRP[0]=-97.60 dBm/RE, RSSI=-72.83 dBm, RSRQ[0] 9.03 dB, N0 -125 dBm/RE, NF 7.2 dB (frame = 4490)
metric = {}
metric['id'] = 'UE_DLSCH_BITRATE'
metric['description'] = 'UE downlink physical throughput'
metric['regex'] = '(UE_DLSCH_BITRATE) =\s+(\d+\.\d+) kbps.+frame = (\d+)\)'
metric['id'] = 'UE_DL_RRC_MEAS'
metric['description'] = 'UE downlink RRC Measurements'
metric['nb_metric'] = 5
# metric['regex'] = 'AUTOTEST Metric : RRC Measurments (RSRP\[0\])=(-?\d+\.?\d*)\s+(.+),\s+(RSRQ\[0\])=(-?\d+\.?\d*)\s+(.+),,\s+(N0)=(-?\d+\.?\d*)\s+(.+),,\s+(NF)=(-?\d+\.?\d*)\s+(.+)\s+\(frame = (\d+)\) '
metric['regex'] = 'AUTOTEST Metric : RRC Measurments (RSRP\[0\])=(-?\d+\.?\d*)\s+(.+)\,\s+(RSSI)=(-?\d+\.?\d*)\s+(.+)\,\s+(RSRQ\[0\])=(-?\d+\.?\d*)\s+(.+)\,\s+(N0)=(-?\d+\.?\d*)\s+(.+)\,\s+(NF)=(-?\d+\.?\d*)\s+(.+)\s+\(frame = (\d+)\)'
metric['unit_of_meas'] = 'kbps'
metric['min_limit'] = 14668.8
#report_path = log_path+'/report/'
#os.system(' mkdir -p ' + report_path)
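# A hedged sketch of how a metric dictionary like the one above can be applied to a
# UE execution log with the standard re module. The log path is a placeholder, and
# the log lines are assumed to match metric['regex']; this is not the project's
# analyser code.
import re

def collect_metric_matches(metric, log_file):
    # scan the UE log and collect the captured groups of every matching line
    pattern = re.compile(metric['regex'])
    matches = []
    with open(log_file) as f:
        for line in f:
            m = pattern.search(line)
            if m:
                matches.append(m.groups())
    return matches

# usage sketch (placeholder path):
# samples = collect_metric_matches(metric, '../log/032800/run_0/UE_exec_0_.log')
# print('matched %d measurement lines' % len(samples))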
......@@ -74,58 +90,44 @@ def main(args):
#return(0)
for test_case in test_cases:
# print test_case
if test_case == '030001':
metric['min_limit'] = 500.0
if test_case == '030901':
metric['min_limit'] = 640.0
if test_case == '031001':
metric['min_limit'] = 3200.0
if test_case == '031601':
metric['min_limit'] = 5920.0
if test_case == '031701':
metric['min_limit'] = 6000.0
if test_case == '031801':
metric['min_limit'] = 6200.0
if test_case == '031901':
metric['min_limit'] = 7000.0
if test_case == '032001':
metric['min_limit'] = 7800.0
if test_case == '032101':
metric['min_limit'] = 8000.0
if test_case == '032201':
metric['min_limit'] = 9000.0
if test_case == '032301':
metric['min_limit'] = 10000.0
if test_case == '032501':
metric['min_limit'] = 11000.0
if test_case == '032601':
metric['min_limit'] = 12000.0
if test_case == '032801':
metric['min_limit'] = 12500.0
if test_case == '035201':
metric['min_limit'] = 14668.8
if test_case == '036001':
metric['min_limit'] = 25363.2
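# Editor's sketch: the per-test-case limits above could equally be kept in a lookup
# table instead of a long if chain. The values below are copied from the checks
# above; the 500.0 default used when a test case has no entry is an assumption.
def get_min_limit(test_case_id, default=500.0):
    min_limits = {
        '030001': 500.0,   '030901': 640.0,   '031001': 3200.0,
        '031601': 5920.0,  '031701': 6000.0,  '031801': 6200.0,
        '031901': 7000.0,  '032001': 7800.0,  '032101': 8000.0,
        '032201': 9000.0,  '032301': 10000.0, '032501': 11000.0,
        '032601': 12000.0, '032801': 12500.0, '035201': 14668.8,
        '036001': 25363.2,
    }
    return min_limits.get(test_case_id, default)
# e.g. metric['min_limit'] = get_min_limit('032801')   # -> 12500.0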
test_results = []
for test_case in test_cases:
for i in range(0, nb_run):
fname = 'log//'+test_case+'/run_'+str(i)+'/UE_exec_'+str(i)+'_.log'
fname = '..//log//'+test_case+'/run_'+str(i)+'/UE_exec_'+str(i)+'_.log'
args = {'metric' : metric,
'file' : fname }
cell_synch_status = analyser.check_cell_synchro(fname)
if cell_synch_status == 'CELL_SYNCH':
print '!!!!!!!!!!!!!! Cell synchronized !!!!!!!!!!!'
metric_checks_flag = 0
else :
print '!!!!!!!!!!!!!! Cell NOT NOT synchronized !!!!!!!!!!!'
# cell_synch_status = analyser.check_cell_synchro(fname)
# if cell_synch_status == 'CELL_SYNCH':
# print '!!!!!!!!!!!!!! Cell synchronized !!!!!!!!!!!'
# metric_checks_flag = 0
# else :
# print '!!!!!!!!!!!!!! Cell NOT NOT synchronized !!!!!!!!!!!'
# metric_extracted = analyser.do_extract_metrics(args)
# metrics_extracted = analyser.do_extract_metrics_new(args)
# de-xmlfy test report
xml_file = '..//log//'+test_case+'/test.'+test_case+'_ng.xml'
print xml_file
# test_result =
# test_results.append(test_result)
# xmlFile = logdir_local_testcase + '/test.' + testcasename + '.xml'
# xml="\n<testcase classname=\'"+ testcaseclass + "\' name=\'" + testcasename + "."+tags + "\' Run_result=\'" + test_result_string + "\' time=\'" + str(duration) + " s \' RESULT=\'" + testcase_verdict + "\'></testcase> \n"
# write_file(xmlFile, xml, mode="w")
# xmlFile_ng = logdir_local_testcase + '/test.' + testcasename + '_ng.xml'
# xml_ng = xmlify(test_result, wrap=testcasename, indent=" ")
# write_file(xmlFile_ng, xml_ng, mode="w")
# print "min = "+ str( metric_extracted['metric_min'] )
# print "min_index = "+ str( metric_extracted['metric_min_index'] )
......@@ -143,16 +145,27 @@ def main(args):
# print fname
# analyser.do_img_metrics(metric, metric_extracted, fname)
# fname = 'log//'+test_case+'/run_'+str(i)+'/UE_traffic_'+str(i)+'_.log'
# fname = 'log//'+test_case+'/run_'+str(i)+'/UE_traffic_'+str(i)+'_.log'
# args = {'file' : fname }
# args = {'file' : fname }
# traffic_metrics = analyser.do_extract_traffic_metrics(args)
# traffic_metrics = analyser.do_extract_traffic_metrics(args)
# fname= 'report/iperf_'+test_case+'_'+str(i)+'.png'
# fname= 'report/iperf_'+test_case+'_'+str(i)+'.png'
# print fname
# analyser.do_img_traffic(traffic_metrics, fname)
# print fname
# analyser.do_img_traffic(traffic_metrics, fname)
for test_result in test_results:
cmd = 'mkdir -p ' + report_dir + '/'+ test_result['testcase_name']
result = os.system(cmd)
report_file = report_dir + '/'+ test_result['testcase_name'] + '/'+ test_result['testcase_name']+ '_report.html'
analyser.create_test_report_detailed_html(test_result, report_file )
print test_result
......
......@@ -41,7 +41,7 @@ from jinja2 import Environment, FileSystemLoader
PATH = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_ENVIRONMENT = Environment(
autoescape=False,
loader=FileSystemLoader(os.path.join(PATH, 'templates')),
loader=FileSystemLoader(os.path.join(PATH, '../templates')),
trim_blocks=False)
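# A hedged sketch of how this environment is typically used to produce the HTML
# reports shown earlier; the template file name and the context keys are assumptions
# based on those templates, not the exact calls made elsewhere in this module.
def render_report_sketch(context):
    template = TEMPLATE_ENVIRONMENT.get_template('ue_autotest_report.html')  # hypothetical name
    return template.render(context)

# html = render_report_sketch({'test_session_start_time': '...',
#                              'mtc_host': 'localhost',
#                              'test_results': []})
# the rendered string would then be written to the report file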
......@@ -103,6 +103,129 @@ def do_extract_metrics(args):
}
return(ret)
def do_extract_metrics_new(args):
# print ""
# print "do_extract_metrics ... "
fname = args['file']
metric = args['metric']
print(fname)
print 'metric id = ' + metric['id']
print 'metric regex = ' + metric['regex']
count = 0
mmin = 0
mmin_index = 0
mmax = 0
mmax_index = 0
mean = 0
median = 0
toto = [('id', 'S20'), ('metric', np.float), ('frame', np.int)]
print toto
np_format = []
for x in range(0, metric['nb_metric']):
np_format.append( ('id'+str(x), 'S20') )
np_format.append( ('metric'+str(x), np.float) )
np_format.append( ('uom'+str(x), 'S20') )
np_format.append( ('frame', np.int))
print np_format
output = np.fromregex(fname,metric['regex'], np_format)
print output
count = output['frame'].size
print count
if count > 0:
fontP = FontProperties()
fontP.set_size('small')
fig = plt.figure(1)
plt.figure(figsize=(10,10))
plot_xmax = np.amax(output['frame'])+np.amin(output['frame'])
for x in range(0, metric['nb_metric']):
metric_name = output['id'+str(x)][0]
metric_uom = output['uom'+str(x)][0]
mmin = np.amin(output['metric'+str(x)])
mmax = np.amax(output['metric'+str(x)])
mmean = np.mean(output['metric'+str(x)])
mmedian = np.median(output['metric'+str(x)])
plot_loc = 100*metric['nb_metric']+10+x+1
sbplt = plt.subplot(plot_loc)
sbplt.plot(output['frame'], output['metric'+str(x)], color='b' )
sbplt.set_title( metric_name+' ('+metric_uom+')')
if mmin < 0:
sbplot_ymin=mmin+mmin/10
else:
sbplot_ymin=0
sbplt.set_ylim(ymin=sbplot_ymin)
if mmax > 0:
sbplot_ymax=mmax+mmax/10
else:
sbplot_ymax=0
sbplt.set_ylim(ymax=sbplot_ymax)
sbplt.set_xlim(xmax=plot_xmax)
sbplt.set_xlim(xmin=0)
text='min: '+str(mmin)+'\nmax: '+str(mmax)+'\nmean: '+str(mmean)+'\nmedian: '+str(mmedian)
sbplt.text( plot_xmax+10,sbplot_ymin,text)
sbplt.set_xlabel('frame')
sbplt.set_ylabel(metric_name)
plt.tight_layout()
fname = "toto.png"
# lgd = plt.legend(prop=fontP, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
mng = plt.get_current_fig_manager()
plt.savefig(fname, bbox_inches='tight')
plt.close()
mmin = np.amin(output['metric']);
mmin_index = np.argmin(output['metric']);
mmax = np.amax(output['metric']);
mmax_index = np.argmax(output['metric']);
mean = np.mean(output['metric']);
median = np.median(output['metric']);
# print ( ( (metric['min_limit'] > output['metric']).sum() / float(output['metric'].size) ) * 100 )
ret = { 'metric_count' : count,
'metric_buf' : output,
'metric_min' : mmin,
'metric_min_index' : mmin_index,
'metric_max' : mmax,
'metric_max_index' : mmax_index,
'metric_mean' : mean,
'metric_median' : median,
}
return(ret)
#
#
#
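# A minimal, self-contained sketch of the np.fromregex pattern used in
# do_extract_metrics_new() above. The log format, file name and field names are
# invented for illustration only; the real autotest code builds its regex and
# structured dtype from metric['regex'] and metric['nb_metric'].
import numpy as np

with open('demo_metric.log', 'w') as f:
    f.write('METRIC=12.5 (frame = 10)\n')
    f.write('METRIC=13.0 (frame = 20)\n')

np_format = [('metric0', float), ('frame', int)]
output = np.fromregex('demo_metric.log', r'METRIC=(\d+\.?\d*) \(frame = (\d+)\)', np_format)

print(output['metric0'].size)      # number of matched lines -> 2
print(np.mean(output['metric0']))  # -> 12.75
print(np.amax(output['frame']))    # -> 20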
......@@ -182,24 +305,30 @@ def do_img_metrics(metric_def, metric_data, fname):
# print output['metric'].size
plt.scatter(output['frame'], output['metric'], color='b', alpha=0.33, s = 1 , label=metric_def['id'])
plt.plot([0, output['frame'][metric_data['metric_count']-1]],[ metric_def['min_limit'],metric_def['min_limit']], 'r-', lw=2, label='min limit') # Red straight line
if 'min_limit' in metric_def:
plt.plot([0, output['frame'][metric_data['metric_count']-1]],[ metric_def['min_limit'],metric_def['min_limit']], 'r-', lw=2, label='min limit') # Red straight line
plt.title('Physical throughput ('+metric_def['unit_of_meas']+')')
plt.title(metric_def['id'] +' ('+metric_def['unit_of_meas']+')')
plt.xlabel('frame')
plt.ylabel(metric_def['id'])
# Set graphic minimum Y axis
# -------------------------
if metric_data['metric_min'] == 0 :
plt.ylim(ymin=-metric_def['min_limit']/10)
if metric_data['metric_min'] < 0:
plt.ylim(ymin=metric_data['metric_min']+metric_data['metric_min']/10)
else :
plt.ylim(ymin=0)
y_axis_max = 0
if metric_data['metric_max'] > metric_def['min_limit']:
y_axis_max =metric_data['metric_max']+metric_data['metric_max']/10
if 'min_limit' in metric_def:
if metric_data['metric_max'] > metric_def['min_limit']:
y_axis_max =metric_data['metric_max']+metric_data['metric_max']/10