Commit 219016e7 authored by knopp

Merge branch 'enhancement-10-harmony' of https://gitlab.eurecom.fr/oai/openairinterface5g into enhancement-10-harmony

Conflicts:
	openair1/PHY/INIT/lte_init.c
	targets/RT/USER/lte-enb.c
	targets/RT/USER/lte-softmodem.c
	targets/SIMU/USER/init_lte.c
parents 1d331c0b 2507933c
@@ -34,4 +34,5 @@ job1:
- sshpass -p "$OAI_PASS" rsync -az -e "ssh -o StrictHostKeyChecking=no " --rsync-path="mkdir -p $NFS_TEST_RESULTS_DIR && rsync" $OPENAIR_DIR/cmake_targets/autotests/log $OAI_USER@localhost:$NFS_TEST_RESULTS_DIR
- sshpass -p "$OAI_PASS" rsync -az -e "ssh -o StrictHostKeyChecking=no " --rsync-path="mkdir -p $EXTERNAL_SHARE_DIR && rsync" $OPENAIR_DIR/cmake_targets/autotests/log $OAI_USER@localhost:$EXTERNAL_SHARE_DIR
- cat $OPENAIR_DIR/cmake_targets/autotests/log/results_autotests.xml
when: manual
only:
- triggers
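The --rsync-path="mkdir -p $DIR && rsync" idiom above makes the remote side create the destination directory before the transfer begins, since rsync alone fails when the target path does not exist. A minimal sketch of the same invocation driven from Python; the helper name and arguments are illustrative, not part of the CI scripts:

import subprocess

def push_logs(log_dir, user, host, dest_dir, password):
    # Copy autotest logs, creating dest_dir on the remote side first.
    # The --rsync-path trick runs `mkdir -p` through the remote login
    # shell before the remote rsync starts, so a missing destination
    # directory does not abort the transfer.
    cmd = [
        'sshpass', '-p', password,
        'rsync', '-az',
        '-e', 'ssh -o StrictHostKeyChecking=no',
        '--rsync-path=mkdir -p %s && rsync' % dest_dir,
        log_dir,
        '%s@%s:%s' % (user, host, dest_dir),
    ]
    subprocess.check_call(cmd)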
@@ -75,7 +75,7 @@ class openair(core):
return (stdout, stderr)
def connect(self, username, password, prompt='PEXPECT_OAI'):
-max_retries=100
+max_retries=10
i=0
while i <= max_retries:
self.prompt1 = prompt
@@ -97,9 +97,9 @@ class openair(core):
# need to look for twice the string of the prompt
self.oai.prompt()
self.oai.prompt()
-self.oai.sendline('uptime')
-self.oai.prompt()
-print self.oai.before
+# self.oai.sendline('uptime')
+# self.oai.prompt()
+# print self.oai.before
break
except Exception, e:
error=''
@@ -171,7 +171,7 @@ class openair(core):
sys.exit(1)
def disconnect(self):
-print 'disconnecting the ssh connection to ' + self.address + '\n'
+# print 'disconnecting the ssh connection to ' + self.address + '\n'
self.oai.send('exit')
# self.cancel()
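The connect() change above drops max_retries from 100 to 10 and comments out the uptime probe; the loop keeps retrying until the shell prompt can be synchronized. A hedged sketch of that retry pattern, assuming pexpect's pxssh module rather than the project's hand-rolled ssh spawn:

from pexpect import pxssh

def connect_with_retries(host, user, password, max_retries=10):
    i = 0
    while i <= max_retries:
        try:
            session = pxssh.pxssh()
            session.login(host, user, password)
            # Cheap liveness check standing in for the commented-out
            # `uptime` probe: run a no-op and wait for the prompt.
            session.sendline('true')
            session.prompt()
            return session
        except pxssh.ExceptionPxssh as e:
            i += 1
            print('ssh attempt %d/%d to %s failed: %s' % (i, max_retries, host, e))
    raise RuntimeError('could not open an ssh session to ' + host)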
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>OAI5G UE Autotest Report</title>
<script type="text/javascript">
function showhide(id) {
var e = document.getElementById(id);
e.style.display = (e.style.display == 'block') ? 'none' : 'block';
}
</script>
</head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
padding: 5px;
}
</style>
<body>
<center>
<h2>OAI5G UE Autotest Report</h2>
</center>
<p>
<table border>
<caption>Test session configuration</caption>
<tr><td>Start time</td><td>{{test_session_start_time}}</td></tr>
<tr><td>Stop time</td><td>{{test_session_stop_time}}</td></tr>
<tr><td>Duration</td><td>{{test_session_duration}}</td></tr>
<tr><td>MTC host</td><td>{{mtc_host}}</td></tr>
<tr><td>User</td><td>{{user}}</td></tr>
<tr><td>Password</td><td>{{password}}</td></tr>
</table>
</p>
<h3>Test Setup</h3>
To be completed
<br/>
<h3>UE phy-test performance test results</h3>
<h4>Objectives</h4>
<p>Checks that the OAI UE achieves at least 75 percent of the theoretical throughput.</p>
<p>Tests are run for all MCS values (0 to 28) at 5 MHz and 10 MHz bandwidth.</p>
<h4>Results</h4>
<table>
<TR><TH>ID</TH><TH>TAG</TH><TH>VERDICT</TH><TH>NB RUNS</TH><TH>PASS</TH><TH>FAILED</TH><TH>INCON</TH><TH>SKIPPED</TH><TH>SEG FAULT</TH><TH>TC Timeout</TH><TH>Start</TH><TH>Stop</TH><TH>Duration</TH><TH>Details</TH></TR>
{% for result in test_results|sort(attribute='testcase_name') %}
<TR>
<TD >{{result.testcase_name}}</TD>
<TD align="right">{{result.tags}}</TD>
{% if result.testcase_verdict == "PASS" %}
<TD align="center" style="background-color:green">{{result.testcase_verdict}}</TD>
{% elif result.testcase_verdict == "FAIL" %}
<TD align="center" style="background-color:red">{{result.testcase_verdict}}</TD>
{% else %}
<TD align="center" style="background-color:orange">{{result.testcase_verdict}}</TD>
{% endif %}
<TD align='center'>{{result.nruns}}</TD>
<TD align='center'>{{result.nb_run_pass}}</TD>
<TD align='center'>{{result.nb_run_failed}}</TD>
<TD align='center'>{{result.nb_run_inc}}</TD>
<TD align='center'>{{result.nb_run_skip}}</TD>
<TD align='center'>{{result.nb_seg_fault}}</TD>
<TD >{{result.testcase_timeout}}</TD>
<TD >{{result.testcase_time_start.strftime('%Y-%m-%d %H:%M:%S')}}</TD>
<TD >{{result.testcase_time_stop.strftime('%Y-%m-%d %H:%M:%S')}}</TD>
<TD >{{result.testcase_duration}}</TD>
<TD ><a href="{{ result.testcase_name }}/{{ result.testcase_name }}_report.html">{{ result.testcase_name }}_report.html</a></TD>
</TR>
{% endfor %}
</table>
<br/>
<h3>UE phy-test stability test results</h3>
<h4>Objectives</h4>
<p>To be completed</p>
<h4>Results</h4>
To be completed
</body>
</html>
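The summary template above is driven by a flat context plus a test_results list; each entry must carry every field referenced in its row, and testcase_time_start/testcase_time_stop must be datetime objects because the template calls .strftime() on them. An illustrative rendering sketch; the template filename and all values are assumptions:

import datetime
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader('templates'), autoescape=False)
now = datetime.datetime.now()

context = {
    'test_session_start_time': now,
    'test_session_stop_time': now,
    'test_session_duration': '0:00:00',
    'mtc_host': 'localhost',
    'user': 'oai',
    'password': '***',
    'test_results': [{
        'testcase_name': '032800', 'tags': 'UE', 'testcase_verdict': 'PASS',
        'nruns': 2, 'nb_run_pass': 2, 'nb_run_failed': 0, 'nb_run_inc': 0,
        'nb_run_skip': 0, 'nb_seg_fault': 0, 'testcase_timeout': 600,
        # datetime objects: the template calls .strftime() on these two
        'testcase_time_start': now, 'testcase_time_stop': now,
        'testcase_duration': '0:05:00',
    }],
}

# 'ue_autotest_report.html' is a placeholder template filename
html = env.get_template('ue_autotest_report.html').render(context)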
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8"/>
<title>OAI5G UE test case report</title>
<script type="text/javascript">
function showhide(id) {
var e = document.getElementById(id);
e.style.display = (e.style.display == 'block') ? 'none' : 'block';
}
</script>
</head>
<style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
padding: 5px;
}
</style>
<body>
<center>
<h2>OAI5G UE test case report details</h2>
</center>
<h3>Test Case description</h3>
<p>
<table border>
<tr><td>ID</td><td>{{testcase_name}}</td></tr>
<tr><td>TAG</td><td>{{tags}}</td></tr>
<tr><td>class</td><td>{{testcaseclass}}</td></tr>
<tr><td>description</td><td></td></tr>
<tr><td>timeout</td><td>{{testcase_timeout}}</td></tr>
<tr><td>number of runs</td><td>{{nruns}}</td></tr>
<tr><td>eNB machine</td><td>{{testcase_eNBMachine}}</td></tr>
<tr><td>UE machine</td><td>{{testcase_UEMachine}}</td></tr>
</table>
</p>
<h3>Test Case execution</h3>
<p>
<table border>
<tr><td>testcase_time_start</td><td>{{testcase_time_start}}</td></tr>
<tr><td>testcase_time_stop</td><td>{{testcase_time_stop}}</td></tr>
<tr><td>testcase_duration</td><td>{{testcase_duration}}</td></tr>
<tr><td>Nb runs</td><td>{{nruns}}</td></tr>
<tr><td>Nb PASS</td><td>{{nb_run_pass}}</td></tr>
<tr><td>Nb FAILED</td><td>{{nb_run_failed}}</td></tr>
<tr><td>Nb INCONCLUSIVE</td><td>{{nb_run_inc}}</td></tr>
<tr>
<td>testcase_verdict</td>
{% if testcase_verdict == "PASS" %}
<TD align="center" style="background-color:green">{{testcase_verdict}}</TD>
{% elif testcase_verdict == "FAIL" %}
<TD align="center" style="background-color:red">{{testcase_verdict}}</TD>
{% else %}
<TD align="center" style="background-color:orange">{{testcase_verdict}}</TD>
{% endif %}
</tr>
<tr><td>Nb Seg Fault</td><td>{{nb_seg_fault}}</td></tr>
</table>
</p>
<h3>Test Case runs results</h3>
{% for run_results in runs_results|sort(attribute='run_id') %}
<h4>RUN {{run_results.run_id}} </h4>
<table border>
<tr><td>run_start_time </td><td>{{run_results.run_start_time}}</td></tr>
<tr><td>run_stop_time</td><td>{{run_results.run_stop_time}}</td></tr>
<tr><td>run_duration</td><td>{{run_results.run_duration}}</td></tr>
<tr>
<td>run_verdict</td>
{% if run_results.run_verdict == "PASS" %}
<TD align="center" style="background-color:green">{{run_results.run_verdict}}</TD>
{% elif run_results.run_verdict == "FAIL" %}
<TD align="center" style="background-color:red">{{run_results.run_verdict}}</TD>
{% else %}
<TD align="center" style="background-color:orange">{{run_results.run_verdict}}</TD>
{% endif %}
</tr>
<tr><td>Seg Fault Status</td>
{% if run_results.ue_seg_fault_status == "NO_SEG_FAULT" %}
<TD align="center" style="background-color:green">{{run_results.ue_seg_fault_status}}</TD>
{% elif run_results.ue_seg_fault_status == "SEG_FAULT" %}
<TD align="center" style="background-color:red">{{run_results.ue_seg_fault_status}}</TD>
{% else %}
<TD align="center" style="background-color:orange">unknown</TD>
{% endif %}
</tr>
</table>
{% for run_metrics in run_results.runs_metrics %}
<br/>
<table border>
<tr><td>metric_id</td><td>{{run_metrics.metric_id}}</td></tr>
<tr><td>Description</td><td>{{run_metrics.metric_desc}}</td></tr>
<tr><td>Unit of measure</td><td>{{run_metrics.metric_uom}}</td></tr>
<tr><td>metric_min</td><td>{{run_metrics.metric_min}}</td></tr>
<tr><td>metric_max</td><td>{{run_metrics.metric_max}}</td></tr>
<tr><td>metric_mean</td><td>{{run_metrics.metric_mean}}</td></tr>
<tr><td>metric_median</td><td>{{run_metrics.metric_median}}</td></tr>
<tr><td colspan="2"></td></tr>
{% if run_metrics.pass_fail_stat is defined %}
<tr><td>Pass/fail stat</td><td>{{run_metrics.pass_fail_stat}}</td></tr>
{% endif %}
{% if run_metrics.pass_fail_min_limit is defined %}
<tr><td>Pass/fail min limit</td><td>{{run_metrics.pass_fail_min_limit}}</td></tr>
{% endif %}
{% if run_metrics.pass_fail_max_limit is defined %}
<tr><td>Pass/fail max limit</td><td>{{run_metrics.pass_fail_max_limit}}</td></tr>
{% endif %}
<tr><td colspan="2"></td></tr>
<tr><td>metric_fig</td><td><IMG src="{{run_metrics.metric_fig}}"></td></tr>
</table>
{% endfor %}
{% if run_results.run_traffic.traffic_count != 0 %}
<br/>
<table border>
<TR><TH>Iperf metric</TH><TH>min</TH><TH>max</TH><TH>mean</TH><TH>median</TH></TR>
<TR><td>Bandwidth</td><td>{{run_results.run_traffic.bw_min}}</td><td>{{run_results.run_traffic.bw_max}}</td><td>{{run_results.run_traffic.bw_mean}}</td><td>{{run_results.run_traffic.bw_median}}</td></TR>
<TR><td>Jitter</td><td>{{run_results.run_traffic.jitter_min}}</td><td>{{run_results.run_traffic.jitter_max}}</td><td>{{run_results.run_traffic.jitter_mean}}</td><td>{{run_results.run_traffic.jitter_median}}</td></TR>
<TR><td>Loss rate</td><td>{{run_results.run_traffic.rl_min}}</td><td>{{run_results.run_traffic.rl_max}}</td><td>{{run_results.run_traffic.rl_mean}}</td><td>{{run_results.run_traffic.rl_median}}</td></TR>
<TR><td colspan="5"></td></TR>
<TR><td>Iperf duration</td><td>{{run_results.run_traffic.iperf_duration}}</td><td></td><td>Pass/Fail criteria (min duration)</td><td>{{run_results.run_traffic.dur_pass_fail_crit}}</td></TR>
<TR><td colspan="5"></td></TR>
<tr><td>traffic_fig</td><td colspan="4"><IMG src="{{run_results.run_traffic.traffic_fig}}"></td></tr>
</table>
{% endif %}
{% endfor %}
</body>
</html>
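The detailed template walks a nested structure: each entry of runs_results carries its own verdict, a runs_metrics list, and a run_traffic block whose table is emitted only when traffic_count is non-zero. A sketch of the shape of one such entry, with illustrative values (names mirror the {{...}} references above):

run_result = {
    'run_id': 0,
    'run_start_time': '2017-01-01 10:00:00',
    'run_stop_time': '2017-01-01 10:05:00',
    'run_duration': '0:05:00',
    'run_verdict': 'PASS',                  # PASS / FAIL / anything else -> orange
    'ue_seg_fault_status': 'NO_SEG_FAULT',  # or 'SEG_FAULT'
    'runs_metrics': [{
        'metric_id': 'UE_DL_RRC_MEAS',
        'metric_desc': 'UE downlink RRC measurements',
        'metric_uom': 'dBm',
        'metric_min': -97.6, 'metric_max': -90.1,
        'metric_mean': -94.0, 'metric_median': -93.8,
        # pass_fail_stat / pass_fail_min_limit / pass_fail_max_limit are
        # optional; the template guards each with `is defined`
        'metric_fig': 'metric_032800_0.png',
    }],
    'run_traffic': {
        'traffic_count': 1,                 # iperf table is skipped when 0
        'bw_min': 0, 'bw_max': 0, 'bw_mean': 0, 'bw_median': 0,
        'jitter_min': 0, 'jitter_max': 0, 'jitter_mean': 0, 'jitter_median': 0,
        'rl_min': 0, 'rl_max': 0, 'rl_mean': 0, 'rl_median': 0,
        'iperf_duration': 10, 'dur_pass_fail_crit': 9,
        'traffic_fig': 'iperf_032800_0.png',
    },
}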
@@ -35,12 +35,15 @@ import getopt
import sys
from subprocess import call
import encoder
sys.path.append(os.path.expandvars('$OPENAIR_DIR/cmake_targets/autotests/tools/'))
#test_cases = ('030001', '030901', '031001', '031601', '031701', '031801', '031901', '032001', '032101', '032201', '032301', '032501', '032601', '032801')
-test_cases = ('030030' , '030030' )
+test_cases = ('032800' , '032730' )
-nb_run = 3
+nb_run = 2
def error_opt(msg):
print("Option error: " + msg)
@@ -58,14 +61,27 @@ def main(args):
# metric = {}
# metric['id'] = 'UE_DLSCH_BITRATE'
# metric['description'] = 'UE downlink physical throughput'
# metric['regex'] = '(UE_DLSCH_BITRATE) =\s+(\d+\.\d+) kbps.+frame = (\d+)\)'
# metric['unit_of_meas'] = 'kbps'
# metric['min_limit'] = 14668.8
#AUTOTEST Metric : RRC Measurments RSRP[0]=-97.60 dBm/RE, RSSI=-72.83 dBm, RSRQ[0] 9.03 dB, N0 -125 dBm/RE, NF 7.2 dB (frame = 4490)
metric = {}
-metric['id'] = 'UE_DLSCH_BITRATE'
-metric['description'] = 'UE downlink physical throughput'
-metric['regex'] = '(UE_DLSCH_BITRATE) =\s+(\d+\.\d+) kbps.+frame = (\d+)\)'
+metric['id'] = 'UE_DL_RRC_MEAS'
+metric['description'] = 'UE downlink RRC Measurements'
+metric['nb_metric'] = 5
+# metric['regex'] = 'AUTOTEST Metric : RRC Measurments (RSRP\[0\])=(-?\d+\.?\d*)\s+(.+),\s+(RSRQ\[0\])=(-?\d+\.?\d*)\s+(.+),,\s+(N0)=(-?\d+\.?\d*)\s+(.+),,\s+(NF)=(-?\d+\.?\d*)\s+(.+)\s+\(frame = (\d+)\) '
+metric['regex'] = 'AUTOTEST Metric : RRC Measurments (RSRP\[0\])=(-?\d+\.?\d*)\s+(.+)\,\s+(RSSI)=(-?\d+\.?\d*)\s+(.+)\,\s+(RSRQ\[0\])=(-?\d+\.?\d*)\s+(.+)\,\s+(N0)=(-?\d+\.?\d*)\s+(.+)\,\s+(NF)=(-?\d+\.?\d*)\s+(.+)\s+\(frame = (\d+)\)'
metric['unit_of_meas'] = 'kbps'
metric['min_limit'] = 14668.8
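The new regex captures five (name, value, unit) triplets plus the trailing frame number. Note that the sample log line quoted in the comment above separates RSRQ[0], N0 and NF from their values with spaces rather than '=', so that exact line would not match; the check below uses a synthetic line written to conform to the regex:

import re

regex = (r'AUTOTEST Metric : RRC Measurments (RSRP\[0\])=(-?\d+\.?\d*)\s+(.+)'
         r'\,\s+(RSSI)=(-?\d+\.?\d*)\s+(.+)\,\s+(RSRQ\[0\])=(-?\d+\.?\d*)\s+(.+)'
         r'\,\s+(N0)=(-?\d+\.?\d*)\s+(.+)\,\s+(NF)=(-?\d+\.?\d*)\s+(.+)'
         r'\s+\(frame = (\d+)\)')

# synthetic, regex-conforming line (an assumption about the log format)
line = ('AUTOTEST Metric : RRC Measurments RSRP[0]=-97.60 dBm/RE, '
        'RSSI=-72.83 dBm, RSRQ[0]=9.03 dB, N0=-125 dBm/RE, NF=7.2 dB '
        '(frame = 4490)')

m = re.search(regex, line)
assert m is not None
# groups come back as (name, value, unit) triplets, frame number last:
# ('RSRP[0]', '-97.60', 'dBm/RE', 'RSSI', '-72.83', 'dBm', ..., '4490')
print(m.groups())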
#report_path = log_path+'/report/'
#os.system(' mkdir -p ' + report_path)
@@ -74,58 +90,44 @@ def main(args):
#return(0)
for test_case in test_cases:
# print test_case
if test_case == '030001':
metric['min_limit'] = 500.0
if test_case == '030901':
metric['min_limit'] = 640.0
if test_case == '031001':
metric['min_limit'] = 3200.0
if test_case == '031601':
metric['min_limit'] = 5920.0
if test_case == '031701':
metric['min_limit'] = 6000.0
if test_case == '031801':
metric['min_limit'] = 6200.0
if test_case == '031901':
metric['min_limit'] = 7000.0
if test_case == '032001':
metric['min_limit'] = 7800.0
if test_case == '032101':
metric['min_limit'] = 8000.0
if test_case == '032201':
metric['min_limit'] = 9000.0
if test_case == '032301':
metric['min_limit'] = 10000.0
if test_case == '032501':
metric['min_limit'] = 11000.0
if test_case == '032601':
metric['min_limit'] = 12000.0
if test_case == '032801':
metric['min_limit'] = 12500.0
if test_case == '035201':
metric['min_limit'] = 14668.8
if test_case == '036001':
metric['min_limit'] = 25363.2
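The threshold chain above maps each throughput test case to its pass/fail floor in kbps; a table-driven equivalent is more compact and keeps the same fall-through behaviour (unknown test cases keep the previous min_limit). A sketch, with the values copied verbatim:

MIN_LIMITS_KBPS = {
    '030001': 500.0,   '030901': 640.0,   '031001': 3200.0,
    '031601': 5920.0,  '031701': 6000.0,  '031801': 6200.0,
    '031901': 7000.0,  '032001': 7800.0,  '032101': 8000.0,
    '032201': 9000.0,  '032301': 10000.0, '032501': 11000.0,
    '032601': 12000.0, '032801': 12500.0, '035201': 14668.8,
    '036001': 25363.2,
}

for test_case in test_cases:
    # fall back to the current value when the test case is not listed
    metric['min_limit'] = MIN_LIMITS_KBPS.get(test_case, metric['min_limit'])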
test_results = []
for test_case in test_cases:
for i in range(0, nb_run):
-fname = 'log//'+test_case+'/run_'+str(i)+'/UE_exec_'+str(i)+'_.log'
+fname = '..//log//'+test_case+'/run_'+str(i)+'/UE_exec_'+str(i)+'_.log'
args = {'metric' : metric,
'file' : fname }
-cell_synch_status = analyser.check_cell_synchro(fname)
-if cell_synch_status == 'CELL_SYNCH':
-    print '!!!!!!!!!!!!!! Cell synchronized !!!!!!!!!!!'
-    metric_checks_flag = 0
-else :
-    print '!!!!!!!!!!!!!! Cell NOT NOT synchronized !!!!!!!!!!!'
+# cell_synch_status = analyser.check_cell_synchro(fname)
+# if cell_synch_status == 'CELL_SYNCH':
+#     print '!!!!!!!!!!!!!! Cell synchronized !!!!!!!!!!!'
+#     metric_checks_flag = 0
+# else :
+#     print '!!!!!!!!!!!!!! Cell NOT NOT synchronized !!!!!!!!!!!'
# metric_extracted = analyser.do_extract_metrics(args)
# metrics_extracted = analyser.do_extract_metrics_new(args)
# de-xmlfy test report
xml_file = '..//log//'+test_case+'/test.'+test_case+'_ng.xml'
print xml_file
# test_result =
# test_results.append(test_result)
# xmlFile = logdir_local_testcase + '/test.' + testcasename + '.xml'
# xml="\n<testcase classname=\'"+ testcaseclass + "\' name=\'" + testcasename + "."+tags + "\' Run_result=\'" + test_result_string + "\' time=\'" + str(duration) + " s \' RESULT=\'" + testcase_verdict + "\'></testcase> \n"
# write_file(xmlFile, xml, mode="w")
# xmlFile_ng = logdir_local_testcase + '/test.' + testcasename + '_ng.xml'
# xml_ng = xmlify(test_result, wrap=testcasename, indent=" ")
# write_file(xmlFile_ng, xml_ng, mode="w")
# print "min = "+ str( metric_extracted['metric_min'] )
# print "min_index = "+ str( metric_extracted['metric_min_index'] )
@@ -143,16 +145,27 @@ def main(args):
# print fname
# analyser.do_img_metrics(metric, metric_extracted, fname)
# fname = 'log//'+test_case+'/run_'+str(i)+'/UE_traffic_'+str(i)+'_.log'
# fname = 'log//'+test_case+'/run_'+str(i)+'/UE_traffic_'+str(i)+'_.log'
# args = {'file' : fname }
# args = {'file' : fname }
# traffic_metrics = analyser.do_extract_traffic_metrics(args)
# traffic_metrics = analyser.do_extract_traffic_metrics(args)
# fname= 'report/iperf_'+test_case+'_'+str(i)+'.png'
# fname= 'report/iperf_'+test_case+'_'+str(i)+'.png'
# print fname
# analyser.do_img_traffic(traffic_metrics, fname)
# print fname
# analyser.do_img_traffic(traffic_metrics, fname)
for test_result in test_results:
cmd = 'mkdir -p ' + report_dir + '/'+ test_result['testcase_name']
result = os.system(cmd)
report_file = report_dir + '/'+ test_result['testcase_name'] + '/'+ test_result['testcase_name']+ '_report.html'
analyser.create_test_report_detailed_html(test_result, report_file )
print test_result
@@ -41,7 +41,7 @@ from jinja2 import Environment, FileSystemLoader
PATH = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_ENVIRONMENT = Environment(
autoescape=False,
-loader=FileSystemLoader(os.path.join(PATH, 'templates')),
+loader=FileSystemLoader(os.path.join(PATH, '../templates')),
trim_blocks=False)
@@ -103,6 +103,129 @@ def do_extract_metrics(args):
}
return(ret)
def do_extract_metrics_new(args):
# print ""
# print "do_extract_metrics ... "
fname = args['file']
metric = args['metric']
print(fname)
print 'metric id = ' + metric['id']
print 'metric regex = ' + metric['regex']
count = 0
mmin = 0
mmin_index = 0
mmax = 0
mmax_index = 0
mean = 0
median = 0
toto = [('id', 'S20'), ('metric', np.float), ('frame', np.int)]
print toto
np_format = []
for x in range(0, metric['nb_metric']):
np_format.append( ('id'+str(x), 'S20') )
np_format.append( ('metric'+str(x), np.float) )
np_format.append( ('uom'+str(x), 'S20') )
np_format.append( ('frame', np.int))
print np_format
output = np.fromregex(fname,metric['regex'], np_format)
print output
count = output['frame'].size
print count
if count > 0:
fontP = FontProperties()
fontP.set_size('small')
fig = plt.figure(1)
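do_extract_metrics_new builds a structured dtype of nb_metric (name, value, unit) triplets plus a frame column, and np.fromregex then parses the entire log in one pass, one row per regex match. A self-contained sketch of the same pattern; it is written against current NumPy, where the np.float/np.int aliases used above have been removed (plain float/int yield the same dtypes), and uses a reduced two-metric format with an assumed log layout:

import io
import numpy as np

nb_metric = 2  # the real code uses metric['nb_metric'] = 5
regex = r'(RSRP)=(-?\d+\.?\d*) (\S+), (RSSI)=(-?\d+\.?\d*) (\S+) \(frame = (\d+)\)'

# one dtype field per regex group: (name, value, unit) per metric + frame
np_format = []
for x in range(nb_metric):
    np_format.append(('id' + str(x), 'S20'))
    np_format.append(('metric' + str(x), float))
    np_format.append(('uom' + str(x), 'S20'))
np_format.append(('frame', int))

# stand-in for the UE_exec log file
log = io.StringIO('RSRP=-97.60 dBm/RE, RSSI=-72.83 dBm (frame = 4490)\n'
                  'RSRP=-96.10 dBm/RE, RSSI=-71.90 dBm (frame = 4500)\n')
output = np.fromregex(log, regex, np_format)

print(output['frame'].size)   # 2 -> the `count` used for min/max/mean
print(output['metric0'])      # [-97.6 -96.1]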