#!/bin/groovy
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
// Template Jenkins Declarative Pipeline script to run Test w/ RF HW
// The python executor node shall be in the same subnet as the other servers
def pythonExecutor = params.pythonExecutor
// Location of the test XML file to be run
def testXMLFile = params.pythonTestXmlFile
def mainPythonAllXmlFiles = ""
def buildStageStatus = true
// Name of the test stage
def testStageName = params.pipelineTestStageName
def lockResources = []
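// e.g. a LockResources value of "rack1,usrp-b210" (hypothetical names) becomes
// [[resource: 'rack1'], [resource: 'usrp-b210']], the entry format expected by the
// Lockable Resources plugin's lock(extra: ...) option used below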
if (params.LockResources != null && params.LockResources.trim().length() > 0)
params.LockResources.trim().split(",").each{lockResources += [resource: it.trim()]}
// Global Parameters. Normally they should be populated when the master job
// triggers the slave job with parameters
def eNB_Repository
def eNB_Branch
def eNB_CommitID
def eNB_AllowMergeRequestProcess
def eNB_TargetBranch
//Status fed to the database
def StatusForDb = ""
pipeline {
agent {label pythonExecutor}
options {
ansiColor('xterm')
lock(extra: lockResources)
}
stages {
stage("Build Init") {
steps {
// update the build name and description
buildName "${params.eNB_MR}"
buildDescription "Branch : ${params.eNB_Branch}"
}
}
stage ("Verify Parameters") {
steps {
script {
echo '\u2705 \u001B[32mVerify Parameters\u001B[0m'
def allParametersPresent = true
// It is already too late to check it
if (params.pythonExecutor != null) {
echo "eNB CI executor node : ${pythonExecutor}"
}
// If not present, pick a default Stage Name
if (params.pipelineTestStageName == null) {
// picking default
testStageName = 'Template Test Stage'
}
if (params.LockResources == null) {
echo "no LockResources given"
allParametersPresent = false
}
// 1st eNB parameters
if (params.eNB_IPAddress == null) {
allParametersPresent = false
}
if (params.eNB_SourceCodePath == null) {
allParametersPresent = false
}
if (params.eNB_Credentials == null) {
allParametersPresent = false
}
// 2nd eNB parameters
if (params.eNB1_IPAddress == null) {
allParametersPresent = false
}
if (params.eNB1_SourceCodePath == null) {
allParametersPresent = false
}
if (params.eNB1_Credentials == null) {
allParametersPresent = false
}
// 3rd eNB parameters
if (params.eNB2_IPAddress == null) {
allParametersPresent = false
}
if (params.eNB2_SourceCodePath == null) {
allParametersPresent = false
}
if (params.eNB2_Credentials == null) {
allParametersPresent = false
}
if (params.UE_IPAddress == null) {
allParametersPresent = false
}
if (params.UE_SourceCodePath == null) {
allParametersPresent = false
}
if (params.UE_Credentials == null) {
allParametersPresent = false
}
// the following 4 parameters should be pushed by the master trigger
// if not present, take the job GIT variables (used for developing)
if (params.eNB_Repository == null) {
eNB_Repository = env.GIT_URL
} else {
eNB_Repository = params.eNB_Repository
}
echo "eNB_Repository : ${eNB_Repository}"
if (params.eNB_Branch == null) {
eNB_Branch = env.GIT_BRANCH
} else {
eNB_Branch = params.eNB_Branch
}
echo "eNB_Branch : ${eNB_Branch}"
if (params.eNB_CommitID == null) {
eNB_CommitID = env.GIT_COMMIT
} else {
eNB_CommitID = params.eNB_CommitID
}
echo "eNB_CommitID : ${eNB_CommitID}"
if (params.eNB_mergeRequest != null) {
eNB_AllowMergeRequestProcess = params.eNB_mergeRequest
if (eNB_AllowMergeRequestProcess) {
if (params.eNB_TargetBranch != null) {
eNB_TargetBranch = params.eNB_TargetBranch
} else {
eNB_TargetBranch = 'develop'
}
echo "eNB_TargetBranch : ${eNB_TargetBranch}"
}
}
if (params.EPC_IPAddress == null) {
allParametersPresent = false
}
if (params.EPC_Type == null) {
allParametersPresent = false
}
if (params.EPC_SourceCodePath == null) {
allParametersPresent = false
}
if (params.EPC_Credentials == null) {
allParametersPresent = false
}
if (allParametersPresent) {
echo "All parameters are present"
if (eNB_AllowMergeRequestProcess) {
sh "git fetch"
sh "./ci-scripts/doGitLabMerge.sh --src-branch ${eNB_Branch} --src-commit ${eNB_CommitID} --target-branch ${eNB_TargetBranch} --target-commit latest"
} else {
sh "git fetch"
sh "git checkout -f ${eNB_CommitID}"
}
} else {
echo "Some parameters are missing"
sh "./ci-scripts/fail.sh"
}
}
}
}
stage ("Build and Test") {
steps {
script {
dir ('ci-scripts') {
echo "\u2705 \u001B[32m${testStageName}\u001B[0m"
// If not present, pick a default XML file
if (params.pythonTestXmlFile == null) {
// picking default
testXMLFile = 'xml_files/enb_usrpB210_band7_50PRB.xml'
echo "Test XML file(default): ${testXMLFile}"
mainPythonAllXmlFiles += "--XMLTestFile=" + testXMLFile + " "
} else {
String[] myXmlTestSuite = testXMLFile.split("\\r?\\n")
for (xmlFile in myXmlTestSuite) {
if (fileExists(xmlFile)) {
mainPythonAllXmlFiles += "--XMLTestFile=" + xmlFile + " "
echo "Test XML file : ${xmlFile}"
} else {
echo "Test XML file ${xmlFile}: no such file"
}
}
}
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password'],
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB1_Credentials}", usernameVariable: 'eNB1_Username', passwordVariable: 'eNB1_Password'],
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB2_Credentials}", usernameVariable: 'eNB2_Username', passwordVariable: 'eNB2_Password'],
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password'],
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.UE_Credentials}", usernameVariable: 'UE_Username', passwordVariable: 'UE_Password']
]) {
sh "python3 main.py --mode=InitiateHtml --ranRepository=${eNB_Repository} --ranBranch=${eNB_Branch} --ranCommitID=${eNB_CommitID} --ranAllowMerge=${eNB_AllowMergeRequestProcess} --ranTargetBranch=${eNB_TargetBranch} ${mainPythonAllXmlFiles}"
String[] myXmlTestSuite = testXMLFile.split("\\r?\\n")
for (xmlFile in myXmlTestSuite) {
if (fileExists(xmlFile)) {
try {
timeout (time: 60, unit: 'MINUTES') {
sh "python3 main.py --mode=TesteNB --ranRepository=${eNB_Repository} --ranBranch=${eNB_Branch} --ranCommitID=${eNB_CommitID} --ranAllowMerge=${eNB_AllowMergeRequestProcess} --ranTargetBranch=${eNB_TargetBranch} --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath} --eNB1IPAddress=${params.eNB1_IPAddress} --eNB1UserName=${eNB1_Username} --eNB1Password=${eNB1_Password} --eNB1SourceCodePath=${params.eNB1_SourceCodePath} --eNB2IPAddress=${params.eNB2_IPAddress} --eNB2UserName=${eNB2_Username} --eNB2Password=${eNB2_Password} --eNB2SourceCodePath=${params.eNB2_SourceCodePath} --UEIPAddress=${params.UE_IPAddress} --UEUserName=${UE_Username} --UEPassword=${UE_Password} --UESourceCodePath=${params.UE_SourceCodePath} --EPCIPAddress=${params.EPC_IPAddress} --EPCType=${params.EPC_Type} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --XMLTestFile=${xmlFile}"
}
} catch (Exception e) {
currentBuild.result = 'FAILURE'
buildStageStatus = false
}
}
}
sh "python3 main.py --mode=FinalizeHtml --finalStatus=${buildStageStatus} --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --UEIPAddress=${params.UE_IPAddress} --UEUserName=${UE_Username} --UEPassword=${UE_Password}"
}
}
}
}
}
stage('Log Collection') {
parallel {
stage('Log Collection (eNB - Build)') {
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (eNB - Build)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectBuild --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath}"
echo '\u2705 \u001B[32mLog Transfer (eNB - Build)\u001B[0m'
sh "sshpass -p \'${eNB_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${eNB_Username}@${params.eNB_IPAddress}:${eNB_SourceCodePath}/cmake_targets/build.log.zip ./build.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("build.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "build.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (OAI UE - Build)') {
steps {
echo '\u2705 \u001B[32mLog Collection (OAI UE - Build)\u001B[0m'
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.UE_Credentials}", usernameVariable: 'UE_Username', passwordVariable: 'UE_Password']
]) {
sh "python3 ci-scripts/main.py --mode=LogCollectBuild --UEIPAddress=${params.UE_IPAddress} --UEUserName=${UE_Username} --UEPassword=${UE_Password} --UESourceCodePath=${params.UE_SourceCodePath}"
echo '\u2705 \u001B[32mLog Transfer (UE - Build)\u001B[0m'
sh "sshpass -p \'${UE_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${UE_Username}@${params.UE_IPAddress}:${UE_SourceCodePath}/cmake_targets/build.log.zip ./build.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("build.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "build.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (eNB - Run)') {
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (eNB - Run)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollecteNB --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath} --BuildId=${env.BUILD_ID}"
echo '\u2705 \u001B[32mLog Transfer (eNB - Run)\u001B[0m'
sh "sshpass -p \'${eNB_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${eNB_Username}@${params.eNB_IPAddress}:${eNB_SourceCodePath}/cmake_targets/enb.log.zip ./enb.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("enb.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "enb.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (OAI UE - Run)') {
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.UE_Credentials}", usernameVariable: 'UE_Username', passwordVariable: 'UE_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (OAI UE - Run)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectOAIUE --UEIPAddress=${params.UE_IPAddress} --UEUserName=${UE_Username} --UEPassword=${UE_Password} --UESourceCodePath=${params.UE_SourceCodePath}"
echo '\u2705 \u001B[32mLog Transfer (OAI UE - Run)\u001B[0m'
sh "sshpass -p \'${UE_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${UE_Username}@${params.UE_IPAddress}:${UE_SourceCodePath}/cmake_targets/ue.log.zip ./ue.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("ue.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "ue.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (CN)') {
// Bypassing this stage if EPC server is not defined
when {
expression { params.EPC_IPAddress != "none" }
}
steps {
script {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (HSS)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectHSS --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
if (params.EPC_Type != 'OAICN5G') {
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/hss.log.zip ./hss.log.${env.BUILD_ID}.zip || true"
}
echo '\u2705 \u001B[32mLog Collection (MME or AMF)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectMME --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
if (params.EPC_Type == 'OAICN5G') {
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/mme.log.zip ./amf.log.${env.BUILD_ID}.zip || true"
} else {
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/mme.log.zip ./mme.log.${env.BUILD_ID}.zip || true"
}
echo '\u2705 \u001B[32mLog Collection (SPGW or SMF/UPF)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectSPGW --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
if (params.EPC_Type == 'OAICN5G') {
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/spgw.log.zip ./smf-upf.log.${env.BUILD_ID}.zip || true"
} else {
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/spgw.log.zip ./spgw.log.${env.BUILD_ID}.zip || true"
}
}
if(fileExists("hss.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "hss.log.${env.BUILD_ID}.zip"
}
if(fileExists("mme.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "mme.log.${env.BUILD_ID}.zip"
}
if(fileExists("spgw.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "spgw.log.${env.BUILD_ID}.zip"
}
if(fileExists("amf.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "amf.log.${env.BUILD_ID}.zip"
}
if(fileExists("smf-upf.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "smf-upf.log.${env.BUILD_ID}.zip"
}
echo '\u2705 \u001B[32mLog Collection for CoreNetwork Done!\u001B[0m'
}
}
}
}
}
}
post {
always {
script {
if(fileExists("ci-scripts/test_results.html")) {
sh "mv ci-scripts/test_results.html test_results-${JOB_NAME}.html"
sh "sed -i -e 's#TEMPLATE_JOB_NAME#${JOB_NAME}#' -e 's@build #TEMPLATE_BUILD_ID@build #${BUILD_ID}@' -e 's#Build-ID: TEMPLATE_BUILD_ID#Build-ID: <a href=\"${BUILD_URL}\">${BUILD_ID}</a>#' -e 's#TEMPLATE_STAGE_NAME#${testStageName}#' test_results-${JOB_NAME}.html"
archiveArtifacts "test_results-${JOB_NAME}.html"
}
if (params.pipelineZipsConsoleLog != null) {
if (params.pipelineZipsConsoleLog) {
echo "Archiving Jenkins console log"
sh "wget --no-check-certificate --no-proxy ${env.JENKINS_URL}/job/${env.JOB_NAME}/${env.BUILD_ID}/consoleText -O consoleText.log || true"
sh "zip -m consoleText.log.${env.BUILD_ID}.zip consoleText.log || true"
if(fileExists("consoleText.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "consoleText.log.${env.BUILD_ID}.zip"
}
}
}
}
}
}
}
#!/bin/groovy
/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
// Template Jenkins Declarative Pipeline script to run Test w/ RF HW
// The python executor node shall be in the same subnet as the other servers
def pythonExecutor = params.pythonExecutor
// Location of the test XML file to be run
def testXMLFile = params.pythonTestXmlFile
def mainPythonAllXmlFiles = ""
def buildStageStatus = true
// Name of the test stage
def testStageName = params.pipelineTestStageName
def lockResources = []
if (params.LockResources != null && params.LockResources.trim().length() > 0)
params.LockResources.trim().split(",").each{lockResources += [resource: it.trim()]}
// Terminate Status
def termUE = 0
def termENB = 1
def termSPGW = 2
def termMME = 3
def termHSS = 4
def termStatusArray = new Boolean[termHSS + 1]
termStatusArray[termUE] = false
termStatusArray[termENB] = false
termStatusArray[termSPGW] = false
termStatusArray[termMME] = false
termStatusArray[termHSS] = false
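// each flag is set to true by the matching Terminate stage's post/success block;
// the failure handler at the end of this pipeline re-runs termination for any
// component whose flag is still false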
// Global Parameters. Normally they should be populated when the master job
// triggers the slave job with parameters
def eNB_Repository
def eNB_Branch
def eNB_CommitID
def eNB_AllowMergeRequestProcess = false
def eNB_TargetBranch
def flexricOption = ""
pipeline {
agent {
label pythonExecutor
}
options {
ansiColor('xterm')
lock(extra: lockResources)
}
stages {
stage("Build Init") {
steps {
// update the build name and description
buildName "${params.eNB_MR}"
buildDescription "Branch : ${params.eNB_Branch}"
}
}
stage ("Verify Parameters") {
steps {
script {
echo '\u2705 \u001B[32mVerify Parameters\u001B[0m'
def allParametersPresent = true
// It is already too late to check it
if (params.pythonExecutor != null) {
echo "eNB CI executor node : ${pythonExecutor}"
}
// If not present, pick a default Stage Name
if (params.pipelineTestStageName == null) {
// picking default
testStageName = 'Template Test Stage'
}
if (params.LockResources == null) {
echo "no LockResources given"
allParametersPresent = false
}
if (params.eNB_IPAddress == null) {
allParametersPresent = false
}
if (params.eNB_SourceCodePath == null) {
allParametersPresent = false
}
if (params.eNB_Credentials == null) {
allParametersPresent = false
}
// the following 4 parameters should be pushed by the master trigger
// if not present, take the job GIT variables (used for developing)
if (params.eNB_Repository == null) {
eNB_Repository = env.GIT_URL
} else {
eNB_Repository = params.eNB_Repository
}
echo "eNB_Repository : ${eNB_Repository}"
if (params.eNB_Branch == null) {
eNB_Branch = env.GIT_BRANCH
} else {
eNB_Branch = params.eNB_Branch
}
echo "eNB_Branch : ${eNB_Branch}"
if (params.eNB_CommitID == null) {
eNB_CommitID = env.GIT_COMMIT
} else {
eNB_CommitID = params.eNB_CommitID
}
echo "eNB_CommitID : ${eNB_CommitID}"
if (params.eNB_mergeRequest != null) {
eNB_AllowMergeRequestProcess = params.eNB_mergeRequest
if (eNB_AllowMergeRequestProcess) {
if (params.eNB_TargetBranch != null) {
eNB_TargetBranch = params.eNB_TargetBranch
} else {
eNB_TargetBranch = 'develop'
}
echo "eNB_TargetBranch : ${eNB_TargetBranch}"
}
}
if (params.EPC_IPAddress == null) {
allParametersPresent = false
}
if (params.EPC_Type == null) {
allParametersPresent = false
}
if (params.EPC_SourceCodePath == null) {
allParametersPresent = false
}
if (params.EPC_Credentials == null) {
allParametersPresent = false
}
if (params.Flexric_Tag != null) {
echo "This pipeline is configured to run with a FlexRIC deployment."
echo "Appending FlexRicTag option to the list of options"
flexricOption = "--FlexRicTag=${params.Flexric_Tag}"
echo "Using new Flexric option: ${flexricOption}"
}
if (allParametersPresent) {
echo "All parameters are present"
if (eNB_AllowMergeRequestProcess) {
sh "git fetch"
sh "./ci-scripts/doGitLabMerge.sh --src-branch ${eNB_Branch} --src-commit ${eNB_CommitID} --target-branch ${eNB_TargetBranch} --target-commit latest"
} else if (eNB_CommitID != 'develop') {
sh "git fetch"
sh "git checkout -f ${eNB_CommitID}"
}
} else {
error "Some parameters are missing"
}
}
}
}
stage ("Build and Test") {
steps {
script {
dir ('ci-scripts') {
echo "\u2705 \u001B[32m${testStageName}\u001B[0m"
// If not present, pick a default XML file
if (params.pythonTestXmlFile == null) {
// picking default
testXMLFile = 'xml_files/enb_usrpB210_band7_50PRB.xml'
echo "Test XML file(default): ${testXMLFile}"
mainPythonAllXmlFiles += "--XMLTestFile=" + testXMLFile + " "
} else {
String[] myXmlTestSuite = testXMLFile.split("\\r?\\n")
for (xmlFile in myXmlTestSuite) {
if (fileExists(xmlFile)) {
mainPythonAllXmlFiles += "--XMLTestFile=" + xmlFile + " "
echo "Test XML file : ${xmlFile}"
} else {
echo "Test XML file ${xmlFile}: no such file"
}
}
}
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password'],
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password'],
]) {
sh "python3 main.py --mode=InitiateHtml --ranRepository=${eNB_Repository} --ranBranch=${eNB_Branch} --ranCommitID=${eNB_CommitID} --ranAllowMerge=${eNB_AllowMergeRequestProcess} --ranTargetBranch=${eNB_TargetBranch} ${flexricOption} ${mainPythonAllXmlFiles}"
String[] myXmlTestSuite = testXMLFile.split("\\r?\\n")
for (xmlFile in myXmlTestSuite) {
if (fileExists(xmlFile)) {
try {
timeout (time: 60, unit: 'MINUTES') {
sh "python3 main.py --mode=TesteNB --ranRepository=${eNB_Repository} --ranBranch=${eNB_Branch} --ranCommitID=${eNB_CommitID} --ranAllowMerge=${eNB_AllowMergeRequestProcess} --ranTargetBranch=${eNB_TargetBranch} ${flexricOption} --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath} --EPCIPAddress=${params.EPC_IPAddress} --EPCType=${params.EPC_Type} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --XMLTestFile=${xmlFile}"
}
} catch (Exception e) {
currentBuild.result = 'FAILURE'
buildStageStatus = false
}
}
}
sh "python3 main.py --mode=FinalizeHtml --finalStatus=${buildStageStatus} --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password}"
}
}
}
}
}
stage ("Terminate") {
parallel {
stage('Terminate eNB') {
steps {
echo '\u2705 \u001B[32mTerminate eNB\u001B[0m'
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
]) {
sh "python3 ci-scripts/main.py --mode=TerminateeNB --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password}"
}
}
post {
success {
script {
termStatusArray[termENB] = true
}
}
}
}
stage('Terminate SPGW') {
// Bypassing this stage if EPC server is not defined
when {
expression { params.EPC_IPAddress != "none" }
}
steps {
echo '\u2705 \u001B[32mTerminate SPGW\u001B[0m'
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
sh "python3 ci-scripts/main.py --mode=TerminateSPGW --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCType=${params.EPC_Type} --EPCSourceCodePath=${params.EPC_SourceCodePath}"
}
}
post {
success {
script {
termStatusArray[termSPGW] = true
}
}
}
}
stage('Terminate MME') {
// Bypassing this stage if EPC server is not defined
when {
expression { params.EPC_IPAddress != "none" }
}
steps {
echo '\u2705 \u001B[32mTerminate MME\u001B[0m'
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
sh "python3 ci-scripts/main.py --mode=TerminateMME --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCType=${params.EPC_Type} --EPCSourceCodePath=${params.EPC_SourceCodePath}"
}
}
post {
success {
script {
termStatusArray[termMME] = true
}
}
}
}
stage('Terminate HSS') {
// Bypassing this stage if EPC server is not defined
when {
expression { params.EPC_IPAddress != "none" }
}
steps {
echo '\u2705 \u001B[32mTerminate HSS\u001B[0m'
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
sh "python3 ci-scripts/main.py --mode=TerminateHSS --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCType=${params.EPC_Type} --EPCSourceCodePath=${params.EPC_SourceCodePath}"
}
}
post {
success {
script {
termStatusArray[termHSS] = true
}
}
}
}
}
}
stage('Log Collection') {
parallel {
stage('Log Collection (eNB - Build)') {
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (eNB - Build)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectBuild --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath}"
echo '\u2705 \u001B[32mLog Transfer (eNB - Build)\u001B[0m'
sh "sshpass -p \'${eNB_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${eNB_Username}@${params.eNB_IPAddress}:${eNB_SourceCodePath}/cmake_targets/build.log.zip ./build.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("build.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "build.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (eNB - Run)') {
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (eNB - Run)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollecteNB --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath} --BuildId=${env.BUILD_ID}"
echo '\u2705 \u001B[32mLog Transfer (eNB - Run)\u001B[0m'
sh "sshpass -p \'${eNB_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${eNB_Username}@${params.eNB_IPAddress}:${eNB_SourceCodePath}/cmake_targets/enb.log.zip ./enb.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("enb.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "enb.log.${env.BUILD_ID}.zip"
}
if(fileExists("ci-scripts/test_results.html")) {
sh "mv ci-scripts/test_results.html test_results-${JOB_NAME}.html"
sh "sed -i -e 's#TEMPLATE_JOB_NAME#${JOB_NAME}#' -e 's@build #TEMPLATE_BUILD_ID@build #${BUILD_ID}@' -e 's#Build-ID: TEMPLATE_BUILD_ID#Build-ID: <a href=\"${BUILD_URL}\">${BUILD_ID}</a>#' -e 's#TEMPLATE_STAGE_NAME#${testStageName}#' test_results-${JOB_NAME}.html"
archiveArtifacts "test_results-${JOB_NAME}.html"
}
}
}
}
stage('Log Collection (SPGW)') {
// Bypassing this stage if EPC server is not defined
when {
expression { params.EPC_IPAddress != "none" }
}
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (SPGW)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectSPGW --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
echo '\u2705 \u001B[32mLog Transfer (SPGW)\u001B[0m'
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/spgw.log.zip ./spgw.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("spgw.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "spgw.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (MME)') {
// Bypassing this stage if EPC server is not defined
when {
expression { params.EPC_IPAddress != "none" }
}
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (MME)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectMME --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
echo '\u2705 \u001B[32mLog Transfer (MME)\u001B[0m'
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/mme.log.zip ./mme.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("mme.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "mme.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (HSS)') {
// Bypassing this stage if EPC server is not defined
when {
expression { params.EPC_IPAddress != "none" }
}
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (HSS)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectHSS --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
echo '\u2705 \u001B[32mLog Transfer (HSS)\u001B[0m'
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/hss.log.zip ./hss.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("hss.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "hss.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (Ping)') {
// Bypassing this stage if EPC server is not defined
when {
expression { params.EPC_IPAddress != "none" }
}
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (Ping)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectPing --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
echo '\u2705 \u001B[32mLog Transfer (Ping)\u001B[0m'
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/ping.log.zip ./ping.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("ping.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "ping.log.${env.BUILD_ID}.zip"
}
}
}
}
stage('Log Collection (Iperf)') {
// Bypassing this stage if EPC server is not defined
when {
expression { params.EPC_IPAddress != "none" }
}
steps {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
echo '\u2705 \u001B[32mLog Collection (Iperf)\u001B[0m'
sh "python3 ci-scripts/main.py --mode=LogCollectIperf --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
echo '\u2705 \u001B[32mLog Transfer (Iperf)\u001B[0m'
sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/iperf.log.zip ./iperf.log.${env.BUILD_ID}.zip || true"
}
script {
if(fileExists("iperf.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "iperf.log.${env.BUILD_ID}.zip"
}
}
}
}
}
}
}
post {
always {
script {
if (params.pipelineZipsConsoleLog != null) {
if (params.pipelineZipsConsoleLog) {
echo "Archiving Jenkins console log"
sh "wget --no-check-certificate --no-proxy ${env.JENKINS_URL}/job/${env.JOB_NAME}/${env.BUILD_ID}/consoleText -O consoleText.log || true"
sh "zip -m consoleText.log.${env.BUILD_ID}.zip consoleText.log || true"
if(fileExists("consoleText.log.${env.BUILD_ID}.zip")) {
archiveArtifacts "consoleText.log.${env.BUILD_ID}.zip"
}
}
}
}
}
// Make sure that we really shut down everything before leaving
failure {
script {
if (!termStatusArray[termENB]) {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
]) {
sh "python3 ci-scripts/main.py --mode=TerminateeNB --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password}"
}
}
if ((!termStatusArray[termSPGW]) && (params.EPC_IPAddress != "none")) {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
sh "python3 ci-scripts/main.py --mode=TerminateSPGW --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCType=${params.EPC_Type} --EPCSourceCodePath=${params.EPC_SourceCodePath}"
}
}
if ((!termStatusArray[termMME]) && (params.EPC_IPAddress != "none")) {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
sh "python3 ci-scripts/main.py --mode=TerminateMME --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCType=${params.EPC_Type} --EPCSourceCodePath=${params.EPC_SourceCodePath}"
}
}
if ((!termStatusArray[termHSS]) && (params.EPC_IPAddress != "none")) {
withCredentials([
[$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
]) {
sh "python3 ci-scripts/main.py --mode=TerminateHSS --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCType=${params.EPC_Type} --EPCSourceCodePath=${params.EPC_SourceCodePath}"
}
}
}
}
}
}
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
# Python for CI of OAI-eNB + COTS-UE
#
# Required Python Version
# Python 3.x
#
# Required Python Package
# pexpect
#---------------------------------------------------------------------
#-----------------------------------------------------------
# Import Libs
#-----------------------------------------------------------
import sys # arg
import re # reg
import yaml
import constants as CONST
#-----------------------------------------------------------
# Parsing Command Line Arguments
#-----------------------------------------------------------
def ArgsParse(argvs,CiTestObj,RAN,HTML,EPC,CONTAINERS,HELP,SCA,PHYSIM,CLUSTER):
    py_param_file_present = False
    py_params={}
    force_local = False
    mode = ''  # default; overwritten by an inline --mode=<...> parameter
    while len(argvs) > 1:
        myArgv = argvs.pop(1) # 0th is this file's name
        #--help
        if re.match('^\-\-help$', myArgv, re.IGNORECASE):
            HELP.GenericHelp(CONST.Version)
            sys.exit(0)
        if re.match('^\-\-local$', myArgv, re.IGNORECASE):
            force_local = True
        #--Apply=<filename>: read parameters from a YAML file instead of inline parameters
        elif re.match('^\-\-Apply=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-Apply=(.+)$', myArgv, re.IGNORECASE)
            py_params_file = matchReg.group(1)
            with open(py_params_file,'r') as file:
                # The FullLoader parameter handles the conversion from YAML
                # scalar values to Python dictionary format
                py_params = yaml.load(file,Loader=yaml.FullLoader)
            py_param_file_present = True #to be removed once validated
            #AssignParams(py_params) #to be uncommented once validated
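            # A hypothetical minimal --Apply file is plain YAML mapping parameter
            # names to values, e.g.:
            #   mode: TesteNB
            #   eNBIPAddress: 192.168.1.10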
        # consider inline parameters
        elif re.match('^\-\-mode=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-mode=(.+)$', myArgv, re.IGNORECASE)
            mode = matchReg.group(1)
        elif re.match('^\-\-eNBRepository=(.+)$|^\-\-ranRepository(.+)$', myArgv, re.IGNORECASE):
            if re.match('^\-\-eNBRepository=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNBRepository=(.+)$', myArgv, re.IGNORECASE)
            else:
                matchReg = re.match('^\-\-ranRepository=(.+)$', myArgv, re.IGNORECASE)
            CiTestObj.ranRepository = matchReg.group(1)
            RAN.ranRepository=matchReg.group(1)
            HTML.ranRepository=matchReg.group(1)
            CONTAINERS.ranRepository=matchReg.group(1)
            SCA.ranRepository=matchReg.group(1)
            PHYSIM.ranRepository=matchReg.group(1)
            CLUSTER.ranRepository=matchReg.group(1)
        elif re.match('^\-\-eNB_AllowMerge=(.+)$|^\-\-ranAllowMerge=(.+)$', myArgv, re.IGNORECASE):
            if re.match('^\-\-eNB_AllowMerge=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNB_AllowMerge=(.+)$', myArgv, re.IGNORECASE)
            else:
                matchReg = re.match('^\-\-ranAllowMerge=(.+)$', myArgv, re.IGNORECASE)
            doMerge = matchReg.group(1)
            if ((doMerge == 'true') or (doMerge == 'True')):
                CiTestObj.ranAllowMerge = True
                RAN.ranAllowMerge=True
                HTML.ranAllowMerge=True
                CONTAINERS.ranAllowMerge=True
                SCA.ranAllowMerge=True
                PHYSIM.ranAllowMerge=True
                CLUSTER.ranAllowMerge=True
        elif re.match('^\-\-eNBBranch=(.+)$|^\-\-ranBranch=(.+)$', myArgv, re.IGNORECASE):
            if re.match('^\-\-eNBBranch=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNBBranch=(.+)$', myArgv, re.IGNORECASE)
            else:
                matchReg = re.match('^\-\-ranBranch=(.+)$', myArgv, re.IGNORECASE)
            CiTestObj.ranBranch = matchReg.group(1)
            RAN.ranBranch=matchReg.group(1)
            HTML.ranBranch=matchReg.group(1)
            CONTAINERS.ranBranch=matchReg.group(1)
            SCA.ranBranch=matchReg.group(1)
            PHYSIM.ranBranch=matchReg.group(1)
            CLUSTER.ranBranch=matchReg.group(1)
        elif re.match('^\-\-eNBCommitID=(.*)$|^\-\-ranCommitID=(.*)$', myArgv, re.IGNORECASE):
            if re.match('^\-\-eNBCommitID=(.*)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNBCommitID=(.*)$', myArgv, re.IGNORECASE)
            else:
                matchReg = re.match('^\-\-ranCommitID=(.*)$', myArgv, re.IGNORECASE)
            CiTestObj.ranCommitID = matchReg.group(1)
            RAN.ranCommitID=matchReg.group(1)
            HTML.ranCommitID=matchReg.group(1)
            CONTAINERS.ranCommitID=matchReg.group(1)
            SCA.ranCommitID=matchReg.group(1)
            PHYSIM.ranCommitID=matchReg.group(1)
            CLUSTER.ranCommitID=matchReg.group(1)
        elif re.match('^\-\-eNBTargetBranch=(.*)$|^\-\-ranTargetBranch=(.*)$', myArgv, re.IGNORECASE):
            if re.match('^\-\-eNBTargetBranch=(.*)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNBTargetBranch=(.*)$', myArgv, re.IGNORECASE)
            else:
                matchReg = re.match('^\-\-ranTargetBranch=(.*)$', myArgv, re.IGNORECASE)
            CiTestObj.ranTargetBranch = matchReg.group(1)
            RAN.ranTargetBranch=matchReg.group(1)
            HTML.ranTargetBranch=matchReg.group(1)
            CONTAINERS.ranTargetBranch=matchReg.group(1)
            SCA.ranTargetBranch=matchReg.group(1)
            PHYSIM.ranTargetBranch=matchReg.group(1)
            CLUSTER.ranTargetBranch=matchReg.group(1)
        elif re.match('^\-\-eNBIPAddress=(.+)$|^\-\-eNB[1-2]IPAddress=(.+)$', myArgv, re.IGNORECASE):
            if re.match('^\-\-eNBIPAddress=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNBIPAddress=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNBIPAddress=matchReg.group(1)
                CONTAINERS.eNBIPAddress=matchReg.group(1)
                SCA.eNBIPAddress=matchReg.group(1)
                PHYSIM.eNBIPAddress=matchReg.group(1)
                CLUSTER.eNBIPAddress=matchReg.group(1)
            elif re.match('^\-\-eNB1IPAddress=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNB1IPAddress=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNB1IPAddress=matchReg.group(1)
                CONTAINERS.eNB1IPAddress=matchReg.group(1)
            elif re.match('^\-\-eNB2IPAddress=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNB2IPAddress=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNB2IPAddress=matchReg.group(1)
                CONTAINERS.eNB2IPAddress=matchReg.group(1)
        elif re.match('^\-\-eNBUserName=(.+)$|^\-\-eNB[1-2]UserName=(.+)$', myArgv, re.IGNORECASE):
            if re.match('^\-\-eNBUserName=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNBUserName=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNBUserName=matchReg.group(1)
                CONTAINERS.eNBUserName=matchReg.group(1)
                SCA.eNBUserName=matchReg.group(1)
                PHYSIM.eNBUserName=matchReg.group(1)
                CLUSTER.eNBUserName=matchReg.group(1)
            elif re.match('^\-\-eNB1UserName=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNB1UserName=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNB1UserName=matchReg.group(1)
                CONTAINERS.eNB1UserName=matchReg.group(1)
            elif re.match('^\-\-eNB2UserName=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNB2UserName=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNB2UserName=matchReg.group(1)
                CONTAINERS.eNB2UserName=matchReg.group(1)
        elif re.match('^\-\-eNBPassword=(.+)$|^\-\-eNB[1-2]Password=(.+)$', myArgv, re.IGNORECASE):
            if re.match('^\-\-eNBPassword=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNBPassword=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNBPassword=matchReg.group(1)
                CONTAINERS.eNBPassword=matchReg.group(1)
                SCA.eNBPassword=matchReg.group(1)
                PHYSIM.eNBPassword=matchReg.group(1)
                CLUSTER.eNBPassword=matchReg.group(1)
            elif re.match('^\-\-eNB1Password=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNB1Password=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNB1Password=matchReg.group(1)
                CONTAINERS.eNB1Password=matchReg.group(1)
            elif re.match('^\-\-eNB2Password=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNB2Password=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNB2Password=matchReg.group(1)
                CONTAINERS.eNB2Password=matchReg.group(1)
        elif re.match('^\-\-eNBSourceCodePath=(.+)$|^\-\-eNB[1-2]SourceCodePath=(.+)$', myArgv, re.IGNORECASE):
            if re.match('^\-\-eNBSourceCodePath=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNBSourceCodePath=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNBSourceCodePath=matchReg.group(1)
                CONTAINERS.eNBSourceCodePath=matchReg.group(1)
                SCA.eNBSourceCodePath=matchReg.group(1)
                PHYSIM.eNBSourceCodePath=matchReg.group(1)
                CLUSTER.eNBSourceCodePath=matchReg.group(1)
                EPC.eNBSourceCodePath=matchReg.group(1)
            elif re.match('^\-\-eNB1SourceCodePath=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNB1SourceCodePath=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNB1SourceCodePath=matchReg.group(1)
                CONTAINERS.eNB1SourceCodePath=matchReg.group(1)
            elif re.match('^\-\-eNB2SourceCodePath=(.+)$', myArgv, re.IGNORECASE):
                matchReg = re.match('^\-\-eNB2SourceCodePath=(.+)$', myArgv, re.IGNORECASE)
                RAN.eNB2SourceCodePath=matchReg.group(1)
                CONTAINERS.eNB2SourceCodePath=matchReg.group(1)
        elif re.match('^\-\-EPCIPAddress=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-EPCIPAddress=(.+)$', myArgv, re.IGNORECASE)
            EPC.IPAddress=matchReg.group(1)
        elif re.match('^\-\-EPCUserName=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-EPCUserName=(.+)$', myArgv, re.IGNORECASE)
            EPC.UserName=matchReg.group(1)
        elif re.match('^\-\-EPCPassword=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-EPCPassword=(.+)$', myArgv, re.IGNORECASE)
            EPC.Password=matchReg.group(1)
        elif re.match('^\-\-EPCSourceCodePath=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-EPCSourceCodePath=(.+)$', myArgv, re.IGNORECASE)
            EPC.SourceCodePath=matchReg.group(1)
        elif re.match('^\-\-EPCType=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-EPCType=(.+)$', myArgv, re.IGNORECASE)
            if re.match('OAI', matchReg.group(1), re.IGNORECASE) or re.match('ltebox', matchReg.group(1), re.IGNORECASE) or re.match('OAI-Rel14-Docker', matchReg.group(1), re.IGNORECASE) or re.match('OC-OAI-CN5G', matchReg.group(1), re.IGNORECASE):
                EPC.Type=matchReg.group(1)
            else:
                sys.exit('Invalid EPC Type: ' + matchReg.group(1) + ' -- (should be OAI or ltebox or OAI-Rel14-Docker or OC-OAI-CN5G)')
        elif re.match('^\-\-EPCContainerPrefix=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-EPCContainerPrefix=(.+)$', myArgv, re.IGNORECASE)
            EPC.ContainerPrefix=matchReg.group(1)
        elif re.match('^\-\-XMLTestFile=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-XMLTestFile=(.+)$', myArgv, re.IGNORECASE)
            CiTestObj.testXMLfiles.append(matchReg.group(1))
            HTML.testXMLfiles.append(matchReg.group(1))
            HTML.nbTestXMLfiles=HTML.nbTestXMLfiles+1
        elif re.match('^\-\-UEIPAddress=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-UEIPAddress=(.+)$', myArgv, re.IGNORECASE)
            CiTestObj.UEIPAddress = matchReg.group(1)
        elif re.match('^\-\-UEUserName=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-UEUserName=(.+)$', myArgv, re.IGNORECASE)
            CiTestObj.UEUserName = matchReg.group(1)
        elif re.match('^\-\-UEPassword=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-UEPassword=(.+)$', myArgv, re.IGNORECASE)
            CiTestObj.UEPassword = matchReg.group(1)
        elif re.match('^\-\-UESourceCodePath=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-UESourceCodePath=(.+)$', myArgv, re.IGNORECASE)
            CiTestObj.UESourceCodePath = matchReg.group(1)
        elif re.match('^\-\-finalStatus=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-finalStatus=(.+)$', myArgv, re.IGNORECASE)
            finalStatus = matchReg.group(1)
            if ((finalStatus == 'true') or (finalStatus == 'True')):
                CiTestObj.finalStatus = True
        elif re.match('^\-\-OCUserName=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-OCUserName=(.+)$', myArgv, re.IGNORECASE)
            PHYSIM.OCUserName = matchReg.group(1)
            CLUSTER.OCUserName = matchReg.group(1)
            EPC.OCUserName = matchReg.group(1)
        elif re.match('^\-\-OCPassword=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-OCPassword=(.+)$', myArgv, re.IGNORECASE)
            PHYSIM.OCPassword = matchReg.group(1)
            CLUSTER.OCPassword = matchReg.group(1)
            EPC.OCPassword = matchReg.group(1)
        elif re.match('^\-\-OCProjectName=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-OCProjectName=(.+)$', myArgv, re.IGNORECASE)
            PHYSIM.OCProjectName = matchReg.group(1)
            CLUSTER.OCProjectName = matchReg.group(1)
            EPC.OCProjectName = matchReg.group(1)
        elif re.match('^\-\-OCUrl=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-OCUrl=(.+)$', myArgv, re.IGNORECASE)
            CLUSTER.OCUrl = matchReg.group(1)
        elif re.match('^\-\-OCRegistry=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-OCRegistry=(.+)$', myArgv, re.IGNORECASE)
            CLUSTER.OCRegistry = matchReg.group(1)
        elif re.match('^\-\-BuildId=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-BuildId=(.+)$', myArgv, re.IGNORECASE)
            RAN.BuildId = matchReg.group(1)
        elif re.match('^\-\-FlexRicTag=(.+)$', myArgv, re.IGNORECASE):
            matchReg = re.match('^\-\-FlexRicTag=(.+)$', myArgv, re.IGNORECASE)
            CONTAINERS.flexricTag = matchReg.group(1)
        else:
            HELP.GenericHelp(CONST.Version)
            sys.exit('Invalid Parameter: ' + myArgv)
    return py_param_file_present, py_params, mode, force_local
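# Example invocation, mirroring the Jenkins pipelines above (all values are placeholders):
#   python3 main.py --mode=TesteNB --ranRepository=<git-url> --ranBranch=develop \
#     --ranCommitID=<sha1> --XMLTestFile=xml_files/enb_usrpB210_band7_50PRB.xml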
/* UE simulator configuration file version 2021-06-17
* LTE / 5G Non StandAlone
* Copyright (C) 2019-2021 Amarisoft
*/
{
#define N_ANTENNA_DL 1
#define TDD 1
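/* TDD == 1 selects the NR band 78 (TDD) cell parameters below;
   setting it to 0 selects the band 7 (FDD) alternative */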
log_options: "all.level=warn,all.max_size=0,nas.level=debug,nas.max_size=1,rrc.level=debug,rrc.max_size=1",
log_filename: "/tmp/ue1.log",
/* Enable remote API and Web interface */
com_addr: "0.0.0.0:9002",
include "rf_driver/1chan.cfg",
/* If true, allow the simulation of several UEs at the same time and
allow dynamic UE creation from remote API */
cell_groups: [{
group_type: "nr",
multi_ue: true,
cells: [{
rf_port: 0,
bandwidth: 20,
#if TDD == 1
band: 78,
dl_nr_arfcn:627360,
ssb_nr_arfcn:627360,
#else
band: 7,
dl_nr_arfcn: 536020,
ssb_nr_arfcn: 535930,
ssb_subcarrier_spacing: 15,
#endif
subcarrier_spacing: 30,
n_antenna_dl: N_ANTENNA_DL,
n_antenna_ul: 1,
rx_to_tx_latency:2,
}],
}],
ue_list: [
{
/* UE capabilities */
/* USIM data */
"ue_id" : 1,
"imsi": "001010000000100",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
/* Enable it to create a TUN interface for each UE PDN */
tun_setup_script: "ue-ifup",
},
{
/* UE capabilities */
/* USIM data */
"ue_id" : 2,
"imsi": "001010000000101",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
/* Enable it to create a TUN interface for each UE PDN */
tun_setup_script: "ue-ifup",
},
{
/* UE capabilities */
/* USIM data */
"ue_id" : 3,
"imsi": "001010000000102",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
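/* sd 66051 is the slice differentiator 0x010203 expressed in decimal */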
default_nssai: [ { sst: 1, sd: 66051, }, ],
default_pdu_session_snssai: { sst: 1, sd: 66051, },
/* Enable it to create a TUN interface for each UE PDN */
tun_setup_script: "ue-ifup",
},
{
/* UE capabilities */
/* USIM data */
"ue_id" : 4,
"imsi": "001010000000103",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, sd: 66051, }, ],
default_pdu_session_snssai: { sst: 1, sd: 66051, },
/* Enable it to create a TUN interface for each UE PDN */
tun_setup_script: "ue-ifup",
},
{
/* UE capabilities */
/* USIM data */
"ue_id" : 5,
"imsi": "001010000000104",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, sd: 66051, }, ],
default_pdu_session_snssai: { sst: 1, sd: 66051, },
/* Enable it to create a TUN interface for each UE PDN */
tun_setup_script: "ue-ifup",
},
{
/* UE capabilities */
/* USIM data */
"ue_id" : 6,
"imsi": "001010000000105",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, sd: 66051, }, ],
default_pdu_session_snssai: { sst: 1, sd: 66051, },
/* Enable it to create a TUN interface for each UE PDN */
tun_setup_script: "ue-ifup",
},
{
/* UE capabilities */
/* USIM data */
"ue_id" : 7,
"imsi": "001010000000106",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, sd: 66051, }, ],
default_pdu_session_snssai: { sst: 1, sd: 66051, },
/* Enable it to allow sim_events to be handled remotely */
//rue_addr: "127.1.0.0",
/* Enable it to create a TUN interface for each UE PDN */
tun_setup_script: "ue-ifup",
},
],
}
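The config above enables the simulator's remote API on com_addr 0.0.0.0:9002; the CI infrastructure file later in this document drives it with ws.js power_on/power_off messages. A minimal Python sketch of the same exchange, assuming the third-party websocket-client package and a simulator reachable at 127.0.0.1:9002:

import json
from websocket import create_connection  # pip install websocket-client

# Power on UE 1 through the Amarisoft remote API; mirrors the ws.js calls
# used by the CI infrastructure configuration further below.
ws = create_connection('ws://127.0.0.1:9002/', timeout=10)
ws.send(json.dumps({'message': 'power_on', 'ue_id': 1}))
print(ws.recv())  # the simulator replies with a JSON status message
ws.close()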
/* UE simulator configuration file version 2021-06-17
* LTE / 5G Non StandAlone
* Copyright (C) 2019-2021 Amarisoft
*/
{
#define N_ANTENNA_DL 1
#define N_ANTENNA_UL 1
#define DL_ARFCN 631296
#define TDD 1
log_options: "all.level=warn,all.max_size=0,nas.level=debug,nas.max_size=1,rrc.level=debug,rrc.max_size=1",
log_filename: "/tmp/ue.log",
/* Enable remote API and Web interface */
com_addr: "0.0.0.0:9002",
include "rf_driver_20/1chan.cfg",
/* If true, allow the simulation of several UEs at the same time and
allow dynamic UE creation from remote API */
cell_groups: [{
group_type: "nr",
multi_ue: true,
ldpc_max_its:8,
cells: [{
rf_port: 0,
bandwidth: 20,
band: 78,
dl_nr_arfcn: DL_ARFCN,
ssb_nr_arfcn: DL_ARFCN,
subcarrier_spacing: 30,
n_antenna_dl: N_ANTENNA_DL,
n_antenna_ul: N_ANTENNA_UL,
rx_to_tx_latency:2,
global_timing_advance:-1,
}],
}],
/* UE capabilities */
/* USIM data */
ue_list: [
{
"ue_id" : 1,
"imsi": "001020000000001",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 2,
"imsi": "001020000000002",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 3,
"imsi": "001020000000003",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 4,
"imsi": "001020000000004",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 5,
"imsi": "001020000000005",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 6,
"imsi": "001020000000006",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 7,
"imsi": "001020000000007",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 8,
"imsi": "001020000000008",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 9,
"imsi": "001020000000009",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 10,
"imsi": "001020000000010",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 11,
"imsi": "001020000000011",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 12,
"imsi": "001020000000012",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 13,
"imsi": "001020000000013",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 14,
"imsi": "001020000000014",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 15,
"imsi": "001020000000015",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 16,
"imsi": "001020000000016",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
],
}
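The sixteen ue_list entries above differ only in ue_id and IMSI; everything else is constant. A sketch of generating such a block programmatically (the K/OP values are the test credentials from the file, not real secrets):

# Generate the repetitive ue_list entries as Python dicts; the 15-digit
# IMSI is the fixed prefix 00102 plus a zero-padded 10-digit counter.
def make_ue(ue_id, imsi):
    return {
        'ue_id': ue_id,
        'imsi': imsi,
        'K': 'fec86ba6eb707ed08905757b1bb44b8f',
        'sim_algo': 'milenage',
        'op': '1006020f0a478bf6b699f15c062e42b3',
        'as_release': 15,
        'ue_category': 'nr',
        'apn': 'oai',
        'attach_pdn_type': 'ipv4',
        'default_nssai': [{'sst': 1}],
        'default_pdu_session_snssai': {'sst': 1},
        'tun_setup_script': 'ue-ifup',
    }

ue_list = [make_ue(i, f'00102{i:010d}') for i in range(1, 17)]
print(ue_list[0]['imsi'], ue_list[15]['imsi'])  # 001020000000001 .. 001020000000016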
/* UE simulator configuration file version 2021-06-17
* LTE / 5G Non StandAlone
* Copyright (C) 2019-2021 Amarisoft
*/
{
#define N_ANTENNA_DL 2
#define N_ANTENNA_UL 2
#define DL_ARFCN 631296
#define TDD 1
log_options: "all.level=warn,all.max_size=0,nas.level=debug,nas.max_size=1,rrc.level=debug,rrc.max_size=1",
log_filename: "/tmp/ue.log",
/* Enable remote API and Web interface */
com_addr: "0.0.0.0:9002",
include "rf_driver/1chan.cfg",
/* If true, allow the simulation of several UEs at the same time and
allow dynamic UE creation from remote API */
cell_groups: [{
group_type: "nr",
multi_ue: true,
cells: [{
rf_port: 0,
bandwidth: 20,
band: 78,
dl_nr_arfcn: DL_ARFCN,
ssb_nr_arfcn: DL_ARFCN,
subcarrier_spacing: 30,
n_antenna_dl: N_ANTENNA_DL,
n_antenna_ul: N_ANTENNA_UL,
rx_to_tx_latency:2,
}],
}],
/* UE capabilities */
/* USIM data */
ue_list: [
{
"ue_id" : 1,
"imsi": "001020000000001",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 2,
"imsi": "001020000000002",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 3,
"imsi": "001020000000003",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 4,
"imsi": "001020000000004",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 5,
"imsi": "001020000000005",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 6,
"imsi": "001020000000006",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 7,
"imsi": "001020000000007",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 8,
"imsi": "001020000000008",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 9,
"imsi": "001020000000009",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 10,
"imsi": "001020000000010",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 11,
"imsi": "001020000000011",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 12,
"imsi": "001020000000012",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 13,
"imsi": "001020000000013",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 14,
"imsi": "001020000000014",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 15,
"imsi": "001020000000015",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
{
"ue_id" : 16,
"imsi": "001020000000016",
"K": "fec86ba6eb707ed08905757b1bb44b8f",
"sim_algo":"milenage",
"op": "1006020f0a478bf6b699f15c062e42b3",
as_release: 15,
ue_category: "nr",
apn:"oai",
attach_pdn_type:"ipv4",
default_nssai: [ { sst: 1, }, ],
default_pdu_session_snssai: { sst: 1, },
tun_setup_script: "ue-ifup",
},
],
}
#!/bin/bash
ue_id="$1" # UE id
duration="$2" # Sim duration
shift
shift
function end
{
exit 0
}
trap end INT TERM
echo "ip netns exec $ue_id $@ > /tmp/test_${ue_id}.log"
ip netns exec "$ue_id" "$@" > "/tmp/test_${ue_id}.log"
# OAI is using a style that is similar to the Google style
--style=google
# long options can be written without the preceding '--'
# Convert tabs to spaces
convert-tabs
# Indentation is 2 spaces
indent=spaces=2
# Indent 'switch' blocks so that the 'case X:' statements are indented in the switch block.
indent-switches
# Indent C++ comments beginning in column one.
indent-col1-comments
# Pad empty lines around header blocks
break-blocks
delete-empty-lines
# Attach a pointer or reference operator (*, &, or ^) to the variable name (right)
align-pointer=name
# The code line length is 200 characters/columns
max-code-length=200
break-after-logical
lineend=linux
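The options file above is consumed by astyle via --options=. A sketch of the dry-run invocation the check scripts below build on, assuming astyle is installed and the file lives at ci-scripts/astyle-options.txt:

import subprocess

# Dry-run astyle with the options file above and count the files it would
# reformat (mirrors the `grep -c Formatted` pipeline used later).
res = subprocess.run(
    ['astyle', '--dry-run', '--options=ci-scripts/astyle-options.txt',
     '--recursive', '*.c', '*.h'],
    capture_output=True, text=True)
n = sum(1 for line in res.stdout.splitlines() if line.startswith('Formatted'))
print(f'Nb Files that do NOT follow OAI rules: {n}')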
#!/bin/bash
function usage {
echo "OAI Local Build Check script"
echo " Original Author: Raphael Defosseux"
echo ""
echo "Usage:"
echo "------"
echo " buildLocally.sh [OPTIONS]"
echo ""
echo "Options:"
echo "--------"
echo " --workspace #### OR -ws ####"
echo " Specify the workspace"
echo ""
echo " --help OR -h"
echo " Print this help message."
echo ""
}
if [ $# -ne 2 ] && [ $# -ne 1 ]
then
echo "Syntax Error: not the correct number of arguments"
echo ""
usage
exit 1
fi
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-h|--help)
shift
usage
exit 0
;;
-ws|--workspace)
JENKINS_WKSP="$2"
shift
shift
;;
*)
echo "Syntax Error: unknown option: $key"
echo ""
usage
exit 1
esac
done
cd $JENKINS_WKSP
############################################################
# Creating a tmp folder to store results and artifacts
############################################################
if [ ! -d $JENKINS_WKSP/archives ]
then
mkdir $JENKINS_WKSP/archives
fi
source oaienv
cd $JENKINS_WKSP/cmake_targets
############################################################
# Building eNb with USRP option
############################################################
ARCHIVES_LOC=$JENKINS_WKSP/archives/enb_usrp
if [ ! -d $ARCHIVES_LOC ]
then
mkdir $ARCHIVES_LOC
fi
./build_oai --eNB -w USRP -c
# Generated log files:
if [ -f $JENKINS_WKSP/cmake_targets/log/lte-softmodem.Rel14.txt ]
then
cp $JENKINS_WKSP/cmake_targets/log/lte-softmodem.Rel14.txt $ARCHIVES_LOC
fi
if [ -f $JENKINS_WKSP/cmake_targets/log/params_libconfig.Rel14.txt ]
then
cp $JENKINS_WKSP/cmake_targets/log/params_libconfig.Rel14.txt $ARCHIVES_LOC
fi
if [ -f $JENKINS_WKSP/cmake_targets/log/coding.Rel14.txt ]
then
cp $JENKINS_WKSP/cmake_targets/log/coding.Rel14.txt $ARCHIVES_LOC
fi
if [ -f $JENKINS_WKSP/cmake_targets/log/oai_usrpdevif.Rel14.txt ]
then
cp $JENKINS_WKSP/cmake_targets/log/oai_usrpdevif.Rel14.txt $ARCHIVES_LOC
fi
############################################################
# Building basic simulator
############################################################
ARCHIVES_LOC=$JENKINS_WKSP/archives/basic_sim
if [ ! -d $ARCHIVES_LOC ]
then
mkdir $ARCHIVES_LOC
fi
cd $JENKINS_WKSP/cmake_targets
./build_oai --basic-simulator -c
# Generated log files:
if [ -f $JENKINS_WKSP/cmake_targets/log/basic_simulator_enb.txt ]
then
cp $JENKINS_WKSP/cmake_targets/log/basic_simulator_enb.txt $ARCHIVES_LOC
fi
if [ -f $JENKINS_WKSP/cmake_targets/log/basic_simulator_ue.txt ]
then
cp $JENKINS_WKSP/cmake_targets/log/basic_simulator_ue.txt $ARCHIVES_LOC
fi
if [ -f $JENKINS_WKSP/cmake_targets/log/conf2uedata.Rel14.txt ]
then
cp $JENKINS_WKSP/cmake_targets/log/conf2uedata.Rel14.txt $ARCHIVES_LOC
fi
############################################################
# Creating a zip for Jenkins archiving
############################################################
cd $JENKINS_WKSP/archives/
zip -r local_build_logs.zip basic_sim enb_usrp
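The log-collection pattern in this script (copy each build log that exists, then zip the archive folder) can be expressed compactly. A Python sketch under the same directory layout, with '.' standing in for $JENKINS_WKSP:

import shutil
from pathlib import Path

# Copy the eNB/USRP build logs that exist into the archive folder,
# then zip everything under archives/ for Jenkins.
wksp = Path('.')  # stands in for $JENKINS_WKSP
archives = wksp / 'archives' / 'enb_usrp'
archives.mkdir(parents=True, exist_ok=True)
for name in ['lte-softmodem.Rel14.txt', 'params_libconfig.Rel14.txt',
             'coding.Rel14.txt', 'oai_usrpdevif.Rel14.txt']:
    src = wksp / 'cmake_targets' / 'log' / name
    if src.is_file():
        shutil.copy(src, archives)
shutil.make_archive(str(wksp / 'archives' / 'local_build_logs'), 'zip',
                    wksp / 'archives')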
#!/bin/bash
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
function usage {
echo "OAI Warning Check script"
echo " Original Author: Raphael Defosseux"
echo ""
echo "Usage:"
echo "------"
echo " checkAddedWarnings.sh [OPTIONS]"
echo ""
echo "Options:"
echo "--------"
echo " --src-branch #### OR -sb ####"
echo " Specify the source branch of the merge request."
echo ""
echo " --target-branch #### OR -tb ####"
echo " Specify the target branch of the merge request (usually develop)."
echo ""
echo " --help OR -h"
echo " Print this help message."
echo ""
}
if [ $# -ne 4 ] && [ $# -ne 1 ]
then
echo "Syntax Error: not the correct number of arguments"
echo ""
usage
exit 1
fi
checker=0
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-h|--help)
shift
usage
exit 0
;;
-sb|--src-branch)
SOURCE_BRANCH="$2"
let "checker|=0x1"
shift
shift
;;
-tb|--target-branch)
TARGET_BRANCH="$2"
let "checker|=0x2"
shift
shift
;;
*)
echo "Syntax Error: unknown option: $key"
echo ""
usage
exit 1
esac
done
if [ $checker -ne 3 ]
then
echo "Source Branch is : $SOURCE_BRANCH"
echo "Target Branch is : $TARGET_BRANCH"
echo ""
echo "Syntax Error: missing option"
echo ""
usage
exit 1
fi
# Merge request scenario
MERGE_COMMIT=`git log -n1 --pretty=format:%H`
TARGET_INIT_COMMIT=`cat .git/refs/remotes/origin/$TARGET_BRANCH`
echo " ---- Checking the modified files by the merge request ----"
echo ""
echo "Source Branch is : $SOURCE_BRANCH"
echo "Target Branch is : $TARGET_BRANCH"
echo "Merged Commit is : $MERGE_COMMMIT"
echo "Target Init is : $TARGET_INIT_COMMIT"
# Retrieve the list of modified files since the latest develop commit
MODIFIED_FILES=`git log $TARGET_INIT_COMMIT..$MERGE_COMMIT --oneline --name-status | grep -E "^M|^A" | sed -e "s@^M\t*@@" -e "s@^A\t*@@" | sort | uniq`
NB_WARNINGS_FILES=0
# Retrieve list of warnings
LIST_WARNING_FILES=`grep -E "error:|warning:" archives/*/*.txt | grep -E -v "jobserver unavailable|Clock skew detected." | sed -e "s#^.*/home/ubuntu/tmp/##" -e "s#^.*/tmp/CI-eNB/##" -e "s#common/utils/.*/itti#common/utils/itti#" | awk -F ":" '{print $1}' | sort | uniq`
echo ""
echo "List of files that have been modified by the Merge Request AND"
echo " that have compilation warnings/errors"
echo "--------------------------------------------------------------------"
declare -a ARRAYNAME
for FULLFILE in $MODIFIED_FILES
do
filename=$(basename -- "$FULLFILE")
EXT="${filename##*.}"
if [ $EXT = "c" ] || [ $EXT = "h" ] || [ $EXT = "cpp" ] || [ $EXT = "hpp" ]
then
for WARNING_FILE in $LIST_WARNING_FILES
do
if [ $FULLFILE = $WARNING_FILE ]
then
echo $WARNING_FILE
ARRAYNAME[$NB_WARNINGS_FILES]=$WARNING_FILE
NB_WARNINGS_FILES=$((NB_WARNINGS_FILES + 1))
fi
done
fi
done
echo ""
echo "NB Files impacted by warnings/errors in Merge Request: $NB_WARNINGS_FILES"
echo $NB_WARNINGS_FILES > oai_warning_files.txt
echo ${ARRAYNAME[*]} > oai_warning_files_list.txt
exit 0
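The nested loops above compute the intersection of the modified-file list and the warning-file list, restricted to C/C++ sources. With sets, the same computation collapses to one expression; the inputs here are illustrative:

# Set-based sketch of the double loop above.
modified_files = {'openair1/PHY/foo.c', 'doc/README.md', 'openair2/bar.h'}
warning_files = {'openair1/PHY/foo.c', 'openair3/baz.c'}
code_ext = ('.c', '.h', '.cpp', '.hpp')
impacted = {f for f in modified_files if f.endswith(code_ext)} & warning_files
print(len(impacted), sorted(impacted))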
#!/bin/bash
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
function usage {
echo "OAI Coding / Formatting Guideline Check script"
echo " Original Author: Raphael Defosseux"
echo ""
echo " Requirement: astyle shall be installed"
echo ""
echo " By default (no options) the complete repository will be checked"
echo " In case of merge request, provided source and target branch,"
echo " the script will check only the modified files"
fi
if [ $# -eq 0 ]
then
echo " ---- Checking the whole repository ----"
echo ""
# in this file we previously had a list of files that were not properly
# formatted. At the time of this MR, the Jenkinsfile expects this file, so
# we simply produce an empty one
touch ./oai_rules_result.txt
# Testing Circular Dependencies protection
awk '/#[ \t]*ifndef/ { gsub("^.*ifndef *",""); if (names[$1]!="") print "files with same #define ", FILENAME, names[$1]; names[$1]=FILENAME } /#[ \t]*define/ { gsub("^.*define *",""); if(names[$1]!=FILENAME) print "error in declaration", FILENAME, $1, names[$1]; nextfile }' `find openair* common targets executables -name '*.h' | grep -v LFDS` > header-files-w-incorrect-define.txt
# Testing if explicit GNU GPL license banner
grep -E -irl --exclude-dir=.git --include=*.cpp --include=*.c --include=*.h "General Public License" . | grep -E -v "openair3/NAS/COMMON/milenage.h" > files-w-gnu-gpl-license-banner.txt
# Looking at exotic/suspect banner
LIST_OF_FILES_W_BANNER=`grep -E -irl --exclude-dir=.git --include=*.cpp --include=*.c --include=*.h "Copyright|copyleft" .`
if [ -f ./files-w-suspect-banner.txt ]; then rm -f ./files-w-suspect-banner.txt; fi
for FILE in $LIST_OF_FILES_W_BANNER
do
IS_NFAPI=`echo $FILE | grep -E -c "nfapi/open-nFAPI|nfapi/oai_integration/vendor_ext" || true`
IS_OAI_LICENCE_PRESENT=`grep -E -c "OAI Public License" $FILE || true`
IS_BSD_LICENCE_PRESENT=`grep -E -c "the terms of the BSD Licence|License-Identifier: BSD-2-Clause|License-Identifier: BSD-3-Clause" $FILE || true`
IS_MIT_LICENCE_PRESENT=`grep -E -c "MIT License" $FILE || true`
IS_EXCEPTION=`echo $FILE | grep -E -c "common/utils/collection/tree.h|common/utils/collection/queue.h|openair2/UTIL/OPT/packet-rohc.h|openair3/NAS/COMMON/milenage.h|openair1/PHY/CODING/crc.h|openair1/PHY/CODING/crcext.h|openair1/PHY/CODING/types.h" || true`
if [ $IS_OAI_LICENCE_PRESENT -eq 0 ] && [ $IS_BSD_LICENCE_PRESENT -eq 0 ] && [ $IS_MIT_LICENCE_PRESENT -eq 0 ]
then
if [ $IS_NFAPI -eq 0 ] && [ $IS_EXCEPTION -eq 0 ]
then
echo $FILE >> ./files-w-suspect-banner.txt
fi
fi
done
exit 0
fi
fi
# Merge request scenario
MERGE_COMMIT=`git log -n1 --pretty=format:%H`
TARGET_INIT_COMMIT=`git log -n1 --pretty=format:%H origin/$TARGET_BRANCH`
echo " ---- Checking the modified files by the merge request ----"
echo ""
echo "Source Branch is : $SOURCE_BRANCH"
echo "Target Branch is : $TARGET_BRANCH"
echo "Merged Commit is : $MERGE_COMMMIT"
echo "Target Init is : $TARGET_INIT_COMMIT"
echo ""
echo " ----------------------------------------------------------"
echo ""
# Retrieve the list of modified files since the latest develop commit
MODIFIED_FILES=`git log $TARGET_INIT_COMMIT..$MERGE_COMMIT --oneline --name-status | grep -E "^M|^A" | sed -e "s@^M\t*@@" -e "s@^A\t*@@" | sort | uniq`
NB_TO_FORMAT=0
if [ -f header-files-w-incorrect-define.txt ]
then
rm -f header-files-w-incorrect-define.txt
fi
if [ -f files-w-gnu-gpl-license-banner.txt ]
then
rm -f files-w-gnu-gpl-license-banner.txt
fi
if [ -f files-w-suspect-banner.txt ]
then
rm -f files-w-suspect-banner.txt
fi
awk '/#[ \t]*ifndef/ { gsub("^.*ifndef *",""); if (names[$1]!="") print "files with same #define ", FILENAME, names[$1]; names[$1]=FILENAME } /#[ \t]*define/ { gsub("^.*define *",""); if(names[$1]!=FILENAME) print "error in declaration", FILENAME, $1, names[$1]; nextfile }' `find openair* common targets executables -name '*.h' | grep -v LFDS` > header-files-w-incorrect-define-tmp.txt
for FULLFILE in $MODIFIED_FILES
do
echo $FULLFILE
# sometimes, we remove files
if [ ! -f $FULLFILE ]; then continue; fi
filename=$(basename -- "$FULLFILE")
EXT="${filename##*.}"
if [ $EXT = "c" ] || [ $EXT = "h" ] || [ $EXT = "cpp" ] || [ $EXT = "hpp" ]
then
TO_FORMAT=`astyle --dry-run --options=ci-scripts/astyle-options.txt $FULLFILE | grep -c Formatted `
NB_TO_FORMAT=$((NB_TO_FORMAT + TO_FORMAT))
# Testing if explicit GNU GPL license banner
GNU_EXCEPTION=`echo $FULLFILE | grep -E -c "openair3/NAS/COMMON/milenage.h" || true`
if [ $GNU_EXCEPTION -eq 0 ]
then
grep -E -il "General Public License" $FULLFILE >> files-w-gnu-gpl-license-banner.txt
fi
# Looking at exotic/suspect banner
IS_BANNER=`grep -E -i -c "Copyright|copyleft" $FULLFILE || true`
if [ $IS_BANNER -ne 0 ]
then
IS_NFAPI=`echo $FULLFILE | grep -E -c "nfapi/open-nFAPI|nfapi/oai_integration/vendor_ext" || true`
IS_OAI_LICENCE_PRESENT=`grep -E -c "OAI Public License" $FULLFILE || true`
IS_BSD_LICENCE_PRESENT=`grep -E -c "the terms of the BSD Licence|License-Identifier: BSD-2-Clause|License-Identifier: BSD-3-Clause" $FULLFILE || true`
IS_MIT_LICENCE_PRESENT=`grep -E -c "MIT License" $FULLFILE || true`
IS_EXCEPTION=`echo $FULLFILE | grep -E -c "common/utils/collection/tree.h|common/utils/collection/queue.h|openair2/UTIL/OPT/packet-rohc.h|openair3/NAS/COMMON/milenage.h|openair1/PHY/CODING/crc.h|openair1/PHY/CODING/crcext.h|openair1/PHY/CODING/types.h" || true`
if [ $IS_OAI_LICENCE_PRESENT -eq 0 ] && [ $IS_BSD_LICENCE_PRESENT -eq 0 ] && [ $IS_MIT_LICENCE_PRESENT -eq 0 ]
then
if [ $IS_NFAPI -eq 0 ] && [ $IS_EXCEPTION -eq 0 ]
then
echo $FULLFILE >> ./files-w-suspect-banner.txt
fi
fi
fi
fi
# Testing Circular Dependencies protection
if [ $EXT = "h" ] || [ $EXT = "hpp" ]
then
grep $FULLFILE header-files-w-incorrect-define-tmp.txt >> header-files-w-incorrect-define.txt
fi
done
echo "Nb Files that do NOT follow OAI rules: $NB_TO_FORMAT"
echo $NB_TO_FORMAT > ./oai_rules_result.txt
rm -f header-files-w-incorrect-define-tmp.txt
# in this script we previously produced a list of files that were not properly
# formatted. At the time of this MR, the Jenkinsfile expects this file, so
# we simply produce an empty file
touch ./oai_rules_result.txt
exit 0
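Both branches of this script classify license banners the same way: a Copyright/copyleft marker makes a file suspect unless it carries a known license or its path is whitelisted. A condensed Python sketch of that rule, with the accepted markers copied from the script and the exception list shortened:

import re

# A file is suspect if it mentions Copyright/copyleft but carries none of
# the accepted licence markers and is not on the exception list.
ACCEPTED = ('OAI Public License', 'the terms of the BSD Licence',
            'License-Identifier: BSD-2-Clause',
            'License-Identifier: BSD-3-Clause', 'MIT License')
EXCEPTIONS = re.compile(r'nfapi/open-nFAPI|nfapi/oai_integration/vendor_ext'
                        r'|openair3/NAS/COMMON/milenage\.h')

def is_suspect(path, text):
    if not re.search(r'copyright|copyleft', text, re.IGNORECASE):
        return False
    if any(marker in text for marker in ACCEPTED):
        return False
    return EXCEPTIONS.search(path) is None

print(is_suspect('openair2/foo.c', '/* Copyright ACME */'))  # True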
#!/bin/bash
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
function usage {
echo "OAI GitLab merge request applying script"
echo " Original Author: Raphael Defosseux"
echo ""
echo "Usage:"
echo "------"
echo ""
echo " checkGitLabMergeRequestLabels.sh [OPTIONS]"
echo ""
echo "Options:"
echo "------------------"
echo ""
echo " --mr-id ####"
echo " Specify the ID of the merge request."
echo ""
echo " --help OR -h"
echo " Print this help message."
echo ""
}
if [ $# -ne 2 ] && [ $# -ne 1 ]
then
echo "Syntax Error: not the correct number of arguments"
echo ""
usage
exit 1
fi
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-h|--help)
shift
usage
exit 0
;;
--mr-id)
MERGE_REQUEST_ID="$2"
shift
shift
;;
*)
echo "Syntax Error: unknown option: $key"
echo ""
usage
exit 1
esac
done
LABELS=`curl --silent "https://gitlab.eurecom.fr/api/v4/projects/oai%2Fopenairinterface5g/merge_requests/$MERGE_REQUEST_ID" | jq '.labels' || true`
IS_MR_DOCUMENTATION=`echo $LABELS | grep -ic documentation`
IS_MR_BUILD_ONLY=`echo $LABELS | grep -c BUILD-ONLY`
IS_MR_CI=`echo $LABELS | grep -c CI`
IS_MR_4G=`echo $LABELS | grep -c 4G-LTE`
IS_MR_5G=`echo $LABELS | grep -c 5G-NR`
IS_MR_5G_UE=`echo $LABELS | grep -c nrUE`
# none is present! No CI
if [ $IS_MR_BUILD_ONLY -eq 0 ] && [ $IS_MR_CI -eq 0 ] && [ $IS_MR_4G -eq 0 ] && [ $IS_MR_5G -eq 0 ] && [ $IS_MR_DOCUMENTATION -eq 0 ] && [ $IS_MR_5G_UE -eq 0 ]
then
echo "NONE"
exit 0
fi
# 4G and 5G or CI labels: run everything (4G, 5G)
if [ $IS_MR_4G -eq 1 ] && [ $IS_MR_5G -eq 1 ] || [ $IS_MR_CI -eq 1 ]
then
echo "FULL"
exit 0
fi
if [ $IS_MR_5G_UE -eq 1 ] && [ $IS_MR_4G -eq 1 ]
then
echo "SHORTEN-4G-5G-UE"
exit 0
fi
# 4G is present: run only 4G
if [ $IS_MR_4G -eq 1 ]
then
echo "SHORTEN-4G"
exit 0
fi
# 5G is present: run only 5G
if [ $IS_MR_5G -eq 1 ]
then
echo "SHORTEN-5G"
exit 0
fi
if [ $IS_MR_5G_UE -eq 1 ]
then
echo "SHORTEN-5G-UE"
exit 0
fi
# BUILD-ONLY is present: only build stages
if [ $IS_MR_BUILD_ONLY -eq 1 ]
then
echo "BUILD-ONLY"
exit 0
fi
# Documentation is present: don't do anything
if [ $IS_MR_DOCUMENTATION -eq 1 ]
then
echo "documentation"
exit 0
fi
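The same label-to-pipeline mapping can be reproduced without curl and jq. A sketch using the requests package against the same GitLab endpoint, with an illustrative MR id and only the first few decision rules:

import requests

# Fetch the MR labels like the curl|jq pipeline above; && binds before ||
# in the shell version, matching the parenthesization below.
mr_id = 1234  # illustrative
url = ('https://gitlab.eurecom.fr/api/v4/projects/'
       f'oai%2Fopenairinterface5g/merge_requests/{mr_id}')
labels = requests.get(url, timeout=10).json().get('labels', [])
if ('4G-LTE' in labels and '5G-NR' in labels) or 'CI' in labels:
    print('FULL')
elif '4G-LTE' in labels:
    print('SHORTEN-4G')
elif '5G-NR' in labels:
    print('SHORTEN-5G')
else:
    print('NONE')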
#!/bin/bash
usage() {
echo "usage: $0 <command> <id>"
echo "available commands: initialize, attach, detach, terminate, check"
}
if [ $# -ne 2 ]; then
usage
exit 1
fi
cmd=$1
id=$2
flightmode_off() {
set +x
adb -s $id shell "/data/local/tmp/on"
}
flightmode_on() {
set +x
adb -s $id shell "/data/local/tmp/off"
}
initialize() {
set +x
adb -s $id shell "svc data enable" # make sure data services are enabled
flightmode_on
}
terminate() {
echo "terminate: does nothing"
}
check() {
declare -A service=( ["0"]="IN_SERVICE" ["1"]="OUT_OF_SERVICE" ["2"]="EMERGENCY_ONLY" ["3"]="RADIO_POWERED_OFF")
declare -A data=( ["0"]="DISCONNECTED" ["1"]="CONNECTING" ["2"]="CONNECTED" ["3"]="SUSPENDED")
serv_idx=$(adb -s $id shell "dumpsys telephony.registry" | sed -n 's/.*mServiceState=\([0-3]\).*/\1/p')
data_idx=$(adb -s $id shell "dumpsys telephony.registry" | sed -n 's/.*mDataConnectionState=\([0-3]\).*/\1/p')
data_reason=$(adb -s $id shell "dumpsys telephony.registry" | sed -n 's/.*mDataConnectionReason=\([0-9a-zA-Z_]\+\).*/\1/p')
#echo "Status Check UE $id"
echo "Service State: ${service[$serv_idx]}"
echo "Data State: ${data[$data_idx]}"
echo "Data Reason: ${data_reason}"
}
case "${cmd}" in
initialize) initialize;;
attach) flightmode_off;;
detach) flightmode_on;;
terminate) terminate;;
check) check;;
*) echo "Invalid command $cmd"; usage; exit 1;;
esac
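check() shells out to dumpsys three times and scrapes each field with sed. The equivalent from Python takes one adb call and a regex per field; the serial number below is illustrative:

import re
import subprocess

# One dumpsys call, then regex extraction of the service state.
SERVICE = {0: 'IN_SERVICE', 1: 'OUT_OF_SERVICE',
           2: 'EMERGENCY_ONLY', 3: 'RADIO_POWERED_OFF'}
out = subprocess.run(['adb', '-s', 'R3CM40LZPHT', 'shell',
                      'dumpsys telephony.registry'],
                     capture_output=True, text=True).stdout
m = re.search(r'mServiceState=([0-3])', out)
print('Service State:', SERVICE[int(m.group(1))] if m else 'unknown')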
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
#
# Required Python Version
# Python 3.x
#
#---------------------------------------------------------------------
#usage example:
#sudo python3 ci_ctl_qtel.py /dev/ttyUSB2 wup
#sudo python3 ci_ctl_qtel.py /dev/ttyUSB2 detach
import sys
import time
import serial
class qtel_ctl:
    #---------------
    #private methods
    #---------------
    def __init__(self, usb_port_at):
        self.QUECTEL_USB_PORT_AT = usb_port_at  # ex: '/dev/ttyUSB2'
        self.modem = serial.Serial(self.QUECTEL_USB_PORT_AT, timeout=1)
        self.cmd_dict = {"wup": self.wup, "detach": self.detach}  # dictionary of function pointers

    def __set_modem_state(self, ser, state):
        self.__send_command(ser, "AT+CFUN={}\r".format(state))

    def __send_command(self, ser, com):
        ser.write(com.encode())
        time.sleep(0.1)
        ret = []
        while ser.inWaiting() > 0:
            print("waiting")
            msg = ser.readline()
            msg = msg.decode("utf-8")
            msg = msg.replace("\r", "")
            msg = msg.replace("\n", "")
            print(msg)
            if msg != "":
                ret.append(msg)
            else:
                print("msg empty")
        return ret

    #--------------
    #public methods
    #--------------
    def wup(self):  # sending AT+CFUN=0, then AT+CFUN=1
        self.__set_modem_state(self.modem, '0')
        time.sleep(3)
        self.__set_modem_state(self.modem, '1')

    def detach(self):  # sending AT+CFUN=0
        self.__set_modem_state(self.modem, '0')

if __name__ == "__main__":
    #argv[1] : usb port
    #argv[2] : qtel command (see function pointers dict "wup", "detach" etc ...)
    if len(sys.argv) >= 3:
        command = sys.argv[2]
        print(command)
        Module = qtel_ctl(sys.argv[1])
        #calling the function to be applied
        Module.cmd_dict[command]()
        print(Module.cmd_dict[command])
    else:
        print("Too few arguments: usage: ci_ctl_qtel.py <usb_port> <command>")
idefix:
    Host: idefix
    InitScript: sudo stdbuf -oL /home/oaicicd/quectel-CM/quectel-CM -4 -s oai.ipv4 &> /tmp/quectel-cm.log &
    TermScript: sudo -S killall --signal SIGKILL quectel-CM
    AttachScript: sudo python3 ci_ctl_qtel.py /dev/ttyUSB2 wup
    DetachScript: sudo python3 ci_ctl_qtel.py /dev/ttyUSB2 detach
    NetworkScript: ip a show dev wwan0
    IF: wwan0
    MTU: 1500
    Trace: True
    LogStore: /media/usb-drive/ci_qlogs
up2:
    Host: up2
    AttachScript: sudo /opt/mbim/start_quectel_mbim.sh
    DetachScript: sudo /opt/mbim/stop_quectel_mbim.sh
    NetworkScript: ip a show dev wwan0
    IF: wwan0
    MTU: 1500
up2-fhi72:
    Host: up2
    AttachScript: sudo /opt/mbim-fhi72/start_quectel_mbim.sh
    DetachScript: sudo /opt/mbim-fhi72/stop_quectel_mbim.sh
    NetworkScript: ip a show dev wwan0
    IF: wwan0
    MTU: 1500
up2-aerial:
    Host: up2
    AttachScript: sudo /opt/mbim-fhi72/start_quectel_mbim.sh
    DetachScript: sudo /opt/mbim-fhi72/stop_quectel_mbim.sh
    NetworkScript: ip a show dev wwan0
    IF: wwan0
    MTU: 1500
adb_ue_1:
    Host: nano
    InitScript: /home/oaicicd/ci_ctl_adb.sh initialize R3CM40LZPHT
    TermScript: /home/oaicicd/ci_ctl_adb.sh terminate R3CM40LZPHT
    AttachScript: /home/oaicicd/ci_ctl_adb.sh attach R3CM40LZPHT
    DetachScript: /home/oaicicd/ci_ctl_adb.sh detach R3CM40LZPHT
    CheckStatusScript: /home/oaicicd/ci_ctl_adb.sh check R3CM40LZPHT
    DataEnableScript: adb -s R3CM40LZPHT shell "svc data enable"
    DataDisableScript: adb -s R3CM40LZPHT shell "svc data disable"
    NetworkScript: adb -s R3CM40LZPHT shell "ip address show | grep rmnet_data0"
    CmdPrefix: adb -s R3CM40LZPHT shell
    MTU: 1500
    LogStore: /media/usb-drive/ci_adb_1-logs
adb_ue_2:
    Host: nano
    InitScript: /home/oaicicd/ci_ctl_adb.sh initialize 5200c00db4413517
    TermScript: /home/oaicicd/ci_ctl_adb.sh terminate 5200c00db4413517
    AttachScript: /home/oaicicd/ci_ctl_adb.sh attach 5200c00db4413517
    DetachScript: /home/oaicicd/ci_ctl_adb.sh detach 5200c00db4413517
    CheckStatusScript: /home/oaicicd/ci_ctl_adb.sh check 5200c00db4413517
    DataEnableScript: adb -s 5200c00db4413517 shell "svc data enable"
    DataDisableScript: adb -s 5200c00db4413517 shell "svc data disable"
    NetworkScript: adb -s 5200c00db4413517 shell "ip address show | grep rmnet"
    CmdPrefix: adb -s 5200c00db4413517 shell
    MTU: 1500
    LogStore: /media/usb-drive/ci_adb_2-logs
oc-cn5g:
    Host: avra
    Namespace: "oaicicd-core-for-ci-ran"
    CNPath: "/opt/oai-cn5g-fed-develop-2025-jan"
    NetworkScript: echo "inet 172.21.6.102"
    RunIperf3Server: False
oc-cn5g-20897:
    Host: cacofonix
    Namespace: "oaicicd-core-for-fhi72"
    CNPath: "/opt/oai-cn5g-fed-develop-2025-jan"
    NetworkScript: echo "inet 172.21.6.105"
    RunIperf3Server: False
oc-cn5g-20897-aerial2:
    Host: aerial2
    Namespace: "oaicicd-core-for-nvidia-aerial"
    CNPath: "/opt/oai-cn5g-fed-develop-2025-jan"
    NetworkScript: echo "inet 172.21.6.105"
    RunIperf3Server: False
matix-cn5g:
    Host: matix
    NetworkScript: docker exec prod-trf-gen ip a show dev eth0
    CmdPrefix: docker exec prod-trf-gen
    RunIperf3Server: False
porcepix-cn4g:
    Host: porcepix
    NetworkScript: docker exec prod-trf-gen ip a show dev eth0
    CmdPrefix: docker exec prod-trf-gen
porcepix-cn5g:
    Host: porcepix
    NetworkScript: docker exec prod-trf-gen ip a show dev eth0
    CmdPrefix: docker exec prod-trf-gen
    RunIperf3Server: False
nano-cn4g:
    Host: nano
    NetworkScript: docker exec prod-trf-gen ip a show dev eth0
    CmdPrefix: docker exec prod-trf-gen
ltebox-nano:
    Host: nano
    NetworkScript: ip a show dev tun1
sabox-nepes:
    Host: nepes
    NetworkScript: ip a show dev tun1
ltebox-nepes:
    Host: nepes
    NetworkScript: ip a show dev tun1
amarisoft_ue:
    Host: amariue
    InitScript: /root/lteue-linux-2023-10-27/ue/lteue /root/oaicicd/ran_sa_aw2s_asue/aw2s-multi-00102-20.cfg &
    TermScript: /root/2023-10-27/libs/ws.js -t 10 127.0.0.1:9002 '{"message":"quit"}' || killall -KILL lteue-avx2
amarisoft_ue_2x2:
    Host: amariue
    InitScript: /root/lteue-linux-2023-10-27/ue/lteue /root/oaicicd/ran_sa_aw2s_asue_2x2/aw2s-multi-00102-2x2-v2.cfg &
    TermScript: /root/2023-10-27/libs/ws.js -t 10 127.0.0.1:9002 '{"message":"quit"}' || killall -KILL lteue-avx2
amarisoft_ue_fhi72:
    Host: amariue
    InitScript: /root/lteue-linux-2023-10-27/ue/lteue /root/oaicicd/ran_sa_fhi72_asue_2x2_benetel550/fhi72-multi-20897-2x2.cfg &
    TermScript: /root/2023-10-27/libs/ws.js -t 10 127.0.0.1:9002 '{"message":"quit"}' || killall -KILL lteue-avx2
amarisoft_ue_1:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":1}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":1}'
    NetworkScript: ip netns exec ue1 ip a show dev pdn0
    CmdPrefix: ip netns exec ue1
    MTU: 1500
amarisoft_ue_2:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":2}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":2}'
    NetworkScript: ip netns exec ue2 ip a show dev pdn0
    CmdPrefix: ip netns exec ue2
    MTU: 1500
amarisoft_ue_3:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":3}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":3}'
    NetworkScript: ip netns exec ue3 ip a show dev pdn0
    CmdPrefix: ip netns exec ue3
    MTU: 1500
amarisoft_ue_4:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":4}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":4}'
    NetworkScript: ip netns exec ue4 ip a show dev pdn0
    CmdPrefix: ip netns exec ue4
    MTU: 1500
amarisoft_ue_5:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":5}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":5}'
    NetworkScript: ip netns exec ue5 ip a show dev pdn0
    CmdPrefix: ip netns exec ue5
    MTU: 1500
amarisoft_ue_6:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":6}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":6}'
    NetworkScript: ip netns exec ue6 ip a show dev pdn0
    CmdPrefix: ip netns exec ue6
    MTU: 1500
amarisoft_ue_7:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":7}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":7}'
    NetworkScript: ip netns exec ue7 ip a show dev pdn0
    CmdPrefix: ip netns exec ue7
    MTU: 1500
amarisoft_ue_8:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":8}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":8}'
    NetworkScript: ip netns exec ue8 ip a show dev pdn0
    CmdPrefix: ip netns exec ue8
    MTU: 1500
amarisoft_ue_9:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":9}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":9}'
    NetworkScript: ip netns exec ue9 ip a show dev pdn0
    CmdPrefix: ip netns exec ue9
    MTU: 1500
amarisoft_ue_10:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":10}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":10}'
    NetworkScript: ip netns exec ue10 ip a show dev pdn0
    CmdPrefix: ip netns exec ue10
    MTU: 1500
amarisoft_ue_11:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":11}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":11}'
    NetworkScript: ip netns exec ue11 ip a show dev pdn0
    CmdPrefix: ip netns exec ue11
    MTU: 1500
amarisoft_ue_12:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":12}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":12}'
    NetworkScript: ip netns exec ue12 ip a show dev pdn0
    CmdPrefix: ip netns exec ue12
    MTU: 1500
amarisoft_ue_13:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":13}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":13}'
    NetworkScript: ip netns exec ue13 ip a show dev pdn0
    CmdPrefix: ip netns exec ue13
    MTU: 1500
amarisoft_ue_14:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":14}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":14}'
    NetworkScript: ip netns exec ue14 ip a show dev pdn0
    CmdPrefix: ip netns exec ue14
    MTU: 1500
amarisoft_ue_15:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":15}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":15}'
    NetworkScript: ip netns exec ue15 ip a show dev pdn0
    CmdPrefix: ip netns exec ue15
    MTU: 1500
amarisoft_ue_16:
    Host: amariue
    AttachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_on","ue_id":16}'
    DetachScript: /root/2023-10-27/libs/ws.js 127.0.0.1:9002 '{"message":"power_off","ue_id":16}'
    NetworkScript: ip netns exec ue16 ip a show dev pdn0
    CmdPrefix: ip netns exec ue16
    MTU: 1500
oai_ue_caracal:
    Host: caracal
    AttachScript: 'docker start oai-nr-ue'
    DetachScript: 'docker stop oai-nr-ue'
    NetworkScript: ip a show dev oaitun_ue1
    IF: oaitun_ue1
    MTU: 1500
lte_oai_ue_carabe:
    Host: carabe
    NetworkScript: docker exec lte-b200-ue-fdd-10Mhz-tm1 ip a show dev oaitun_ue1
    CmdPrefix: docker exec lte-b200-ue-fdd-10Mhz-tm1
    IF: oaitun_ue1
    MTU: 1500
rfsim5g_gnb_nos1:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-gnb ip a show dev oaitun_enb1
    CmdPrefix: docker exec rfsim5g-oai-gnb
    IF: oaitun_enb1
rfsim5g_ue:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-nr-ue ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim5g-oai-nr-ue
    IF: oaitun_ue1
    AttachScript: docker start rfsim5g-oai-nr-ue
    DetachScript: docker stop rfsim5g-oai-nr-ue
    MTU: 1500
rfsim5g_ue2:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-nr-ue2 ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim5g-oai-nr-ue2
    IF: oaitun_ue1
    AttachScript: docker start rfsim5g-oai-nr-ue2
    DetachScript: docker stop rfsim5g-oai-nr-ue2
    MTU: 1500
rfsim5g_ue3:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-nr-ue3 ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim5g-oai-nr-ue3
    IF: oaitun_ue1
    AttachScript: docker start rfsim5g-oai-nr-ue3
    DetachScript: docker stop rfsim5g-oai-nr-ue3
    MTU: 1500
rfsim5g_ue4:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-nr-ue4 ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim5g-oai-nr-ue4
    IF: oaitun_ue1
    AttachScript: docker start rfsim5g-oai-nr-ue4
    DetachScript: docker stop rfsim5g-oai-nr-ue4
    MTU: 1500
rfsim5g_ue5:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-nr-ue5 ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim5g-oai-nr-ue5
    IF: oaitun_ue1
    AttachScript: docker start rfsim5g-oai-nr-ue5
    DetachScript: docker stop rfsim5g-oai-nr-ue5
    MTU: 1500
rfsim5g_ue6:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-nr-ue6 ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim5g-oai-nr-ue6
    IF: oaitun_ue1
    AttachScript: docker start rfsim5g-oai-nr-ue6
    DetachScript: docker stop rfsim5g-oai-nr-ue6
    MTU: 1500
rfsim5g_ue7:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-nr-ue7 ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim5g-oai-nr-ue7
    IF: oaitun_ue1
    AttachScript: docker start rfsim5g-oai-nr-ue7
    DetachScript: docker stop rfsim5g-oai-nr-ue7
    MTU: 1500
rfsim5g_ue8:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-nr-ue8 ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim5g-oai-nr-ue8
    IF: oaitun_ue1
    AttachScript: docker start rfsim5g-oai-nr-ue8
    DetachScript: docker stop rfsim5g-oai-nr-ue8
    MTU: 1500
rfsim5g_ue9:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-nr-ue9 ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim5g-oai-nr-ue9
    IF: oaitun_ue1
    AttachScript: docker start rfsim5g-oai-nr-ue9
    DetachScript: docker stop rfsim5g-oai-nr-ue9
    MTU: 1500
rfsim5g_ue10:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-nr-ue10 ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim5g-oai-nr-ue10
    IF: oaitun_ue1
    AttachScript: docker start rfsim5g-oai-nr-ue10
    DetachScript: docker stop rfsim5g-oai-nr-ue10
    MTU: 1500
rfsim5g_ext_dn:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim5g-oai-ext-dn ip a show dev eth0
    CmdPrefix: docker exec rfsim5g-oai-ext-dn
rfsim4g_ue:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim4g-oai-lte-ue0 ip a show dev oaitun_ue1
    CmdPrefix: docker exec rfsim4g-oai-lte-ue0
    IF: oaitun_ue1
    AttachScript: docker start rfsim4g-oai-lte-ue0
    DetachScript: docker stop rfsim4g-oai-lte-ue0
    MTU: 1500
rfsim4g_ext_dn:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim4g-trf-gen ip a show dev eth0
    CmdPrefix: docker exec rfsim4g-trf-gen
rfsim4g_enb_nos1:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim4g-oai-enb ip a show dev oaitun_enb1
    CmdPrefix: docker exec rfsim4g-oai-enb
    IF: oaitun_enb1
rfsim4g_enb_fembms:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim4g-oai-enb ip a show dev oaitun_enm1
    CmdPrefix: docker exec rfsim4g-oai-enb
    IF: oaitun_enm1
rfsim4g_ue_fembms:
    Host: "%%current_host%%"
    NetworkScript: docker exec rfsim4g-oai-lte-ue0 ip a show dev oaitun_uem1
    CmdPrefix: docker exec rfsim4g-oai-lte-ue0
    IF: oaitun_uem1
l2sim4g_ue:
    Host: "%%current_host%%"
    AttachScript: docker start l2sim4g-oai-lte-ue1
    DetachScript: docker stop l2sim4g-oai-lte-ue1
    NetworkScript: docker exec l2sim4g-oai-lte-ue1 ip a show dev oaitun_ue1
    CmdPrefix: docker exec l2sim4g-oai-lte-ue1
    IF: oaitun_ue1
    MTU: 1500
l2sim4g_ext_dn:
    Host: "%%current_host%%"
    NetworkScript: docker exec l2sim4g-trf-gen ip a show dev eth0
    CmdPrefix: docker exec l2sim4g-trf-gen
test:
    Host: localhost
    NetworkScript: echo "inet 127.0.0.1 mtu 1500"
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
import logging
import re
# Define the mapping of physim_test values to search patterns
PHYSIM_PATTERN_MAPPING = {
    'nr_ulsim': [
        r'(Total PHY proc rx)\s+(\d+\.\d+)\s+us',          # Pattern for RX PHY processing time
        r'(ULSCH total decoding time)\s+(\d+\.\d+)\s+us',  # Pattern for ULSCH decoding time
    ],
    'nr_dlsim': [
        r'(PHY proc tx)\s+(\d+\.\d+)\s+us',          # Pattern for TX PHY processing time
        r'(DLSCH encoding time)\s+(\d+\.\d+)\s+us',  # Pattern for DLSCH encoding time
    ],
    'ldpctest': [
        r'(Encoding time mean):\s+(\d+\.\d+)\s+us',  # Pattern for encoding time mean
        r'(Decoding time mean):\s+(\d+\.\d+)\s+us',  # Pattern for decoding time mean
    ],
}

# Define test conditions based on the simulation type
PHYSIM_TEST_CONDITION = {
    'nr_ulsim': 'test OK',  # For nr_ulsim, check if 'test OK' is present
    'nr_dlsim': 'test OK',  # For nr_dlsim, check if 'test OK' is present
    'ldpctest': None,       # No condition for ldpctest, just process the patterns
}

class Analysis():
    def analyze_physim(log, physim_test, options, threshold):
        search_patterns = PHYSIM_PATTERN_MAPPING.get(physim_test)
        test_condition = PHYSIM_TEST_CONDITION.get(physim_test)
        success = False
        msg = ''
        try:
            with open(log, 'r') as f:
                log_content = f.read()
        except FileNotFoundError as e:
            msg = f'{log} file not found, exception: {e}'
            return False, msg
        except Exception as e:
            msg = f'Error while parsing log file {log}: exception: {e}'
            return False, msg
        if test_condition and test_condition not in log_content:
            msg = f"Test did not succeed, '{test_condition}' not found in log file {log}."
            return False, msg
        time1_match = re.search(search_patterns[0], log_content)
        time2_match = re.search(search_patterns[1], log_content)
        if not (time1_match and time2_match):
            msg = f"Processing time not found in log file {log}."
            return False, msg
        success = float(time2_match.group(2)) < float(threshold)
        if success:
            msg = f'{time1_match.group(1)}: {time1_match.group(2)} us\n{time2_match.group(1)}: {time2_match.group(2)} us'
        else:
            msg = f'{time1_match.group(1)}: {time1_match.group(2)} us\n{time2_match.group(1)}: {time2_match.group(2)} us exceeds a limit of {threshold} us'
        return success, msg
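A usage sketch for the analyzer above, with an illustrative log path and a latency threshold in microseconds:

# Check an nr_ulsim run: requires 'test OK' in the log and a decoding
# time below the threshold.
success, msg = Analysis.analyze_physim(
    log='archives/nr_ulsim.log', physim_test='nr_ulsim',
    options=None, threshold=200.0)
print(success)
print(msg)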
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
#
# Required Python Version
# Python 3.x
#
#---------------------------------------------------------------------
#-----------------------------------------------------------
# Import
#-----------------------------------------------------------
import logging
import re
import time
import cls_oai_html
import constants as CONST
import helpreadme as HELP
import cls_containerize
import cls_cmd
IMAGE_REGISTRY_SERVICE_NAME = "image-registry.openshift-image-registry.svc"
NAMESPACE = "oaicicd-ran"
OCUrl = "https://api.oai.cs.eurecom.fr:6443"
OCRegistry = "default-route-openshift-image-registry.apps.oai.cs.eurecom.fr"
CI_OC_RAN_NAMESPACE = "oaicicd-ran"
CN_IMAGES = ["mysql", "oai-nrf", "oai-amf", "oai-smf", "oai-upf", "oai-ausf", "oai-udm", "oai-udr", "oai-traffic-server"]
CN_CONTAINERS = ["", "-c nrf", "-c amf", "-c smf", "-c upf", "-c ausf", "-c udm", "-c udr", ""]
def OC_login(cmd, ocUserName, ocPassword, ocProjectName):
if ocUserName == '' or ocPassword == '' or ocProjectName == '':
HELP.GenericHelp(CONST.Version)
raise ValueError('Insufficient Parameter: no OC Credentials')
if OCRegistry.startswith("http") or OCRegistry.endswith("/"):
raise ValueError(f'ocRegistry {OCRegistry} should not start with http:// or https:// and not end on a slash /')
ret = cmd.run(f'oc login -u {ocUserName} -p {ocPassword} --server {OCUrl}')
if ret.returncode != 0:
logging.error('\u001B[1m OC Cluster Login Failed\u001B[0m')
return False
ret = cmd.run(f'oc project {ocProjectName}')
if ret.returncode != 0:
logging.error(f'\u001B[1mUnable to access OC project {ocProjectName}\u001B[0m')
OC_logout(cmd)
return False
return True
def OC_logout(cmd):
cmd.run(f'oc logout')
def OC_deploy_CN(cmd, ocUserName, ocPassword, ocNamespace, path):
logging.debug(f'OC OAI CN5G: Deploying OAI CN5G on Openshift Cluster: {ocNamespace}')
succeeded = OC_login(cmd, ocUserName, ocPassword, ocNamespace)
if not succeeded:
return False, CONST.OC_LOGIN_FAIL
cmd.run(f'helm list -aq -n {ocNamespace} | xargs -r helm uninstall -n {ocNamespace} --wait')
ret = cmd.run(f'helm install --wait oai5gcn {path}/ci-scripts/charts/oai-5g-basic/.')
if ret.returncode != 0:
logging.error('OC OAI CN5G: Deployment failed')
OC_logout(cmd)
return False, CONST.OC_PROJECT_FAIL
report = cmd.run('oc get pods')
OC_logout(cmd)
return True, report
def OC_undeploy_CN(cmd, ocUserName, ocPassword, ocNamespace, path):
logging.debug(f'OC OAI CN5G: Terminating CN on Openshift Cluster: {ocNamespace}')
succeeded = OC_login(cmd, ocUserName, ocPassword, ocNamespace)
if not succeeded:
return False, CONST.OC_LOGIN_FAIL
cmd.run(f'rm -Rf {path}/logs')
cmd.run(f'mkdir -p {path}/logs')
logging.debug('OC OAI CN5G: Collecting log files to workspace')
cmd.run(f'oc describe pod &> {path}/logs/describe-pods-post-test.log')
cmd.run(f'oc get pods.metrics.k8s &> {path}/logs/nf-resource-consumption.log')
for ii, ci in zip(CN_IMAGES, CN_CONTAINERS):
podName = cmd.run(f"oc get pods | grep {ii} | awk \'{{print $1}}\'").stdout.strip()
if not podName:
logging.debug(f'{ii} pod not found!')
else:
cmd.run(f'oc logs -f {podName} {ci} &> {path}/logs/{ii}.log &')
cmd.run(f'cd {path}/logs && zip -r -qq test_logs_CN.zip *.log')
cmd.copyin(f'{path}/logs/test_logs_CN.zip','test_logs_CN.zip')
ret = cmd.run(f'helm list -aq -n {ocNamespace} | xargs -r helm uninstall -n {ocNamespace} --wait')
if ret.returncode != 0:
logging.error('OC OAI CN5G: Undeployment failed')
cmd.run(f'helm list -aq -n {ocNamespace} | xargs -r helm uninstall -n {ocNamespace} --wait')
OC_logout(cmd)
return False, CONST.OC_PROJECT_FAIL
report = cmd.run('oc get pods')
OC_logout(cmd)
return True, report
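# Illustrative sketch (host name, credentials, and path are placeholders): how
# the OC_* helpers above compose into a full CN5G deploy/test/undeploy cycle.
def _example_cn_lifecycle():
	with cls_cmd.getConnection('cn-host.example') as cmd:
		ok, report = OC_deploy_CN(cmd, 'user', 'secret', NAMESPACE, '/opt/oai')
		if not ok:
			logging.error(f'CN5G deployment failed with code {report}')
			return False
		# ... run tests against the deployed core network here ...
		ok, report = OC_undeploy_CN(cmd, 'user', 'secret', NAMESPACE, '/opt/oai')
		return ok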
class Cluster:
def __init__(self):
self.eNBIPAddress = ""
self.eNBSourceCodePath = ""
self.forcedWorkspaceCleanup = False
self.OCUserName = ""
self.OCPassword = ""
self.OCProjectName = ""
self.OCUrl = OCUrl
self.OCRegistry = OCRegistry
self.ranRepository = ""
self.ranBranch = ""
self.ranCommitID = ""
self.ranAllowMerge = False
self.ranTargetBranch = ""
self.cmd = None
def _recreate_entitlements(self):
# recreating entitlements, don't care if deletion fails
self.cmd.run(f'oc delete secret etc-pki-entitlement')
ret = self.cmd.run(f"oc get secret etc-pki-entitlement -n openshift-config-managed -o json | jq 'del(.metadata.resourceVersion)' | jq 'del(.metadata.creationTimestamp)' | jq 'del(.metadata.uid)' | jq 'del(.metadata.namespace)' | oc create -f -", silent=True)
if ret.returncode != 0:
logging.error("could not create secret/etc-pki-entitlement")
return False
return True
def _recreate_bc(self, name, newTag, filename):
self._retag_image_statement(name, name, newTag, filename)
self.cmd.run(f'oc delete -f {filename}')
ret = self.cmd.run(f'oc create -f {filename}')
		if re.search(r'buildconfig.build.openshift.io/[a-zA-Z0-9\-]+ created', ret.stdout) is not None:
return True
logging.error('error while creating buildconfig: ' + ret.stdout)
return False
def _recreate_is_tag(self, name, newTag, filename):
ret = self.cmd.run(f'oc describe is {name}')
if ret.returncode != 0:
ret = self.cmd.run(f'oc create -f {filename}')
if ret.returncode != 0:
logging.error(f'error while creating imagestream: {ret.stdout}')
return False
else:
logging.debug(f'-> imagestream {name} found')
image = f'{name}:{newTag}'
self.cmd.run(f'oc delete istag {image}', reportNonZero=False) # we don't care if this fails, e.g., if it is missing
ret = self.cmd.run(f'oc create istag {image}')
if ret.returncode == 0:
return True
logging.error(f'error while creating imagestreamtag: {ret.stdout}')
return False
def _start_build(self, name):
# will return "immediately" but build runs in background
# if multiple builds are started at the same time, this can take some time, however
ret = self.cmd.run(f'oc start-build {name} --from-dir={self.eNBSourceCodePath} --exclude=""')
regres = re.search(r'build.build.openshift.io/(?P<jobname>[a-zA-Z0-9\-]+) started', ret.stdout)
if ret.returncode != 0 or ret.stdout.count('Uploading finished') != 1 or regres is None:
logging.error(f"error during oc start-build: {ret.stdout}")
return None
return regres.group('jobname') + '-build'
def _wait_build_end(self, jobs, timeout_sec, check_interval_sec = 5):
logging.debug(f"waiting for jobs {jobs} to finish building")
while timeout_sec > 0:
# check status
			for j in list(jobs):  # iterate over a copy, since we remove finished jobs below
				ret = self.cmd.run(f'oc get pods | grep {j}', silent = True)
				if ret.stdout.count('Completed') > 0: jobs.remove(j)
if ret.stdout.count('Error') > 0:
logging.error(f'error for job {j}: {ret.stdout}')
return False
if jobs == []:
logging.debug('all jobs completed')
return True
time.sleep(check_interval_sec)
timeout_sec -= check_interval_sec
logging.error(f"timeout while waiting for end of build of {jobs}")
return False
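	# Illustrative pattern, as used in BuildClusterImage below: start builds in
	# the background, then poll until every job finished or the timeout expired:
	#   job = self._start_build('ran-base')  # returns '<name>-build' or None
	#   ok = job is not None and self._wait_build_end([job], 1000)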
def _retag_image_statement(self, oldImage, newImage, newTag, filename):
self.cmd.run(f'sed -i -e "s#{oldImage}:latest#{newImage}:{newTag}#" {filename}')
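	# e.g., applied to a Dockerfile or manifest, the sed above rewrites every
	# occurrence of '<oldImage>:latest' (tag value illustrative):
	#   FROM ran-base:latest  ->  FROM ran-base:develop-abc12345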
def _get_image_size(self, image, tag):
# get the SHA of the image we built using the image name and its tag
ret = self.cmd.run(f'oc describe is {image} | grep -A4 {tag}')
result = re.search(f'{IMAGE_REGISTRY_SERVICE_NAME}:5000/{NAMESPACE}/(?P<imageSha>{image}@sha256:[a-f0-9]+)', ret.stdout)
if result is None:
return -1
imageSha = result.group("imageSha")
# retrieve the size
ret = self.cmd.run(f'oc get -o json isimage {imageSha} | jq -Mc "{{dockerImageSize: .image.dockerImageMetadata.Size}}"')
result = re.search('{"dockerImageSize":(?P<size>[0-9]+)}', ret.stdout)
if result is None:
return -1
return int(result.group("size"))
def _deploy_pod(self, filename, timeout = 120):
ret = self.cmd.run(f'oc create -f {filename}')
		result = re.search(r'pod/(?P<pod>[a-zA-Z0-9_\-]+) created', ret.stdout)
if result is None:
logging.error(f'could not deploy pod: {ret.stdout}')
return None
pod = result.group("pod")
logging.debug(f'checking if pod {pod} is in Running state')
ret = self.cmd.run(f'oc wait --for=condition=ready pod {pod} --timeout={timeout}s', silent=True)
if ret.returncode == 0:
return pod
logging.error(f'pod {pod} did not reach Running state')
self._undeploy_pod(filename)
return None
def _undeploy_pod(self, filename):
self.cmd.run(f'oc delete -f {filename}')
def PullClusterImage(self, HTML, node, images, tag_prefix):
logging.debug(f'Pull OC image {images} to server {node}')
self.testCase_id = HTML.testCase_id
with cls_cmd.getConnection(node) as cmd:
succeeded = OC_login(cmd, self.OCUserName, self.OCPassword, CI_OC_RAN_NAMESPACE)
if not succeeded:
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.OC_LOGIN_FAIL)
return False
ret = cmd.run(f'oc whoami -t | docker login -u oaicicd --password-stdin {self.OCRegistry}')
if ret.returncode != 0:
logging.error(f'cannot authenticate at registry')
OC_logout(cmd)
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.OC_LOGIN_FAIL)
return False
tag = cls_containerize.CreateTag(self.ranCommitID, self.ranBranch, self.ranAllowMerge)
registry = f'{self.OCRegistry}/{CI_OC_RAN_NAMESPACE}'
success, msg = cls_containerize.Containerize.Pull_Image(cmd, images, tag, tag_prefix, registry, None, None)
OC_logout(cmd)
param = f"on node {node}"
if success:
HTML.CreateHtmlTestRowQueue(param, 'OK', [msg])
else:
HTML.CreateHtmlTestRowQueue(param, 'KO', [msg])
return success
def BuildClusterImage(self, HTML):
if self.ranRepository == '' or self.ranBranch == '' or self.ranCommitID == '':
HELP.GenericHelp(CONST.Version)
			raise ValueError(f'Insufficient Parameter: ranRepository {self.ranRepository} ranBranch {self.ranBranch} ranCommitID {self.ranCommitID}')
lIpAddr = self.eNBIPAddress
lSourcePath = self.eNBSourceCodePath
if lIpAddr == '' or lSourcePath == '':
			raise ValueError('Insufficient Parameter: eNBIPAddress or eNBSourceCodePath missing')
ocUserName = self.OCUserName
ocPassword = self.OCPassword
ocProjectName = self.OCProjectName
if ocUserName == '' or ocPassword == '' or ocProjectName == '':
HELP.GenericHelp(CONST.Version)
raise ValueError('Insufficient Parameter: no OC Credentials')
if self.OCRegistry.startswith("http") or self.OCRegistry.endswith("/"):
			raise ValueError(f'ocRegistry {self.OCRegistry} must not start with http:// or https:// and must not end with a slash /')
logging.debug(f'Building on cluster triggered from server: {lIpAddr}')
self.cmd = cls_cmd.RemoteCmd(lIpAddr)
self.testCase_id = HTML.testCase_id
# Workaround for some servers, we need to erase completely the workspace
self.cmd.cd(lSourcePath)
		# to reduce the amount of data sent to OpenShift, we
		# manually delete all generated files in the workspace
		self.cmd.run(f'rm -rf {lSourcePath}/cmake_targets/ran_build')
baseTag = 'develop'
forceBaseImageBuild = False
if self.ranAllowMerge: # merging MR branch into develop -> temporary image
branchName = self.ranBranch.replace('/','-')
imageTag = f'{branchName}-{self.ranCommitID[0:8]}'
if self.ranTargetBranch == 'develop':
ret = self.cmd.run(f'git diff HEAD..origin/develop -- cmake_targets/build_oai cmake_targets/tools/build_helper docker/Dockerfile.base.rhel9 | grep --colour=never -i INDEX')
result = re.search('index', ret.stdout)
if result is not None:
forceBaseImageBuild = True
baseTag = 'ci-temp'
			# if the branch name contains integration_20xx_wyy, force a rebuild of ran-base
result = re.search('integration_20([0-9]{2})_w([0-9]{2})', self.ranBranch)
if not forceBaseImageBuild and result is not None:
forceBaseImageBuild = True
baseTag = 'ci-temp'
else:
imageTag = f'develop-{self.ranCommitID[0:8]}'
forceBaseImageBuild = True
		# log in to the OC cluster, then switch to the corresponding project
ret = self.cmd.run(f'oc login -u {ocUserName} -p {ocPassword} --server {self.OCUrl}')
if ret.returncode != 0:
logging.error('\u001B[1m OC Cluster Login Failed\u001B[0m')
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.OC_LOGIN_FAIL)
return False
ret = self.cmd.run(f'oc project {ocProjectName}')
if ret.returncode != 0:
logging.error(f'\u001B[1mUnable to access OC project {ocProjectName}\u001B[0m')
self.cmd.run('oc logout')
HTML.CreateHtmlTestRow('N/A', 'KO', CONST.OC_PROJECT_FAIL)
return False
# delete old images by Sagar Arora <sagar.arora@openairinterface.org>:
# 1. retrieve all images and their timestamp
# 2. awk retrieves those whose timestamp is older than 3 weeks
# 3. issue delete command on corresponding istags (the images are dangling and will be cleaned by the registry)
delete_cmd = "oc get istag -o go-template --template '{{range .items}}{{.metadata.name}} {{.metadata.creationTimestamp}}{{\"\\n\"}}{{end}}' | awk '$2 <= \"'$(date -d '-3weeks' -Ins --utc | sed 's/+0000/Z/')'\" { print $1 }' | xargs --no-run-if-empty oc delete istag"
response = self.cmd.run(delete_cmd)
logging.debug(f"deleted images:\n{response.stdout}")
self._recreate_entitlements()
status = True # flag to abandon compiling if any image fails
attemptedImages = []
if forceBaseImageBuild:
self._recreate_is_tag('ran-base', baseTag, 'openshift/ran-base-is.yaml')
self._recreate_bc('ran-base', baseTag, 'openshift/ran-base-bc.yaml')
ranbase_job = self._start_build('ran-base')
attemptedImages += ['ran-base']
status = ranbase_job is not None and self._wait_build_end([ranbase_job], 1000)
if not status: logging.error('failure during build of ran-base')
			self.cmd.run(f'oc logs {ranbase_job} &> cmake_targets/log/ran-base.log') # capture the build log via shell redirect
# recover logs by mounting image
self._retag_image_statement('ran-base', 'ran-base', baseTag, 'openshift/ran-base-log-retrieval.yaml')
pod = self._deploy_pod('openshift/ran-base-log-retrieval.yaml')
if pod is not None:
self.cmd.run(f'mkdir -p cmake_targets/log/ran-base')
self.cmd.run(f'oc rsync {pod}:/oai-ran/cmake_targets/log/ cmake_targets/log/ran-base')
self._undeploy_pod('openshift/ran-base-log-retrieval.yaml')
else:
status = False
if status:
self._recreate_is_tag('oai-physim', imageTag, 'openshift/oai-physim-is.yaml')
self._recreate_bc('oai-physim', imageTag, 'openshift/oai-physim-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.phySim.rhel9')
physim_job = self._start_build('oai-physim')
attemptedImages += ['oai-physim']
self._recreate_is_tag('ran-build', imageTag, 'openshift/ran-build-is.yaml')
self._recreate_bc('ran-build', imageTag, 'openshift/ran-build-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.build.rhel9')
ranbuild_job = self._start_build('ran-build')
attemptedImages += ['ran-build']
self._recreate_is_tag('oai-clang', imageTag, 'openshift/oai-clang-is.yaml')
self._recreate_bc('oai-clang', imageTag, 'openshift/oai-clang-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.clang.rhel9')
clang_job = self._start_build('oai-clang')
attemptedImages += ['oai-clang']
wait = ranbuild_job is not None and physim_job is not None and clang_job is not None and self._wait_build_end([ranbuild_job, physim_job, clang_job], 1200)
if not wait: logging.error('error during build of ranbuild_job or physim_job or clang_job')
status = status and wait
self.cmd.run(f'oc logs {ranbuild_job} &> cmake_targets/log/ran-build.log')
self.cmd.run(f'oc logs {physim_job} &> cmake_targets/log/oai-physim.log')
self.cmd.run(f'oc logs {clang_job} &> cmake_targets/log/oai-clang.log')
			self.cmd.run('oc get pods.metrics.k8s.io &>> cmake_targets/log/build-metrics.log', timeout=10)
if status:
self._recreate_is_tag('oai-enb', imageTag, 'openshift/oai-enb-is.yaml')
self._recreate_bc('oai-enb', imageTag, 'openshift/oai-enb-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.eNB.rhel9')
self._retag_image_statement('ran-build', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build', imageTag, 'docker/Dockerfile.eNB.rhel9')
enb_job = self._start_build('oai-enb')
attemptedImages += ['oai-enb']
self._recreate_is_tag('oai-gnb', imageTag, 'openshift/oai-gnb-is.yaml')
self._recreate_bc('oai-gnb', imageTag, 'openshift/oai-gnb-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.gNB.rhel9')
self._retag_image_statement('ran-build', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build', imageTag, 'docker/Dockerfile.gNB.rhel9')
gnb_job = self._start_build('oai-gnb')
attemptedImages += ['oai-gnb']
self._recreate_is_tag('oai-gnb-aw2s', imageTag, 'openshift/oai-gnb-aw2s-is.yaml')
self._recreate_bc('oai-gnb-aw2s', imageTag, 'openshift/oai-gnb-aw2s-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.gNB.aw2s.rhel9')
self._retag_image_statement('ran-build', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build', imageTag, 'docker/Dockerfile.gNB.aw2s.rhel9')
gnb_aw2s_job = self._start_build('oai-gnb-aw2s')
attemptedImages += ['oai-gnb-aw2s']
wait = enb_job is not None and gnb_job is not None and gnb_aw2s_job is not None and self._wait_build_end([enb_job, gnb_job, gnb_aw2s_job], 800)
if not wait: logging.error('error during build of eNB/gNB')
status = status and wait
# recover logs
self.cmd.run(f'oc logs {enb_job} &> cmake_targets/log/oai-enb.log')
self.cmd.run(f'oc logs {gnb_job} &> cmake_targets/log/oai-gnb.log')
self.cmd.run(f'oc logs {gnb_aw2s_job} &> cmake_targets/log/oai-gnb-aw2s.log')
self._recreate_is_tag('oai-nr-cuup', imageTag, 'openshift/oai-nr-cuup-is.yaml')
self._recreate_bc('oai-nr-cuup', imageTag, 'openshift/oai-nr-cuup-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.nr-cuup.rhel9')
self._retag_image_statement('ran-build', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build', imageTag, 'docker/Dockerfile.nr-cuup.rhel9')
nr_cuup_job = self._start_build('oai-nr-cuup')
attemptedImages += ['oai-nr-cuup']
self._recreate_is_tag('oai-lte-ue', imageTag, 'openshift/oai-lte-ue-is.yaml')
self._recreate_bc('oai-lte-ue', imageTag, 'openshift/oai-lte-ue-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.lteUE.rhel9')
self._retag_image_statement('ran-build', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build', imageTag, 'docker/Dockerfile.lteUE.rhel9')
lteue_job = self._start_build('oai-lte-ue')
attemptedImages += ['oai-lte-ue']
self._recreate_is_tag('oai-nr-ue', imageTag, 'openshift/oai-nr-ue-is.yaml')
self._recreate_bc('oai-nr-ue', imageTag, 'openshift/oai-nr-ue-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.nrUE.rhel9')
self._retag_image_statement('ran-build', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build', imageTag, 'docker/Dockerfile.nrUE.rhel9')
nrue_job = self._start_build('oai-nr-ue')
attemptedImages += ['oai-nr-ue']
wait = nr_cuup_job is not None and lteue_job is not None and nrue_job is not None and self._wait_build_end([nr_cuup_job, lteue_job, nrue_job], 800)
if not wait: logging.error('error during build of nr-cuup/lteUE/nrUE')
status = status and wait
# recover logs
self.cmd.run(f'oc logs {nr_cuup_job} &> cmake_targets/log/oai-nr-cuup.log')
self.cmd.run(f'oc logs {lteue_job} &> cmake_targets/log/oai-lte-ue.log')
self.cmd.run(f'oc logs {nrue_job} &> cmake_targets/log/oai-nr-ue.log')
			self.cmd.run('oc get pods.metrics.k8s.io &>> cmake_targets/log/build-metrics.log', timeout=10)
if status:
self._recreate_is_tag('ran-build-fhi72', imageTag, 'openshift/ran-build-fhi72-is.yaml')
self._recreate_bc('ran-build-fhi72', imageTag, 'openshift/ran-build-fhi72-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.build.fhi72.rhel9')
ranbuildfhi72_job = self._start_build('ran-build-fhi72')
attemptedImages += ['ran-build-fhi72']
wait = ranbuildfhi72_job is not None and self._wait_build_end([ranbuildfhi72_job], 1200)
if not wait: logging.error('error during build of ranbuildfhi72_job')
status = status and wait
self.cmd.run(f'oc logs {ranbuildfhi72_job} &> cmake_targets/log/ran-build-fhi72.log')
			self.cmd.run('oc get pods.metrics.k8s.io &>> cmake_targets/log/build-metrics.log', timeout=10)
if status:
self._recreate_is_tag('oai-gnb-fhi72', imageTag, 'openshift/oai-gnb-fhi72-is.yaml')
self._recreate_bc('oai-gnb-fhi72', imageTag, 'openshift/oai-gnb-fhi72-bc.yaml')
self._retag_image_statement('ran-base', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-base', baseTag, 'docker/Dockerfile.gNB.fhi72.rhel9')
self._retag_image_statement('ran-build-fhi72', 'image-registry.openshift-image-registry.svc:5000/oaicicd-ran/ran-build-fhi72', imageTag, 'docker/Dockerfile.gNB.fhi72.rhel9')
gnb_fhi72_job = self._start_build('oai-gnb-fhi72')
attemptedImages += ['oai-gnb-fhi72']
wait = gnb_fhi72_job is not None and self._wait_build_end([gnb_fhi72_job], 600)
if not wait: logging.error('error during build of gNB-fhi72')
status = status and wait
# recover logs
self.cmd.run(f'oc logs {gnb_fhi72_job} &> cmake_targets/log/oai-gnb-fhi72.log')
			self.cmd.run('oc get pods.metrics.k8s.io &>> cmake_targets/log/build-metrics.log', timeout=10)
# split and analyze logs
imageSize = {}
for image in attemptedImages:
self.cmd.run(f'mkdir -p cmake_targets/log/{image}')
tag = imageTag if image != 'ran-base' else baseTag
size = self._get_image_size(image, tag)
if size <= 0:
imageSize[image] = 'unknown -- BUILD FAILED'
status = False
else:
sizeMb = float(size) / 1000000
imageSize[image] = f'{sizeMb:.1f} Mbytes (uncompressed: ~{sizeMb*2.5:.1f} Mbytes)'
logging.info(f'\u001B[1m{image} size is {imageSize[image]}\u001B[0m')
grep_exp = "\|".join(attemptedImages)
self.cmd.run(f'oc get images | grep -e \'{grep_exp}\' &> cmake_targets/log/image_registry.log');
self.cmd.run(f'for pod in $(oc get pods | tail -n +2 | awk \'{{print $1}}\'); do oc get pod $pod -o json &>> cmake_targets/log/build_pod_summary.log; done', '\$', 60)
build_log_name = f'build_log_{self.testCase_id}'
cls_containerize.CopyLogsToExecutor(self.cmd, lSourcePath, build_log_name)
self.cmd.run('for pod in $(oc get pods | tail -n +2 | awk \'{print $1}\'); do oc delete pod ${pod}; done')
		self.cmd.run('oc logout')
self.cmd.close()
# Analyze the logs
collectInfo = cls_containerize.AnalyzeBuildLogs(build_log_name, attemptedImages, status)
for img in collectInfo:
for f in collectInfo[img]:
status = status and collectInfo[img][f]['status']
if not status:
logging.debug(collectInfo)
if status:
logging.info('\u001B[1m Building OAI Image(s) Pass\u001B[0m')
HTML.CreateHtmlTestRow('all', 'OK', CONST.ALL_PROCESSES_OK)
else:
logging.error('\u001B[1m Building OAI Images Failed\u001B[0m')
HTML.CreateHtmlTestRow('all', 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlNextTabHeaderTestRow(collectInfo, imageSize)
return status
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
#
# Required Python Version
# Python 3.x
#
#---------------------------------------------------------------------
import abc
import logging
import subprocess as sp
import os
import paramiko
import uuid
import time
SSHTIMEOUT=7
def is_local(host):
return host is None or host.lower() in ["", "none", "localhost"]
# helper that returns either LocalCmd or RemoteCmd based on passed host name
def getConnection(host, d=None):
if is_local(host):
return LocalCmd(d=d)
else:
return RemoteCmd(host, d=d)
def runScript(host, path, timeout, parameters=None, redirect=None, silent=False):
if is_local(host):
return LocalCmd.exec_script(path, timeout, parameters, redirect, silent)
else:
return RemoteCmd.exec_script(host, path, timeout, parameters, redirect, silent)
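# Illustrative usage of the helpers above (host name is a placeholder): a local
# host (None/""/localhost) selects LocalCmd, anything else SSHes via RemoteCmd.
def _example_run():
	with getConnection('build-server.example') as cmd:
		ret = cmd.run('uname -a')
		logging.info(f'{ret.returncode}: {ret.stdout}')
	ret = runScript(None, '/bin/hostname', 10)  # executed locally
	return ret.returncode == 0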
# provides a partial interface for the legacy SSHconnection class (getBefore(), command())
class Cmd(metaclass=abc.ABCMeta):
def cd(self, d, silent=False):
		if d is None or d == '':
self.cwd = None
elif d[0] == '/':
self.cwd = d
else:
if not self.cwd:
# no cwd set: get current working directory
self.cwd = self.run('pwd').stdout.strip()
self.cwd += f"/{d}"
if not silent:
logging.debug(f'cd {self.cwd}')
@abc.abstractmethod
def exec_script(path, timeout, parameters=None, redirect=None, silent=False):
return
@abc.abstractmethod
def run(self, line, timeout=300, silent=False):
return
def command(self, commandline, expectedline=None, timeout=300, silent=False, resync=False):
splitted = commandline.split(' ')
if splitted[0] == 'cd':
self.cd(' '.join(splitted[1:]), silent)
else:
self.run(commandline, timeout, silent)
return 0
@abc.abstractmethod
def close(self):
return
@abc.abstractmethod
def getBefore(self):
return
@abc.abstractmethod
def copyin(self, scpIp, scpUser, scpPw, src, tgt):
return
@abc.abstractmethod
def copyout(self, scpIp, scpUser, scpPw, src, tgt):
return
class LocalCmd(Cmd):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
def __init__(self, d = None):
self.cwd = d
if self.cwd is not None:
logging.debug(f'Working dir is {self.cwd}')
self.cp = sp.CompletedProcess(args='', returncode=0, stdout='')
def exec_script(path, timeout, parameters=None, redirect=None, silent=False):
if redirect and not redirect.startswith("/"):
raise ValueError(f"redirect must be absolute, but is {redirect}")
c = f"{path} {parameters}" if parameters else path
if not redirect:
ret = sp.run(c, shell=True, timeout=timeout, stdout=sp.PIPE, stderr=sp.STDOUT)
ret.stdout = ret.stdout.decode('utf-8').strip()
else:
with open(redirect, "w") as f:
ret = sp.run(c, shell=True, timeout=timeout, stdout=f, stderr=f)
ret.args += f" &> {redirect}"
ret.stdout = ""
if not silent:
logging.info(f"local> {ret.args}")
return ret
def run(self, line, timeout=300, silent=False, reportNonZero=True):
if not silent:
logging.info(f"local> {line}")
try:
if line.strip().endswith('&'):
# if we wait for stdout, subprocess does not return before the end of the command
# however, we don't want to wait for commands with &, so just return fake command
ret = sp.run(line, shell=True, cwd=self.cwd, timeout=5, executable='/bin/bash')
time.sleep(0.1)
else:
ret = sp.run(line, shell=True, cwd=self.cwd, stdout=sp.PIPE, stderr=sp.STDOUT, timeout=timeout, executable='/bin/bash')
except Exception as e:
ret = sp.CompletedProcess(args=line, returncode=255, stdout=f'Exception: {str(e)}'.encode('utf-8'))
if ret.stdout is None:
ret.stdout = b''
ret.stdout = ret.stdout.decode('utf-8').strip()
if reportNonZero and ret.returncode != 0:
logging.warning(f'command "{ret.args}" returned non-zero returncode {ret.returncode}: output:\n{ret.stdout}')
self.cp = ret
return ret
def close(self):
pass
def getBefore(self):
return self.cp.stdout
def copyin(self, src, tgt, recursive=False):
if src[0] != '/' or tgt[0] != '/':
raise Exception(f'support only absolute file paths (src {src} tgt {tgt})!')
if src == tgt:
return # nothing to copy, file is already where it should go
opt = '-r' if recursive else ''
return self.run(f'cp {opt} {src} {tgt}').returncode == 0
def copyout(self, src, tgt, recursive=False):
return self.copyin(src, tgt, recursive)
def PutFile(client, src, tgt):
success = True
sftp = client.open_sftp()
try:
sftp.put(src, tgt)
except FileNotFoundError as error:
logging.error(f"error while putting {src}: {error}")
success = False
sftp.close()
return success
def GetFile(client, src, tgt):
success = True
sftp = client.open_sftp()
try:
sftp.get(src, tgt)
except FileNotFoundError as error:
logging.error(f"error while getting {src}: {error}")
success = False
sftp.close()
return success
class RemoteCmd(Cmd):
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
def __init__(self, hostname, d=None):
cIdx = 0
self.hostname = hostname
self.client = RemoteCmd._ssh_init()
cfg = RemoteCmd._lookup_ssh_config(hostname)
self.cwd = d
self.cp = sp.CompletedProcess(args='', returncode=0, stdout='')
while cIdx < 3:
try:
self.client.connect(**cfg)
return
			except Exception:
				logging.error(f'Could not connect to {hostname} (attempt {cIdx + 1} of 3)')
				cIdx += 1
		raise Exception("Error: max retries reached, could not connect to host")
def _ssh_init():
logging.getLogger('paramiko').setLevel(logging.ERROR) # prevent spamming through Paramiko
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
return client
def _lookup_ssh_config(hostname):
if is_local(hostname):
raise ValueError("Using localhost as SSH target is not allowed: use LocalCmd instead.")
ssh_config = paramiko.SSHConfig()
user_config_file = os.path.expanduser("~/.ssh/config")
if os.path.exists(user_config_file):
with open(user_config_file) as f:
ssh_config.parse(f)
else:
raise FileNotFoundError('class needs SSH config at ~/.ssh/config')
ucfg = ssh_config.lookup(hostname)
if 'identityfile' not in ucfg or 'user' not in ucfg:
raise KeyError(f'no identityfile or user in SSH config for host {hostname}')
cfg = {'hostname':hostname, 'username':ucfg['user'], 'key_filename':ucfg['identityfile'], 'timeout':SSHTIMEOUT}
if 'hostname' in ucfg:
cfg['hostname'] = ucfg['hostname'] # override user-given hostname with what is in config
if 'port' in ucfg:
cfg['port'] = int(ucfg['port'])
if 'proxycommand' in ucfg:
cfg['sock'] = paramiko.ProxyCommand(ucfg['proxycommand'])
return cfg
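	# _lookup_ssh_config expects a ~/.ssh/config entry like (values illustrative):
	#   Host build-server.example
	#       HostName 192.0.2.10
	#       User oaici
	#       IdentityFile ~/.ssh/id_ed25519
	#       Port 22                          # optional
	#       ProxyCommand ssh -W %h:%p jump   # optional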
def exec_script(host, path, timeout, parameters=None, redirect=None, silent=False):
if redirect and not redirect.startswith("/"):
raise ValueError(f"redirect must be absolute, but is {redirect}")
p = parameters if parameters else ""
r = f"> {redirect}" if redirect else ""
if not silent:
logging.info(f"local> ssh {host} bash -s {p} < {path} {r} # {path} from localhost")
client = RemoteCmd._ssh_init()
cfg = RemoteCmd._lookup_ssh_config(host)
client.connect(**cfg)
bash_opt = 'BASH_XTRACEFD=1' # write bash set -x output to stdout, see bash(1)
stdin, stdout, stderr = client.exec_command(f"{bash_opt} bash -s {p} {r}", timeout=timeout)
# open() the file f at path, read() it and write() it into the stdin of the bash -s cmd
with open(path) as f:
stdin.write(f.read())
stdin.close()
cmd = path
if parameters: cmd += f" {parameters}"
if redirect: cmd += f" &> {redirect}"
ret = sp.CompletedProcess(args=cmd, returncode=stdout.channel.recv_exit_status(), stdout=stdout.read(size=None) + stderr.read(size=None))
ret.stdout = ret.stdout.decode('utf-8').strip()
client.close()
return ret
def run(self, line, timeout=300, silent=False, reportNonZero=True):
if not silent:
logging.info(f"ssh[{self.hostname}]> {line}")
if self.cwd:
line = f"cd {self.cwd} && {line}"
try:
if line.strip().endswith('&'):
# if we wait for stdout, Paramiko does not return before the end of the command
# however, we don't want to wait for commands with &, so just return fake command
self.client.exec_command(line, timeout = 5)
ret = sp.CompletedProcess(args=line, returncode=0, stdout=b'')
time.sleep(0.1)
else:
stdin, stdout, stderr = self.client.exec_command(line, timeout=timeout)
ret = sp.CompletedProcess(args=line, returncode=stdout.channel.recv_exit_status(), stdout=stdout.read(size=None) + stderr.read(size=None))
except Exception as e:
ret = sp.CompletedProcess(args=line, returncode=255, stdout=f'Exception: {str(e)}'.encode('utf-8'))
ret.stdout = ret.stdout.decode('utf-8').strip()
if reportNonZero and ret.returncode != 0:
logging.warning(f'command "{line}" returned non-zero returncode {ret.returncode}: output:\n{ret.stdout}')
self.cp = ret
return ret
def close(self):
self.client.close()
def getBefore(self):
return self.cp.stdout
# if recursive is True, tgt must be a directory (and src is file or directory)
# if recursive is False, tgt and src must be a file name
def copyout(self, src, tgt, recursive=False):
logging.debug(f"copyout: local:{src} -> {self.hostname}:{tgt}")
if recursive:
tmpfile = f"{uuid.uuid4()}.tar"
abstmpfile = f"/tmp/{tmpfile}"
with LocalCmd() as cmd:
if cmd.run(f"tar -cf {abstmpfile} {src}").returncode != 0:
return False
if not PutFile(self.client, abstmpfile, abstmpfile):
return False
cmd.run(f"rm {abstmpfile}")
ret = self.run(f"mv {abstmpfile} {tgt}; cd {tgt} && tar -xf {tmpfile} && rm {tmpfile}")
return ret.returncode == 0
else:
return PutFile(self.client, src, tgt)
# if recursive is True, tgt must be a directory (and src is file or directory)
# if recursive is False, tgt and src must be a file name
def copyin(self, src, tgt, recursive=False):
logging.debug(f"copyin: {self.hostname}:{src} -> local:{tgt}")
if recursive:
tmpfile = f"{uuid.uuid4()}.tar"
abstmpfile = f"/tmp/{tmpfile}"
if self.run(f"tar -cf {abstmpfile} {src}").returncode != 0:
return False
if not GetFile(self.client, abstmpfile, abstmpfile):
return False
self.run(f"rm {abstmpfile}")
with LocalCmd() as cmd:
ret = cmd.run(f"mv {abstmpfile} {tgt}; cd {tgt} && tar -xf {tmpfile} && rm {tmpfile}")
return ret.returncode == 0
else:
return GetFile(self.client, src, tgt)
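# Illustrative sketch of the copy contract above (paths are placeholders): with
# recursive=True the target must be a directory; the implementation tars the
# source, transfers the archive over SFTP, and unpacks it on the other side.
def _example_copy():
	with RemoteCmd('build-server.example') as cmd:
		cmd.copyin('/tmp/logs', '/tmp/collected', recursive=True)  # remote -> local
		cmd.copyout('/tmp/report.txt', '/tmp/report.txt')          # local -> remote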
#/*
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
# Python for CI of OAI-eNB + COTS-UE
#
# Required Python Version
# Python 3.x
#
# Required Python Package
# pexpect
#---------------------------------------------------------------------
#-----------------------------------------------------------
# Import
#-----------------------------------------------------------
import sys # arg
import re # reg
import logging
import os
import shutil
import time
from zipfile import ZipFile
#-----------------------------------------------------------
# OAI Testing modules
#-----------------------------------------------------------
import cls_cmd
import helpreadme as HELP
import constants as CONST
import cls_oaicitest
#-----------------------------------------------------------
# Helper functions used here and in other classes
# (e.g., cls_cluster.py)
#-----------------------------------------------------------
IMAGES = ['oai-enb', 'oai-lte-ru', 'oai-lte-ue', 'oai-gnb', 'oai-nr-cuup', 'oai-gnb-aw2s', 'oai-nr-ue', 'oai-enb-asan', 'oai-gnb-asan', 'oai-lte-ue-asan', 'oai-nr-ue-asan', 'oai-nr-cuup-asan', 'oai-gnb-aerial', 'oai-gnb-fhi72']
def CreateWorkspace(host, sourcePath, ranRepository, ranCommitID, ranTargetBranch, ranAllowMerge):
if ranCommitID == '':
logging.error('need ranCommitID in CreateWorkspace()')
raise ValueError('Insufficient Parameter in CreateWorkspace(): need ranCommitID')
script = "scripts/create_workspace.sh"
options = f"{sourcePath} {ranRepository} {ranCommitID}"
if ranAllowMerge:
if ranTargetBranch == '':
ranTargetBranch = 'develop'
options += f" {ranTargetBranch}"
logging.info(f'execute "{script}" with options "{options}" on node {host}')
ret = cls_cmd.runScript(host, script, 90, options)
logging.debug(f'"{script}" finished with code {ret.returncode}, output:\n{ret.stdout}')
return ret.returncode == 0
def CreateTag(ranCommitID, ranBranch, ranAllowMerge):
if ranCommitID == 'develop':
return 'develop'
shortCommit = ranCommitID[0:8]
if ranAllowMerge:
# Allowing contributor to have a name/branchName format
branchName = ranBranch.replace('/','-')
tagToUse = f'{branchName}-{shortCommit}'
else:
tagToUse = f'develop-{shortCommit}'
return tagToUse
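# Examples of the resulting tags (commit IDs are illustrative):
#   CreateTag('develop', 'develop', False)          -> 'develop'
#   CreateTag('abc1234567', 'develop', False)       -> 'develop-abc12345'
#   CreateTag('abc1234567', 'name/feature', True)   -> 'name-feature-abc12345'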
def CopyLogsToExecutor(cmd, sourcePath, log_name):
cmd.cd(f'{sourcePath}/cmake_targets')
cmd.run(f'rm -f {log_name}.zip')
cmd.run(f'mkdir -p {log_name}')
cmd.run(f'mv log/* {log_name}')
cmd.run(f'zip -r -qq {log_name}.zip {log_name}')
# copy zip to executor for analysis
if (os.path.isfile(f'./{log_name}.zip')):
os.remove(f'./{log_name}.zip')
if (os.path.isdir(f'./{log_name}')):
shutil.rmtree(f'./{log_name}')
cmd.copyin(src=f'{sourcePath}/cmake_targets/{log_name}.zip', tgt=f'{os.getcwd()}/{log_name}.zip')
cmd.run(f'rm -f {log_name}.zip')
ZipFile(f'{log_name}.zip').extractall('.')
def AnalyzeBuildLogs(buildRoot, images, globalStatus):
collectInfo = {}
for image in images:
files = {}
file_list = [f for f in os.listdir(f'{buildRoot}/{image}') if os.path.isfile(os.path.join(f'{buildRoot}/{image}', f)) and f.endswith('.txt')]
# Analyze the "sub-logs" of every target image
for fil in file_list:
errorandwarnings = {}
warningsNo = 0
errorsNo = 0
with open(f'{buildRoot}/{image}/{fil}', mode='r') as inputfile:
for line in inputfile:
result = re.search(' ERROR ', str(line))
if result is not None:
errorsNo += 1
result = re.search(' error:', str(line))
if result is not None:
errorsNo += 1
result = re.search(' WARNING ', str(line))
if result is not None:
warningsNo += 1
result = re.search(' warning:', str(line))
if result is not None:
warningsNo += 1
errorandwarnings['errors'] = errorsNo
errorandwarnings['warnings'] = warningsNo
errorandwarnings['status'] = globalStatus
files[fil] = errorandwarnings
# Analyze the target image
if os.path.isfile(f'{buildRoot}/{image}.log'):
errorandwarnings = {}
committed = False
tagged = False
with open(f'{buildRoot}/{image}.log', mode='r') as inputfile:
for line in inputfile:
lineHasTag = re.search(f'Successfully tagged {image}:', str(line)) is not None
lineHasTag2 = re.search(f'naming to docker.io/library/{image}:', str(line)) is not None
tagged = tagged or lineHasTag or lineHasTag2
# the OpenShift Cluster builder prepends image registry URL
					lineHasCommit = re.search(rf'COMMIT [a-zA-Z0-9\.:/\-]*{image}', str(line)) is not None
committed = committed or lineHasCommit
errorandwarnings['errors'] = 0 if committed or tagged else 1
errorandwarnings['warnings'] = 0
errorandwarnings['status'] = committed or tagged
files['Target Image Creation'] = errorandwarnings
collectInfo[image] = files
return collectInfo
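# Illustrative log lines that mark a successful target-image creation in
# AnalyzeBuildLogs above (image names and registry paths are made up):
#   Successfully tagged oai-gnb:develop-abc12345           (docker)
#   naming to docker.io/library/oai-gnb:develop-abc12345   (buildkit)
#   COMMIT image-registry.openshift-image-registry.svc:5000/oaicicd-ran/oai-gnb   (OpenShift builder)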
def GetImageName(ssh, svcName, file):
ret = ssh.run(f"docker compose -f {file} config --format json {svcName} | jq -r '.services.\"{svcName}\".image'", silent=True)
if ret.returncode != 0:
return f"cannot retrieve image info for {containerName}: {ret.stdout}"
else:
return ret.stdout.strip()
def GetServiceHealth(ssh, svcName, file):
if svcName is None:
return False, f"Service {svcName} not found in {file}"
image = GetImageName(ssh, svcName, file)
	if 'db_init' in svcName or 'db-init' in svcName: # exits with 0, so it can never report healthy
return True, f"Service {svcName} healthy, image {image}"
for _ in range(8):
result = ssh.run(f"docker compose -f {file} ps --format json {svcName} | jq -r 'if type==\"array\" then .[0].Health else .Health end'", silent=True)
if result.stdout == 'healthy':
return True, f"Service {svcName} healthy, image {image}"
time.sleep(5)
return False, f"Failed to deploy: service {svcName}"
def ExistEnvFilePrint(ssh, wd, prompt='env vars in existing'):
ret = ssh.run(f'cat {wd}/.env', silent=True, reportNonZero=False)
if ret.returncode != 0:
return False
env_vars = ret.stdout.strip().splitlines()
logging.info(f'{prompt} {wd}/.env: {env_vars}')
return True
def WriteEnvFile(ssh, services, wd, tag, flexric_tag):
ret = ssh.run(f'cat {wd}/.env', silent=True, reportNonZero=False)
registry = "oai-ci" # pull_images() gives us this registry path
envs = {"REGISTRY":registry, "TAG": tag, "FLEXRIC_TAG": flexric_tag}
if ret.returncode == 0: # it exists, we have to update
# transforms env file to dictionary
old_envs = {}
for l in ret.stdout.strip().splitlines():
var, val = l.split('=', 1)
old_envs[var] = val.strip('"')
# will retain the old environment variables
envs = {**envs, **old_envs}
for svc in services.split():
# In some scenarios we have the choice of either pulling normal images
# or -asan images. We need to detect which kind we did pull.
fullImageName = GetImageName(ssh, svc, f"{wd}/docker-compose.y*ml")
image = fullImageName.split("/")[-1].split(":")[0]
checkimg = f"{registry}/{image}-asan:{tag}"
ret = ssh.run(f'docker image inspect {checkimg}', reportNonZero=False)
if ret.returncode == 0:
logging.info(f"detected pulled image {checkimg}")
if "oai-enb" in image: envs["ENB_IMG"] = "oai-enb-asan"
elif "oai-gnb" in image: envs["GNB_IMG"] = "oai-gnb-asan"
elif "oai-lte-ue" in image: envs["LTEUE_IMG"] = "oai-lte-ue-asan"
elif "oai-nr-ue" in image: envs["NRUE_IMG"] = "oai-nr-ue-asan"
elif "oai-nr-cuup" in image: envs["NRCUUP_IMG"] = "oai-nr-cuup-asan"
else: logging.warning("undetected image format {image}, cannot use asan")
env_string = "\n".join([f"{var}=\"{val}\"" for var,val in envs.items()])
ssh.run(f'echo -e \'{env_string}\' > {wd}/.env', silent=True)
ExistEnvFilePrint(ssh, wd, prompt='New env vars in file')
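# Example of a resulting .env file (tag values are illustrative):
#   REGISTRY="oai-ci"
#   TAG="develop-abc12345"
#   FLEXRIC_TAG="dev"
#   GNB_IMG="oai-gnb-asan"   # only set if the -asan image variant was pulled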
def GetServices(ssh, requested, file):
	if not requested:
logging.warning('No service name given: starting all services in docker-compose.yml!')
ret = ssh.run(f'docker compose -f {file} config --services')
if ret.returncode != 0:
return ""
else:
return ' '.join(ret.stdout.splitlines())
else:
return requested
def CopyinServiceLog(ssh, lSourcePath, yaml, svcName, wd_yaml, filename):
remote_filename = f"{lSourcePath}/cmake_targets/log/{filename}"
ssh.run(f'docker compose -f {wd_yaml} logs {svcName} --no-log-prefix &> {remote_filename}')
local_dir = f"{os.getcwd()}/../cmake_targets/log/{yaml}"
local_filename = f"{local_dir}/{filename}"
return ssh.copyin(remote_filename, local_filename)
def GetRunningServices(ssh, file):
ret = ssh.run(f'docker compose -f {file} config --services')
if ret.returncode != 0:
return None
allServices = ret.stdout.splitlines()
running_services = []
for s in allServices:
# outputs the hash if the container is running
ret = ssh.run(f'docker compose -f {file} ps --all --quiet -- {s}')
if ret.returncode != 0:
logging.info(f"service {s}: {ret.stdout}")
elif ret.stdout == "":
logging.warning(f"could not retrieve information for service {s}")
else:
c = ret.stdout
logging.debug(f'running service {s} with container id {c}')
running_services.append((s, c))
logging.info(f'stopping services: {running_services}')
return running_services
def CheckLogs(self, yaml, service_name, HTML, RAN):
logPath = f'{os.getcwd()}/../cmake_targets/log/{yaml}'
filename = f'{logPath}/{service_name}-{HTML.testCase_id}.log'
success = True
if (any(sub in service_name for sub in ['oai_ue','oai-nr-ue','lte_ue'])):
logging.debug(f'\u001B[1m Analyzing UE logfile {filename} \u001B[0m')
logStatus = cls_oaicitest.OaiCiTest().AnalyzeLogFile_UE(filename, HTML, RAN)
opt = f"UE log analysis for service {service_name}"
		# we use htmlUEFailureMsg/htmleNBFailureMsg because the log-analysis
		# functions abuse the HTML object to store their reports, and we want to
		# pass custom options here, which is not possible with CreateHtmlTestRow.
		# Solution: use HTML templates, so we don't need different HTML write funcs.
if (logStatus < 0):
HTML.CreateHtmlTestRowQueue(opt, 'KO', [HTML.htmlUEFailureMsg])
success = False
else:
HTML.CreateHtmlTestRowQueue(opt, 'OK', [HTML.htmlUEFailureMsg])
HTML.htmlUEFailureMsg = ""
elif service_name == 'nv-cubb':
msg = 'Undeploy PNF/Nvidia CUBB'
HTML.CreateHtmlTestRow(msg, 'OK', CONST.ALL_PROCESSES_OK)
elif (any(sub in service_name for sub in ['enb','rru','rcc','cu','du','gnb'])):
logging.debug(f'\u001B[1m Analyzing XnB logfile {filename}\u001B[0m')
logStatus = RAN.AnalyzeLogFile_eNB(filename, HTML, self.ran_checkers)
opt = f"xNB log analysis for service {service_name}"
if (logStatus < 0):
HTML.CreateHtmlTestRowQueue(opt, 'KO', [HTML.htmleNBFailureMsg])
success = False
else:
HTML.CreateHtmlTestRowQueue(opt, 'OK', [HTML.htmleNBFailureMsg])
HTML.htmleNBFailureMsg = ""
else:
logging.info(f'Skipping to analyze log for service name {service_name}')
HTML.CreateHtmlTestRowQueue(f"service {service_name}", 'OK', ["no analysis function"])
logging.debug(f"log check: service {service_name} passed analysis {success}")
return success
#-----------------------------------------------------------
# Class Declaration
#-----------------------------------------------------------
class Containerize():
def __init__(self):
self.ranRepository = ''
self.ranBranch = ''
self.ranAllowMerge = False
self.ranCommitID = ''
self.ranTargetBranch = ''
self.eNBIPAddress = ''
self.eNBUserName = ''
self.eNBPassword = ''
self.eNBSourceCodePath = ''
self.eNB1IPAddress = ''
self.eNB1UserName = ''
self.eNB1Password = ''
self.eNB1SourceCodePath = ''
self.eNB2IPAddress = ''
self.eNB2UserName = ''
self.eNB2Password = ''
self.eNB2SourceCodePath = ''
self.forcedWorkspaceCleanup = False
self.imageKind = ''
self.proxyCommit = None
self.eNB_instance = 0
self.eNB_serverId = ['', '', '']
self.yamlPath = ['', '', '']
self.services = ['', '', '']
self.deploymentTag = ''
self.eNB_logFile = ['', '', '']
self.testCase_id = ''
self.cli = ''
self.cliBuildOptions = ''
self.dockerfileprefix = ''
self.host = ''
self.deployedContainers = []
self.tsharkStarted = False
self.pingContName = ''
self.pingOptions = ''
self.pingLossThreshold = ''
self.svrContName = ''
self.svrOptions = ''
self.cliContName = ''
self.cliOptions = ''
self.imageToCopy = ''
#checkers from xml
self.ran_checkers={}
self.num_attempts = 1
self.flexricTag = ''
#-----------------------------------------------------------
# Container management functions
#-----------------------------------------------------------
def GetCredentials(self, server_id):
if server_id == '0':
ip, path = self.eNBIPAddress, self.eNBSourceCodePath
elif server_id == '1':
ip, path = self.eNB1IPAddress, self.eNB1SourceCodePath
elif server_id == '2':
ip, path = self.eNB2IPAddress, self.eNB2SourceCodePath
else:
raise ValueError(f"unknown server ID '{server_id}'")
if ip == '' or path == '':
HELP.GenericHelp(CONST.Version)
raise ValueError(f'Insufficient Parameter: IP/node {ip}, path {path}')
return (ip, path)
def BuildImage(self, HTML):
svr = self.eNB_serverId[self.eNB_instance]
lIpAddr, lSourcePath = self.GetCredentials(svr)
logging.debug('Building on server: ' + lIpAddr)
cmd = cls_cmd.getConnection(lIpAddr)
		# Check the host OS to select the matching CLI and Dockerfile prefix
cmd.run('hostnamectl')
result = re.search('Ubuntu|Red Hat', cmd.getBefore())
self.host = result.group(0)
if self.host == 'Ubuntu':
self.cli = 'docker'
self.dockerfileprefix = '.ubuntu22'
self.cliBuildOptions = ''
elif self.host == 'Red Hat':
self.cli = 'sudo podman'
self.dockerfileprefix = '.rhel9'
self.cliBuildOptions = '--disable-compression'
		# we always build the ran-build image with all targets
		# each entry is a tuple of (imageName, Dockerfile prefix pattern, targetName, build option)
imageNames = [('ran-build', 'build', 'ran-build', '')]
result = re.search('eNB', self.imageKind)
if result is not None:
imageNames.append(('oai-enb', 'eNB', 'oai-enb', ''))
result = re.search('gNB', self.imageKind)
if result is not None:
imageNames.append(('oai-gnb', 'gNB', 'oai-gnb', ''))
result = re.search('all', self.imageKind)
if result is not None:
imageNames.append(('oai-enb', 'eNB', 'oai-enb', ''))
imageNames.append(('oai-gnb', 'gNB', 'oai-gnb', ''))
imageNames.append(('oai-nr-cuup', 'nr-cuup', 'oai-nr-cuup', ''))
imageNames.append(('oai-lte-ue', 'lteUE', 'oai-lte-ue', ''))
imageNames.append(('oai-nr-ue', 'nrUE', 'oai-nr-ue', ''))
if self.host == 'Red Hat':
imageNames.append(('oai-physim', 'phySim', 'oai-physim', ''))
if self.host == 'Ubuntu':
imageNames.append(('oai-lte-ru', 'lteRU', 'oai-lte-ru', ''))
imageNames.append(('oai-gnb-aerial', 'gNB.aerial', 'oai-gnb-aerial', ''))
# Building again the 5G images with Address Sanitizer
imageNames.append(('ran-build', 'build', 'ran-build-asan', '--build-arg "BUILD_OPTION=--sanitize"'))
imageNames.append(('oai-enb', 'eNB', 'oai-enb-asan', '--build-arg "BUILD_OPTION=--sanitize"'))
imageNames.append(('oai-gnb', 'gNB', 'oai-gnb-asan', '--build-arg "BUILD_OPTION=--sanitize"'))
imageNames.append(('oai-lte-ue', 'lteUE', 'oai-lte-ue-asan', '--build-arg "BUILD_OPTION=--sanitize"'))
imageNames.append(('oai-nr-ue', 'nrUE', 'oai-nr-ue-asan', '--build-arg "BUILD_OPTION=--sanitize"'))
imageNames.append(('oai-nr-cuup', 'nr-cuup', 'oai-nr-cuup-asan', '--build-arg "BUILD_OPTION=--sanitize"'))
imageNames.append(('ran-build-fhi72', 'build.fhi72', 'ran-build-fhi72', ''))
imageNames.append(('oai-gnb', 'gNB.fhi72', 'oai-gnb-fhi72', ''))
result = re.search('build_cross_arm64', self.imageKind)
if result is not None:
self.dockerfileprefix = '.ubuntu22.cross-arm64'
result = re.search('native_arm', self.imageKind)
if result is not None:
imageNames.append(('oai-gnb', 'gNB', 'oai-gnb', ''))
imageNames.append(('oai-nr-cuup', 'nr-cuup', 'oai-nr-cuup', ''))
imageNames.append(('oai-nr-ue', 'nrUE', 'oai-nr-ue', ''))
imageNames.append(('oai-gnb-aerial', 'gNB.aerial', 'oai-gnb-aerial', ''))
self.testCase_id = HTML.testCase_id
cmd.cd(lSourcePath)
		# on Red Hat hosts (e.g., asterix), copy the entitlement and subscription manager configurations
if self.host == 'Red Hat':
cmd.run('mkdir -p ./etc-pki-entitlement')
cmd.run('cp /etc/pki/entitlement/*.pem ./etc-pki-entitlement/')
baseImage = 'ran-base'
baseTag = 'develop'
forceBaseImageBuild = False
imageTag = 'develop'
if (self.ranAllowMerge):
imageTag = 'ci-temp'
if self.ranTargetBranch == 'develop':
cmd.run(f'git diff HEAD..origin/develop -- cmake_targets/build_oai cmake_targets/tools/build_helper docker/Dockerfile.base{self.dockerfileprefix} | grep --colour=never -i INDEX')
result = re.search('index', cmd.getBefore())
if result is not None:
forceBaseImageBuild = True
baseTag = 'ci-temp'
			# if the branch name contains integration_20xx_wyy, force a rebuild of ran-base
result = re.search('integration_20([0-9]{2})_w([0-9]{2})', self.ranBranch)
if not forceBaseImageBuild and result is not None:
forceBaseImageBuild = True
baseTag = 'ci-temp'
else:
forceBaseImageBuild = True
# Let's remove any previous run artifacts if still there
cmd.run(f"{self.cli} image prune --force")
for image,pattern,name,option in imageNames:
cmd.run(f"{self.cli} image rm {name}:{imageTag}")
		# Build the base image only on push events (not on merge requests),
		# or when the base image Dockerfile has been modified.
if forceBaseImageBuild:
cmd.run(f"{self.cli} image rm {baseImage}:{baseTag}")
cmd.run(f"{self.cli} build {self.cliBuildOptions} --target {baseImage} --tag {baseImage}:{baseTag} --file docker/Dockerfile.base{self.dockerfileprefix} . &> cmake_targets/log/ran-base.log", timeout=1600)
# First verify if the base image was properly created.
ret = cmd.run(f"{self.cli} image inspect --format=\'Size = {{{{.Size}}}} bytes\' {baseImage}:{baseTag}")
allImagesSize = {}
if ret.returncode != 0:
			logging.error('\u001B[1m Could not properly build ran-base\u001B[0m')
			# remove the container of the failed build, if any
cmd.run(f"{self.cli} ps --quiet --filter \"status=exited\" -n1 | xargs --no-run-if-empty {self.cli} rm -f")
cmd.run(f"{self.cli} image prune --force")
cmd.close()
logging.error('\u001B[1m Building OAI Images Failed\u001B[0m')
HTML.CreateHtmlTestRow(self.imageKind, 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlTabFooter(False)
return False
else:
result = re.search('Size *= *(?P<size>[0-9\-]+) *bytes', cmd.getBefore())
if result is not None:
size = float(result.group("size")) / 1000000
imageSizeStr = f'{size:.1f}'
logging.debug(f'\u001B[1m ran-base size is {imageSizeStr} Mbytes\u001B[0m')
allImagesSize['ran-base'] = f'{imageSizeStr} Mbytes'
else:
logging.debug('ran-base size is unknown')
# Recover build logs, for the moment only possible when build is successful
cmd.run(f"{self.cli} create --name test {baseImage}:{baseTag}")
cmd.run("mkdir -p cmake_targets/log/ran-base")
cmd.run(f"{self.cli} cp test:/oai-ran/cmake_targets/log/. cmake_targets/log/ran-base")
cmd.run(f"{self.cli} rm -f test")
# Build the target image(s)
status = True
attemptedImages = ['ran-base']
for image,pattern,name,option in imageNames:
attemptedImages += [name]
# the archived Dockerfiles have "ran-base:latest" as base image
# we need to update them with proper tag
cmd.run(f'git checkout -- docker/Dockerfile.{pattern}{self.dockerfileprefix}')
cmd.run(f'sed -i -e "s#{baseImage}:latest#{baseImage}:{baseTag}#" docker/Dockerfile.{pattern}{self.dockerfileprefix}')
# target images should use the proper ran-build image
if image != 'ran-build' and "-asan" in name:
cmd.run(f'sed -i -e "s#ran-build:latest#ran-build-asan:{imageTag}#" docker/Dockerfile.{pattern}{self.dockerfileprefix}')
elif "fhi72" in name:
cmd.run(f'sed -i -e "s#ran-build-fhi72:latest#ran-build-fhi72:{imageTag}#" docker/Dockerfile.{pattern}{self.dockerfileprefix}')
elif image != 'ran-build':
cmd.run(f'sed -i -e "s#ran-build:latest#ran-build:{imageTag}#" docker/Dockerfile.{pattern}{self.dockerfileprefix}')
if image == 'oai-gnb-aerial':
cmd.run('cp -f /opt/nvidia-ipc/nvipc_src.*.tar.gz .')
ret = cmd.run(f'{self.cli} build {self.cliBuildOptions} --target {image} --tag {name}:{imageTag} --file docker/Dockerfile.{pattern}{self.dockerfileprefix} {option} . > cmake_targets/log/{name}.log 2>&1', timeout=1200)
if image == 'oai-gnb-aerial':
cmd.run('rm -f nvipc_src.*.tar.gz')
if image == 'ran-build' and ret.returncode == 0:
cmd.run(f"docker run --name test-log -d {name}:{imageTag} /bin/true")
cmd.run(f"docker cp test-log:/oai-ran/cmake_targets/log/ cmake_targets/log/{name}/")
cmd.run(f"docker rm -f test-log")
else:
cmd.run(f"mkdir -p cmake_targets/log/{name}")
# check the status of the build
ret = cmd.run(f"{self.cli} image inspect --format=\'Size = {{{{.Size}}}} bytes\' {name}:{imageTag}")
if ret.returncode != 0:
				logging.error('\u001B[1m Could not properly build ' + name + '\u001B[0m')
				status = False
				# remove the last exited container, which corresponds to the failed build
cmd.run(f"{self.cli} ps --quiet --filter \"status=exited\" -n1 | xargs --no-run-if-empty {self.cli} rm -f")
allImagesSize[name] = 'N/A -- Build Failed'
break
else:
result = re.search('Size *= *(?P<size>[0-9\-]+) *bytes', cmd.getBefore())
if result is not None:
size = float(result.group("size")) / 1000000 # convert to MB
imageSizeStr = f'{size:.1f}'
logging.debug(f'\u001B[1m {name} size is {imageSizeStr} Mbytes\u001B[0m')
allImagesSize[name] = f'{imageSizeStr} Mbytes'
else:
logging.debug(f'{name} size is unknown')
allImagesSize[name] = 'unknown'
# Now pruning dangling images in between target builds
cmd.run(f"{self.cli} image prune --force")
# Remove all intermediate build images and clean up
cmd.run(f"{self.cli} image rm ran-build:{imageTag} ran-build-asan:{imageTag} ran-build-fhi72:{imageTag} || true")
cmd.run(f"{self.cli} volume prune --force")
# Remove some cached artifacts to prevent out of diskspace problem
logging.debug(cmd.run("df -h").stdout)
logging.debug(cmd.run("docker system df").stdout)
cmd.run(f"{self.cli} buildx prune --filter until=1h --force")
logging.debug(cmd.run("df -h").stdout)
logging.debug(cmd.run("docker system df").stdout)
# create a zip with all logs
build_log_name = f'build_log_{self.testCase_id}'
CopyLogsToExecutor(cmd, lSourcePath, build_log_name)
cmd.close()
# Analyze the logs
collectInfo = AnalyzeBuildLogs(build_log_name, attemptedImages, status)
if status:
logging.info('\u001B[1m Building OAI Image(s) Pass\u001B[0m')
HTML.CreateHtmlTestRow(self.imageKind, 'OK', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlNextTabHeaderTestRow(collectInfo, allImagesSize)
return True
else:
logging.error('\u001B[1m Building OAI Images Failed\u001B[0m')
HTML.CreateHtmlTestRow(self.imageKind, 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlNextTabHeaderTestRow(collectInfo, allImagesSize)
HTML.CreateHtmlTabFooter(False)
return False
def BuildProxy(self, HTML):
svr = self.eNB_serverId[self.eNB_instance]
lIpAddr, lSourcePath = self.GetCredentials(svr)
logging.debug('Building on server: ' + lIpAddr)
ssh = cls_cmd.getConnection(lIpAddr)
self.testCase_id = HTML.testCase_id
oldRanCommidID = self.ranCommitID
oldRanRepository = self.ranRepository
oldRanAllowMerge = self.ranAllowMerge
oldRanTargetBranch = self.ranTargetBranch
self.ranCommitID = self.proxyCommit
self.ranRepository = 'https://github.com/EpiSci/oai-lte-5g-multi-ue-proxy.git'
self.ranAllowMerge = False
self.ranTargetBranch = 'master'
# Let's remove any previous run artifacts if still there
ssh.run('docker image prune --force')
# Remove any previous proxy image
ssh.run('docker image rm oai-lte-multi-ue-proxy:latest')
tag = self.proxyCommit
logging.debug('building L2sim proxy image for tag ' + tag)
# check if the corresponding proxy image with tag exists. If not, build it
ret = ssh.run(f'docker image inspect --format=\'Size = {{{{.Size}}}} bytes\' proxy:{tag}')
buildProxy = ret.returncode != 0 # if no image, build new proxy
if buildProxy:
ssh.run(f'rm -Rf {lSourcePath}')
success = CreateWorkspace(lIpAddr, lSourcePath, self.ranRepository, self.ranCommitID, self.ranTargetBranch, self.ranAllowMerge)
if not success:
raise Exception("could not clone proxy repository")
filename = f'build_log_{self.testCase_id}'
fullpath = f'{lSourcePath}/{filename}'
ssh.run(f'docker build --target oai-lte-multi-ue-proxy --tag proxy:{tag} --file {lSourcePath}/docker/Dockerfile.ubuntu18.04 {lSourcePath} > {fullpath} 2>&1')
ssh.run(f'zip -r -qq {fullpath}.zip {fullpath}')
local_file = f"{os.getcwd()}/../cmake_targets/log/{filename}.zip"
ssh.copyin(f'{fullpath}.zip', local_file)
			# don't delete, so that we can recover the zips later
#ssh.run(f'rm -f {fullpath}.zip')
ssh.run('docker image prune --force')
ret = ssh.run(f'docker image inspect --format=\'Size = {{{{.Size}}}} bytes\' proxy:{tag}')
if ret.returncode != 0:
logging.error('\u001B[1m Build of L2sim proxy failed\u001B[0m')
ssh.close()
HTML.CreateHtmlTestRow('commit ' + tag, 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlTabFooter(False)
return False
else:
logging.debug('L2sim proxy image for tag ' + tag + ' already exists, skipping build')
		# retag the built image so that we pick it up later
ssh.run(f'docker image tag proxy:{tag} oai-lte-multi-ue-proxy:latest')
		# we assume that the host on which this is built will also run the proxy. The
		# proxy currently requires the following command, and the docker-compose up
		# mechanism of the CI does not allow running arbitrary commands. Note that the
		# following actually belongs to the deployment, not the build of the proxy...
		logging.warning('the following command belongs to deployment, but no mechanism exists to exec it there!')
ssh.run('sudo ifconfig lo: 127.0.0.2 netmask 255.0.0.0 up')
# to prevent accidentally overwriting data that might be used later
self.ranCommitID = oldRanCommitID
self.ranRepository = oldRanRepository
self.ranAllowMerge = oldRanAllowMerge
self.ranTargetBranch = oldRanTargetBranch
# we do not analyze the logs (we assume the proxy builds fine at this stage),
# but need to have the following information to correctly display the HTML
files = {}
errorandwarnings = {}
errorandwarnings['errors'] = 0
errorandwarnings['warnings'] = 0
errorandwarnings['status'] = True
files['Target Image Creation'] = errorandwarnings
collectInfo = {}
collectInfo['proxy'] = files
ret = ssh.run(f'docker image inspect --format=\'Size = {{{{.Size}}}} bytes\' proxy:{tag}')
result = re.search(r'Size *= *(?P<size>[0-9\-]+) *bytes', ret.stdout)
# Cleaning any created tmp volume
ssh.run('docker volume prune --force')
ssh.close()
allImagesSize = {}
if result is not None:
imageSize = float(result.group('size')) / 1000000
logging.debug('\u001B[1m proxy size is ' + ('%.0f' % imageSize) + ' Mbytes\u001B[0m')
allImagesSize['proxy'] = str(round(imageSize,1)) + ' Mbytes'
logging.info('\u001B[1m Building L2sim Proxy Image Pass\u001B[0m')
HTML.CreateHtmlTestRow('commit ' + tag, 'OK', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlNextTabHeaderTestRow(collectInfo, allImagesSize)
return True
else:
logging.error('proxy size is unknown')
allImagesSize['proxy'] = 'unknown'
logging.error('\u001B[1m Build of L2sim proxy failed\u001B[0m')
HTML.CreateHtmlTestRow('commit ' + tag, 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlTabFooter(False)
return False
def BuildRunTests(self, HTML):
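"""Build the ran-unittests image on top of an existing ran-base image and
run the unit tests with ctest inside a container. Fails early if the server
is not Ubuntu or if no suitable ran-base image is present."""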
svr = self.eNB_serverId[self.eNB_instance]
lIpAddr, lSourcePath = self.GetCredentials(svr)
logging.debug('Building on server: ' + lIpAddr)
cmd = cls_cmd.RemoteCmd(lIpAddr)
cmd.cd(lSourcePath)
ret = cmd.run('hostnamectl')
result = re.search('Ubuntu', ret.stdout)
if result is None:
    cmd.close()
    raise Exception("Can build unit tests only on an Ubuntu server")
logging.debug('running on Ubuntu as expected')
if self.forcedWorkspaceCleanup:
cmd.run(f'sudo -S rm -Rf {lSourcePath}')
self.testCase_id = HTML.testCase_id
# check that ran-base image exists as we expect it
baseImage = 'ran-base'
baseTag = 'develop'
if self.ranAllowMerge:
if self.ranTargetBranch == 'develop':
cmd.run(f'git diff HEAD..origin/develop -- cmake_targets/build_oai cmake_targets/tools/build_helper docker/Dockerfile.base{self.dockerfileprefix} | grep --colour=never -i INDEX')
result = re.search('index', cmd.getBefore())
if result is not None:
baseTag = 'ci-temp'
ret = cmd.run(f"docker image inspect --format=\'Size = {{{{.Size}}}} bytes\' {baseImage}:{baseTag}")
if ret.returncode != 0:
logging.error(f'No {baseImage} image present, cannot build tests')
HTML.CreateHtmlTestRow(self.imageKind, 'KO', CONST.ALL_PROCESSES_OK)
HTML.CreateHtmlTabFooter(False)
return False
# build ran-unittests image
dockerfile = "ci-scripts/docker/Dockerfile.unittest.ubuntu22"
ret = cmd.run(f'docker build --progress=plain --tag ran-unittests:{baseTag} --file {dockerfile} . &> {lSourcePath}/cmake_targets/log/unittest-build.log')
if ret.returncode != 0:
build_log_name = f'build_log_{self.testCase_id}'
CopyLogsToExecutor(cmd, lSourcePath, build_log_name)
logging.error('Cannot build unit tests')
HTML.CreateHtmlTestRow("Unit test build failed", 'KO', [dockerfile])
HTML.CreateHtmlTabFooter(False)
return False
HTML.CreateHtmlTestRowQueue("Build unit tests", 'OK', [dockerfile])
# it worked, build and execute tests, and close connection
ret = cmd.run(f'docker run -a STDOUT --workdir /oai-ran/build/ --env LD_LIBRARY_PATH=/oai-ran/build/ --rm ran-unittests:{baseTag} ctest --output-on-failure --no-label-summary -j$(nproc)')
cmd.run(f'docker rmi ran-unittests:{baseTag}')
build_log_name = f'build_log_{self.testCase_id}'
CopyLogsToExecutor(cmd, lSourcePath, build_log_name)
cmd.close()
if ret.returncode == 0:
HTML.CreateHtmlTestRowQueue('Unit tests succeeded', 'OK', [ret.stdout])
HTML.CreateHtmlTabFooter(True)
return True
else:
HTML.CreateHtmlTestRowQueue('Unit tests failed (see also doc/UnitTests.md)', 'KO', [ret.stdout])
HTML.CreateHtmlTabFooter(False)
return False
def Push_Image_to_Local_Registry(self, HTML, svr_id, tag_prefix=""):
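"""Tag the images listed in IMAGES with the current commit/branch tag (plus
an optional prefix) and push them to the local CI registry; when not
building a merge request, additionally push a develop tag."""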
lIpAddr, lSourcePath = self.GetCredentials(svr_id)
logging.debug('Pushing images to server: ' + lIpAddr)
ssh = cls_cmd.getConnection(lIpAddr)
imagePrefix = 'porcepix.sboai.cs.eurecom.fr'
ret = ssh.run(f'docker login -u oaicicd -p oaicicd {imagePrefix}')
if ret.returncode != 0:
msg = 'Could not log into local registry'
logging.error(msg)
ssh.close()
HTML.CreateHtmlTestRow(msg, 'KO', CONST.ALL_PROCESSES_OK)
return False
orgTag = 'develop'
if self.ranAllowMerge:
orgTag = 'ci-temp'
for image in IMAGES:
tagToUse = tag_prefix + CreateTag(self.ranCommitID, self.ranBranch, self.ranAllowMerge)
imageTag = f"{image}:{tagToUse}"
ret = ssh.run(f'docker image tag {image}:{orgTag} {imagePrefix}/{imageTag}')
if ret.returncode != 0:
continue
ret = ssh.run(f'docker push {imagePrefix}/{imageTag}')
if ret.returncode != 0:
msg = f'Could not push {image} to local registry: {imageTag}'
logging.error(msg)
ssh.close()
HTML.CreateHtmlTestRow(msg, 'KO', CONST.ALL_PROCESSES_OK)
return False
# Creating a develop tag on the local private registry
if not self.ranAllowMerge:
devTag = f"{tag_prefix}develop"
ssh.run(f'docker image tag {image}:{orgTag} {imagePrefix}/{image}:{devTag}')
ssh.run(f'docker push {imagePrefix}/{image}:{devTag}')
ssh.run(f'docker rmi {imagePrefix}/{image}:{devTag}')
ssh.run(f'docker rmi {imagePrefix}/{imageTag} {image}:{orgTag}')
ret = ssh.run(f'docker logout {imagePrefix}')
if ret.returncode != 0:
msg = 'Could not log off from local registry'
logging.error(msg)
ssh.close()
HTML.CreateHtmlTestRow(msg, 'KO', CONST.ALL_PROCESSES_OK)
return False
ssh.close()
HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
return True
@staticmethod
def Pull_Image(cmd, images, tag, tag_prefix, registry, username, password):
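"""Pull the given images with tag_prefix+tag from a registry, retag them as
oai-ci/<image>:<tag>, and return a (success, message) tuple. Registry login
and logout are only attempted when username and password are provided."""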
if username is not None and password is not None:
logging.info(f"logging into registry {username}@{registry}")
response = cmd.run(f'docker login -u {username} -p {password} {registry}', silent=True, reportNonZero=False)
if response.returncode != 0:
msg = f'Could not log into registry {username}@{registry}'
logging.error(msg)
return False, msg
pulled_images = []
for image in images:
imagePrefTag = f"{image}:{tag_prefix}{tag}"
imageTag = f"{image}:{tag}"
response = cmd.run(f'docker pull {registry}/{imagePrefTag}')
if response.returncode != 0:
msg = f'Could not pull {image} from registry {registry}: {imagePrefTag}'
logging.error(msg)
return False, msg
cmd.run(f'docker tag {registry}/{imagePrefTag} oai-ci/{imageTag}')
cmd.run(f'docker rmi {registry}/{imagePrefTag}')
pulled_images += [f"oai-ci/{imageTag}"]
if username is not None and password is not None:
cmd.run(f'docker logout {registry}')
# we have the images, if logout fails it's no problem
msg = "Pulled Images:\n" + '\n'.join(pulled_images)
return True, msg
def Pull_Image_from_Registry(self, HTML, svr_id, images, tag=None, tag_prefix="", registry="porcepix.sboai.cs.eurecom.fr", username="oaicicd", password="oaicicd"):
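"""Wrapper around Pull_Image(): resolves the target node from svr_id,
derives the tag from the current commit if none is given, and reports the
outcome in the HTML test row."""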
lIpAddr, lSourcePath = self.GetCredentials(svr_id)
logging.debug('\u001B[1m Pulling image(s) on server: ' + lIpAddr + '\u001B[0m')
if not tag:
tag = CreateTag(self.ranCommitID, self.ranBranch, self.ranAllowMerge)
with cls_cmd.getConnection(lIpAddr) as cmd:
success, msg = Containerize.Pull_Image(cmd, images, tag, tag_prefix, registry, username, password)
param = f"on node {lIpAddr}"
if success:
HTML.CreateHtmlTestRowQueue(param, 'OK', [msg])
else:
HTML.CreateHtmlTestRowQueue(param, 'KO', [msg])
return success
def Clean_Test_Server_Images(self, HTML, svr_id, images, tag=None):
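"""Remove the oai-ci/<image>:<tag> images from the given test server and
list the removed images in the HTML test row."""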
lIpAddr, lSourcePath = self.GetCredentials(svr_id)
logging.debug(f'\u001B[1m Cleaning image(s) from server: {lIpAddr}\u001B[0m')
if not tag:
tag = CreateTag(self.ranCommitID, self.ranBranch, self.ranAllowMerge)
status = True
with cls_cmd.getConnection(lIpAddr) as myCmd:
removed_images = []
for image in images:
fullImage = f"oai-ci/{image}:{tag}"
cmd = f'docker rmi {fullImage}'
if myCmd.run(cmd).returncode != 0:
status = False
removed_images += [fullImage]
msg = "Removed Images:\n" + '\n'.join(removed_images)
s = 'OK' if status else 'KO'
param = f"on node {lIpAddr}"
HTML.CreateHtmlTestRowQueue(param, s, [msg])
return status
def Create_Workspace(self,HTML):
svr = self.eNB_serverId[self.eNB_instance]
lIpAddr, lSourcePath = self.GetCredentials(svr)
success = CreateWorkspace(lIpAddr, lSourcePath, self.ranRepository, self.ranCommitID, self.ranTargetBranch, self.ranAllowMerge)
if success:
HTML.CreateHtmlTestRowQueue('N/A', 'OK', [f"created workspace {lSourcePath}"])
else:
HTML.CreateHtmlTestRowQueue('N/A', 'KO', ["cannot create workspace"])
return success
def DeployObject(self, HTML):
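"""Deploy the docker-compose services of the current test case on the
target server, retrying up to self.num_attempts times; an attempt only
counts as successful if every started service reports a healthy state."""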
svr = self.eNB_serverId[self.eNB_instance]
num_attempts = self.num_attempts
lIpAddr, lSourcePath = self.GetCredentials(svr)
logging.debug(f'Deploying OAI Object on server: {lIpAddr}')
yaml = self.yamlPath[self.eNB_instance].strip('/')
# creating the log folder by default
local_dir = f"{os.getcwd()}/../cmake_targets/log/{yaml.split('/')[-1]}"
os.makedirs(local_dir, exist_ok=True)
wd = f'{lSourcePath}/{yaml}'
wd_yaml = f'{wd}/docker-compose.y*ml'
yaml_dir = yaml.split('/')[-1]
with cls_cmd.getConnection(lIpAddr) as ssh:
services = GetServices(ssh, self.services[self.eNB_instance], wd_yaml)
if not services or not str(services).strip():
msg = 'Cannot determine services to start'
logging.error(msg)
HTML.CreateHtmlTestRowQueue('N/A', 'KO', [msg])
return False
ExistEnvFilePrint(ssh, wd)
WriteEnvFile(ssh, services, wd, self.deploymentTag, self.flexricTag)
if num_attempts <= 0:
raise ValueError(f'Invalid value for num_attempts: {num_attempts}, must be greater than 0')
for attempt in range(num_attempts):
imagesInfo = []
healthInfo = []
logging.info(f'will start services {services}')
status = ssh.run(f'docker compose -f {wd_yaml} up -d -- {services}')
if status.returncode != 0:
msg = f'cannot deploy services {services}: {status.stdout}'
logging.error(msg)
HTML.CreateHtmlTestRowQueue('N/A', 'KO', [msg])
return False
for svc in services.split():
health, msg = GetServiceHealth(ssh, svc, f'{wd_yaml}')
logging.info(msg)
imagesInfo.append(msg)
healthInfo.append(health)
deployed = all(healthInfo)
if deployed:
break
elif (attempt < num_attempts - 1):
logging.warning(f'Failed to deploy on attempt {attempt + 1} of {num_attempts}, restarting services {services}')
for svc in services.split():
CopyinServiceLog(ssh, lSourcePath, yaml_dir, svc, wd_yaml, f'{svc}-{HTML.testCase_id}-attempt{attempt}.log')
ssh.run(f'docker compose -f {wd_yaml} down -- {services}')
if deployed:
HTML.CreateHtmlTestRowQueue('N/A', 'OK', ['\n'.join(imagesInfo)])
else:
HTML.CreateHtmlTestRowQueue('N/A', 'KO', ['\n'.join(imagesInfo)])
return deployed
def UndeployObject(self, HTML, RAN):
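"""Stop the deployed docker-compose services, collect each service's log
for the HTML report, tear the deployment down, and analyze the logs;
undeployment passes only if all logs could be copied and check out clean."""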
svr = self.eNB_serverId[self.eNB_instance]
lIpAddr, lSourcePath = self.GetCredentials(svr)
logging.debug(f'\u001B[1m Undeploying OAI Object from server: {lIpAddr}\u001B[0m')
yaml = self.yamlPath[self.eNB_instance].strip('/')
wd = f'{lSourcePath}/{yaml}'
yaml_dir = yaml.split('/')[-1]
with cls_cmd.getConnection(lIpAddr) as ssh:
ExistEnvFilePrint(ssh, wd)
services = GetRunningServices(ssh, f"{wd}/docker-compose.y*ml")
copyin_res = None
if services is not None:
all_serv = " ".join([s for s, _ in services])
ssh.run(f'docker compose -f {wd}/docker-compose.y*ml stop -- {all_serv}')
copyin_res = all(CopyinServiceLog(ssh, lSourcePath, yaml_dir, s, f"{wd}/docker-compose.y*ml", f'{s}-{HTML.testCase_id}.log') for s, _ in services)
else:
logging.warning('could not identify services to stop => no log file')
ssh.run(f'docker compose -f {wd}/docker-compose.y*ml down -v')
ssh.run(f'rm {wd}/.env')
if not copyin_res:
HTML.CreateHtmlTestRowQueue('N/A', 'KO', ['Could not copy logfile(s)'])
return False
else:
log_results = [CheckLogs(self, yaml_dir, s, HTML, RAN) for s, _ in services]
success = all(log_results)
if success:
logging.info('\u001B[1m Undeploying OAI Object Pass\u001B[0m')
else:
logging.error('\u001B[1m Undeploying OAI Object Failed\u001B[0m')
return success
# * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The OpenAirInterface Software Alliance licenses this file to You under
# * the OAI Public License, Version 1.1 (the "License"); you may not use this file
# * except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.openairinterface.org/?page_id=698
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# *-------------------------------------------------------------------------------
# * For more information about the OpenAirInterface (OAI) Software Alliance:
# * contact@openairinterface.org
# */
#---------------------------------------------------------------------
#
# Required Python Version
# Python 3.x
#
#---------------------------------------------------------------------
import logging
# for time.sleep
import time
import re
import yaml
import cls_cmd
class Module_UE:
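"""Abstraction of a UE (hardware module or simulated UE) that is driven
through per-module shell scripts declared in ci_infra.yaml.

Hypothetical usage sketch (the module name must match an entry in the
YAML file):

    ue = Module_UE('my-quectel-module')
    if ue.initialize():
        ip = ue.attach()
        # ... run traffic tests ...
        ue.detach()
        ue.terminate()
"""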
def __init__(self, module_name, node=None, filename="ci_infra.yaml"):
with open(filename, 'r') as f:
all_ues = yaml.load(f, Loader=yaml.FullLoader)
m = all_ues.get(module_name)
if m is None:
raise Exception(f'no such module name "{module_name}" in "{filename}"')
self.module_name = module_name
self.host = m['Host'] if m['Host'] != "%%current_host%%" else None
if node is None and self.host is None:
raise Exception('node not provided when needed')
elif node is not None and self.host is None:
self.host = node
self.cmd_dict = {
"attach": m.get('AttachScript'),
"detach": m.get('DetachScript'),
"initialize": m.get('InitScript'),
"terminate": m.get('TermScript'),
"getNetwork": m.get('NetworkScript'),
"check": m.get('CheckStatusScript'),
"dataEnable": m.get('DataEnableScript'),
"dataDisable": m.get('DataDisableScript'),
}
self.interface = m.get('IF')
self.MTU = m.get('MTU')
self.trace = m.get('trace') is True
self.logStore = m.get('LogStore')
self.cmd_prefix = m.get('CmdPrefix')
self.runIperf3Server = m.get('RunIperf3Server', True)
self.namespace = m.get('Namespace')
self.cnPath = m.get('CNPath')
logging.info(f'initialized {self.module_name}@{self.host} from {filename}')
def __str__(self):
return f"{self.module_name}@{self.host} [IP: {self.getIP()}]"
def __repr__(self):
return self.__str__()
def _command(self, cmd, silent=False):
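"""Run a shell command for this UE: locally if the module is hosted on this
machine ("" or "localhost"), otherwise over SSH; returns the completed
command object of cls_cmd (with args, returncode, stdout)."""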
if cmd is None:
raise Exception("no command provided")
if self.host == "" or self.host == "localhost":
c = cls_cmd.LocalCmd()
else:
c = cls_cmd.RemoteCmd(self.host)
response = c.run(cmd, silent=silent)
c.close()
return response
#-----------------
# PUBLIC Methods
#-----------------
def initialize(self):
if self.trace:
    # UE tracing is not implemented yet; once it is, it should be enabled
    # here via self._enableTrace()
    raise Exception("UE tracing not implemented yet")
# we first terminate to make sure the UE has been stopped
if self.cmd_dict["detach"]:
self._command(self.cmd_dict["detach"], silent=True)
self._command(self.cmd_dict["terminate"], silent=True)
ret = self._command(self.cmd_dict["initialize"])
logging.info(f'For command: {ret.args} | return output: {ret.stdout} | Code: {ret.returncode}')
# Here each UE returns differently for the successful initialization, requires check based on UE
return ret.returncode == 0
def terminate(self):
self._command(self.cmd_dict["terminate"])
if self.trace:
    # UE tracing is not implemented yet; once it is, it should be disabled
    # here via self._disableTrace(), returning self._logCollect()
    raise Exception("UE tracing not implemented yet")
return None
def attach(self, attach_tries=4, attach_timeout=60):
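"""Attach the UE and poll for an IP address every 5 s, for up to
attach_timeout seconds per try (the default 60 s allows 12 polls); on
timeout, detach and retry, up to attach_tries times in total. Returns the
IP address, or None if none was obtained."""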
ip = None
while attach_tries > 0:
self._command(self.cmd_dict["attach"])
timeout = attach_timeout
logging.debug("Waiting for IP address to be assigned")
while timeout > 0 and not ip:
time.sleep(5)
timeout -= 5
ip = self.getIP()
if ip:
break
logging.warning(f"UE did not receive IP address after {attach_timeout} s, detaching")
attach_tries -= 1
self._command(self.cmd_dict["detach"])
time.sleep(5)
if ip:
logging.debug(f'\u001B[1mUE IP Address for UE {self.module_name} is {ip}\u001B[0m')
else:
logging.debug(f'\u001B[1;37;41mUE IP Address for UE {self.module_name} Not Found!\u001B[0m')
return ip
def detach(self):
self._command(self.cmd_dict["detach"])
def check(self):
cmd = self.cmd_dict["check"]
if cmd:
return self._command(cmd).stdout
else:
logging.warning(f"requested status check of UE {self.getName()}, but operation is not supported")
return f"UE {self.getName()} does not support status checking"
def dataEnable(self):
cmd = self.cmd_dict["dataEnable"]
if cmd:
self._command(cmd)
return True
else:
message = f"requested enabling data of UE {self.getName()}, but operation is not supported"
logging.error(message)
return False
def dataDisable(self):
cmd = self.cmd_dict["dataDisable"]
if cmd:
self._command(cmd)
return True
else:
message = f"requested disabling data of UE {self.getName()}, but operation is not supported"
logging.error(message)
return False
def getIP(self):
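"""Parse the UE's IP address from the NetworkScript output (first
"inet a.b.c.d" occurrence); returns it as a string, or None."""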
output = self._command(self.cmd_dict["getNetwork"], silent=True)
result = re.search(r'inet (?P<ip>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)', output.stdout)
if result and result.group('ip'):
ip = result.group('ip')
return ip
return None
def checkMTU(self):
output = self._command(self.cmd_dict["getNetwork"], silent=True)
result = re.search('mtu (?P<mtu>[0-9]+)', output.stdout)
if result and result.group('mtu') and int(result.group('mtu')) == self.MTU:
logging.debug(f'\u001B[1mUE Module {self.module_name} NIC MTU is {self.MTU} as expected\u001B[0m')
return True
else:
logging.debug(f'\u001B[1;37;41m UE module {self.module_name} has an unexpected NIC MTU or no MTU was found! Expected: {self.MTU}\u001B[0m')
return False
def getName(self):
return self.module_name
def getIFName(self):
return self.interface
def getHost(self):
return self.host
def getNamespace(self):
return self.namespace
def getCNPath(self):
return self.cnPath
def getRunIperf3Server(self):
return self.runIperf3Server
def getCmdPrefix(self):
return self.cmd_prefix if self.cmd_prefix else ""
def _enableTrace(self):
raise Exception("not implemented")
def _disableTrace(self):
raise Exception("not implemented")
def _logCollect(self):
raise Exception("not implemented")