diff --git a/ci-scripts/Jenkinsfile-GitLab-Container b/ci-scripts/Jenkinsfile-GitLab-Container
index b41361758095cd4c11c090383c56f707d86dcf59..da85a8a282f0fd850392223d63b6e65dbd7f5c2e 100644
--- a/ci-scripts/Jenkinsfile-GitLab-Container
+++ b/ci-scripts/Jenkinsfile-GitLab-Container
@@ -218,6 +218,26 @@ pipeline {
             }
           }
         }
+        stage ("NSA B200 Sanity Check") {
+          when { expression {doMandatoryTests} }
+          steps {
+            script {
+              triggerSlaveJob ('RAN-NSA-B200-Module-LTEBOX-Container', 'Test-NSA-B200')
+            }
+          }
+          post {
+            always {
+              script {
+                finalizeSlaveJob('RAN-NSA-B200-Module-LTEBOX-Container')
+              }
+            }
+            failure {
+              script {
+                currentBuild.result = 'FAILURE'
+              }
+            }
+          }
+        }
       }
     }
     stage ("Images Push to Registries") {
@@ -227,8 +247,14 @@ pipeline {
           triggerSlaveJob ('RAN-DockerHub-Push', 'Push-to-Docker-Hub')
         }
         post {
+          success {
+            script {
+              echo "Push to Docker-Hub OK"
+            }
+          }
           failure {
             script {
+              echo "Push to Docker-Hub KO"
               currentBuild.result = 'FAILURE'
             }
           }
@@ -281,6 +307,11 @@ pipeline {
 // ----  Slave Job functions
 
 def triggerSlaveJob (jobName, gitlabStatusName) {
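+  // Pass the MR number to the slave job for build naming; fall back to 'develop' for non-MR (push) events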
+  if ("MERGE".equals(env.gitlabActionType)) {
+    MR_NUMBER = env.gitlabMergeRequestIid
+  } else {
+    MR_NUMBER = 'develop'
+  }
   // Workaround for the "cancelled" GitLab pipeline notification
   // The slave job is triggered with the propagate false so the following commands are executed
   // Its status is now PASS/SUCCESS from a stage pipeline point of view
@@ -290,6 +321,7 @@ def triggerSlaveJob (jobName, gitlabStatusName) {
       string(name: 'eNB_Repository', value: String.valueOf(GIT_URL)),
       string(name: 'eNB_Branch', value: String.valueOf(env.gitlabSourceBranch)),
       string(name: 'eNB_CommitID', value: String.valueOf(env.gitlabMergeRequestLastCommit)),
+      string(name: 'eNB_MR', value: String.valueOf(MR_NUMBER)),
       booleanParam(name: 'eNB_mergeRequest', value: "MERGE".equals(env.gitlabActionType)),
       string(name: 'eNB_TargetBranch', value: String.valueOf(env.gitlabTargetBranch))
     ], propagate: false
@@ -306,6 +338,11 @@ def triggerSlaveJob (jobName, gitlabStatusName) {
 }
 
 def triggerSlaveJobNoGitLab (jobName) {
+  if ("MERGE".equals(env.gitlabActionType)) {
+    MR_NUMBER = env.gitlabMergeRequestIid
+  } else {
+    MR_NUMBER = 'develop'
+  }
   // Workaround for the "cancelled" GitLab pipeline notification
   // The slave job is triggered with the propagate false so the following commands are executed
   // Its status is now PASS/SUCCESS from a stage pipeline point of view
@@ -315,6 +352,7 @@ def triggerSlaveJobNoGitLab (jobName) {
       string(name: 'eNB_Repository', value: String.valueOf(GIT_URL)),
       string(name: 'eNB_Branch', value: String.valueOf(env.gitlabSourceBranch)),
       string(name: 'eNB_CommitID', value: String.valueOf(env.gitlabMergeRequestLastCommit)),
+      string(name: 'eNB_MR', value: String.valueOf(MR_NUMBER)),
       booleanParam(name: 'eNB_mergeRequest', value: "MERGE".equals(env.gitlabActionType)),
       string(name: 'eNB_TargetBranch', value: String.valueOf(env.gitlabTargetBranch))
     ], propagate: false
diff --git a/ci-scripts/Jenkinsfile-tmp-multi-enb-benetel b/ci-scripts/Jenkinsfile-tmp-multi-enb-benetel
index 8ac2e6adffa3a5d5d3f434e8f936b6de33da667f..195456da11c6971792dd30ed499f5fb072837214 100644
--- a/ci-scripts/Jenkinsfile-tmp-multi-enb-benetel
+++ b/ci-scripts/Jenkinsfile-tmp-multi-enb-benetel
@@ -134,8 +134,8 @@ pipeline {
                         eNB_CommitID = params.eNB_CommitID
                     }
                     echo "eNB_CommitID          :   ${eNB_CommitID}"
-                    if (params.eNB_AllowMergeRequestProcess!= null) {
-                        eNB_AllowMergeRequestProcess = params.eNB_AllowMergeRequestProcess
+                    if (params.eNB_mergeRequest != null) {
+                        eNB_AllowMergeRequestProcess = params.eNB_mergeRequest
                         if (eNB_AllowMergeRequestProcess) {
                             if (params.eNB_TargetBranch != null) {
                                 eNB_TargetBranch = params.eNB_TargetBranch
diff --git a/ci-scripts/Jenkinsfile-tmp-multi-enb-benetel-long b/ci-scripts/Jenkinsfile-tmp-multi-enb-benetel-long
index fb7f3738d2a763bdf2e204ab53f3c0dbdc8da125..3a002b374811e41bc7ad4b834b4fe24cb299cb0c 100644
--- a/ci-scripts/Jenkinsfile-tmp-multi-enb-benetel-long
+++ b/ci-scripts/Jenkinsfile-tmp-multi-enb-benetel-long
@@ -145,8 +145,8 @@ pipeline {
                     //    eNB_CommitID = params.eNB_CommitID
                     //}
                     echo "eNB_CommitID          :   ${eNB_CommitID}"
-                    if (params.eNB_AllowMergeRequestProcess!= null) {
-                        eNB_AllowMergeRequestProcess = params.eNB_AllowMergeRequestProcess
+                    if (params.eNB_mergeRequest != null) {
+                        eNB_AllowMergeRequestProcess = params.eNB_mergeRequest
                         if (eNB_AllowMergeRequestProcess) {
                             if (params.eNB_TargetBranch != null) {
                                 eNB_TargetBranch = params.eNB_TargetBranch
diff --git a/ci-scripts/Jenkinsfile-tmp-multi-enb-nsa b/ci-scripts/Jenkinsfile-tmp-multi-enb-nsa
index 3243d1645d8d929d4d79cd54de447552d89e6a14..dd9fbbd03c81e54d9dd8306adb5d9ea8461ab88e 100644
--- a/ci-scripts/Jenkinsfile-tmp-multi-enb-nsa
+++ b/ci-scripts/Jenkinsfile-tmp-multi-enb-nsa
@@ -51,270 +51,310 @@ def StatusForDb = ""
 
 
 pipeline {
-    agent {label pythonExecutor}
-    options {
-        disableConcurrentBuilds()
-        ansiColor('xterm')
-        lock(extra: [[resource: ciSmartPhonesResource2]], resource: ciSmartPhonesResource1)
+  agent {label pythonExecutor}
+  options {
+    disableConcurrentBuilds()
+    ansiColor('xterm')
+    lock(extra: [[resource: ciSmartPhonesResource2]], resource: ciSmartPhonesResource1)
+  }
+  stages {
+    stage("Build Init") {
+      steps {
+        // update the build name and description
+        buildName "${params.eNB_MR}"
+        buildDescription "Branch : ${params.eNB_Branch}"
+      }
     }
-    stages {
-        stage("Build Init") {
-            steps {
-                // update the build name and description
-                buildName "${params.eNB_MR}"
-                buildDescription "Branch : ${params.eNB_Branch}"
-            }
-        }
-        stage ("Verify Parameters") {
-            steps {
-                script {
-                    echo '\u2705 \u001B[32mVerify Parameters\u001B[0m'
-                    def allParametersPresent = true
+    stage ("Verify Parameters") {
+      steps {
+        script {
+          echo '\u2705 \u001B[32mVerify Parameters\u001B[0m'
+          def allParametersPresent = true
 
-                    // It is already to late to check it
-                    if (params.pythonExecutor != null) {
-                        echo "eNB CI executor node  :   ${pythonExecutor}"
-                    }
-                    // If not present picking a default Stage Name
-                    if (params.pipelineTestStageName == null) {
-                        // picking default
-                        testStageName = 'Template Test Stage'
-                    }
+          // It is already too late to check it
+          if (params.pythonExecutor != null) {
+            echo "eNB CI executor node  :   ${pythonExecutor}"
+          }
+          // If not present, pick a default Stage Name
+          if (params.pipelineTestStageName == null) {
+            // picking default
+            testStageName = 'Template Test Stage'
+          }
 
-                    if (params.SmartPhonesResource1 == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.SmartPhonesResource2 == null) {
-                        allParametersPresent = false
-                    }
-                    // 1st eNB parameters
-                    if (params.eNB_IPAddress == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.eNB_SourceCodePath == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.eNB_Credentials == null) {
-                        allParametersPresent = false
-                    }
-                    // 2nd eNB parameters
-                    if (params.eNB1_IPAddress == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.eNB1_SourceCodePath == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.eNB1_Credentials == null) {
-                        allParametersPresent = false
-                    }
-                    // 3rd eNB parameters
-                    if (params.eNB2_IPAddress == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.eNB2_SourceCodePath == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.eNB2_Credentials == null) {
-                        allParametersPresent = false
-                    }
-                    // the following 4 parameters should be pushed by the master trigger
-                    // if not present, take the job GIT variables (used for developing)
-                    if (params.eNB_Repository == null) {
-                        eNB_Repository = env.GIT_URL
-                    } else {
-                        eNB_Repository = params.eNB_Repository
-                    }
-                    echo "eNB_Repository        :   ${eNB_Repository}"
-                    if (params.eNB_Branch == null) {
-                        eNB_Branch = env.GIT_BRANCH
-                    } else {
-                        eNB_Branch = params.eNB_Branch
-                    }
-                    echo "eNB_Branch            :   ${eNB_Branch}"
-                    if (params.eNB_CommitID == null) {
-                        eNB_CommitID = env.GIT_COMMIT
-                    } else {
-                        eNB_CommitID = params.eNB_CommitID
-                    }
-                    echo "eNB_CommitID          :   ${eNB_CommitID}"
-                    if (params.eNB_AllowMergeRequestProcess!= null) {
-                        eNB_AllowMergeRequestProcess = params.eNB_AllowMergeRequestProcess
-                        if (eNB_AllowMergeRequestProcess) {
-                            if (params.eNB_TargetBranch != null) {
-                                eNB_TargetBranch = params.eNB_TargetBranch
-                            } else {
-                                eNB_TargetBranch = 'develop'
-                            }
-                            echo "eNB_TargetBranch      :   ${eNB_TargetBranch}"
-                        }
-                    }
+          if (params.SmartPhonesResource1 == null) {
+            allParametersPresent = false
+          }
+          if (params.SmartPhonesResource2 == null) {
+            allParametersPresent = false
+          }
+          // 1st eNB parameters
+          if (params.eNB_IPAddress == null) {
+            allParametersPresent = false
+          }
+          if (params.eNB_SourceCodePath == null) {
+            allParametersPresent = false
+          }
+          if (params.eNB_Credentials == null) {
+            allParametersPresent = false
+          }
+          // 2nd eNB parameters
+          if (params.eNB1_IPAddress == null) {
+            allParametersPresent = false
+          }
+          if (params.eNB1_SourceCodePath == null) {
+            allParametersPresent = false
+          }
+          if (params.eNB1_Credentials == null) {
+            allParametersPresent = false
+          }
+          // 3rd eNB parameters
+          if (params.eNB2_IPAddress == null) {
+            allParametersPresent = false
+          }
+          if (params.eNB2_SourceCodePath == null) {
+            allParametersPresent = false
+          }
+          if (params.eNB2_Credentials == null) {
+            allParametersPresent = false
+          }
+          // the following 4 parameters should be pushed by the master trigger
+          // if not present, take the job GIT variables (used for developing)
+          if (params.eNB_Repository == null) {
+            eNB_Repository = env.GIT_URL
+          } else {
+            eNB_Repository = params.eNB_Repository
+          }
+          echo "eNB_Repository        :   ${eNB_Repository}"
+          if (params.eNB_Branch == null) {
+            eNB_Branch = env.GIT_BRANCH
+          } else {
+            eNB_Branch = params.eNB_Branch
+          }
+          echo "eNB_Branch            :   ${eNB_Branch}"
+          if (params.eNB_CommitID == null) {
+            eNB_CommitID = env.GIT_COMMIT
+          } else {
+            eNB_CommitID = params.eNB_CommitID
+          }
+          echo "eNB_CommitID          :   ${eNB_CommitID}"
+          if (params.eNB_mergeRequest != null) {
+            eNB_AllowMergeRequestProcess = params.eNB_mergeRequest
+            if (eNB_AllowMergeRequestProcess) {
+              if (params.eNB_TargetBranch != null) {
+                eNB_TargetBranch = params.eNB_TargetBranch
+              } else {
+                eNB_TargetBranch = 'develop'
+              }
+              echo "eNB_TargetBranch      :   ${eNB_TargetBranch}"
+            }
+          }
 
-                    if (params.EPC_IPAddress == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.EPC_Type == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.EPC_SourceCodePath == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.EPC_Credentials == null) {
-                        allParametersPresent = false
-                    }
+          if (params.EPC_IPAddress == null) {
+            allParametersPresent = false
+          }
+          if (params.EPC_Type == null) {
+            allParametersPresent = false
+          }
+          if (params.EPC_SourceCodePath == null) {
+            allParametersPresent = false
+          }
+          if (params.EPC_Credentials == null) {
+            allParametersPresent = false
+          }
 
-                    if (params.ADB_IPAddress == null) {
-                        allParametersPresent = false
-                    }
-                    if (params.ADB_Credentials == null) {
-                        allParametersPresent = false
-                    }
+          if (params.ADB_IPAddress == null) {
+            allParametersPresent = false
+          }
+          if (params.ADB_Credentials == null) {
+            allParametersPresent = false
+          }
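+          // No database host given: point the label at the executor so the
+          // "SQL Collect" agent below still resolves even when that stage is skipped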
+          if (params.DataBaseHost == "none") {
+            DataBaseHost = pythonExecutor
+          }
 
-                    if (allParametersPresent) {
-                        echo "All parameters are present"
-                        if (eNB_AllowMergeRequestProcess) {
-                            sh "git fetch"
-                            sh "./ci-scripts/doGitLabMerge.sh --src-branch ${eNB_Branch} --src-commit ${eNB_CommitID} --target-branch ${eNB_TargetBranch} --target-commit latest"
-                        } else {
-                            sh "git fetch"
-                            sh "git checkout -f ${eNB_CommitID}"
-                        }
-                    } else {
-                        echo "Some parameters are missing"
-                        sh "./ci-scripts/fail.sh"
-                    }
-                }
+          if (allParametersPresent) {
+            echo "All parameters are present"
+            if (eNB_AllowMergeRequestProcess) {
+              sh "git fetch"
+              sh "./ci-scripts/doGitLabMerge.sh --src-branch ${eNB_Branch} --src-commit ${eNB_CommitID} --target-branch ${eNB_TargetBranch} --target-commit latest"
+            } else {
+              sh "git fetch"
+              sh "git checkout -f ${eNB_CommitID}"
             }
+          } else {
+            echo "Some parameters are missing"
+            sh "./ci-scripts/fail.sh"
+          }
         }
-        stage ("Build and Test") {
-            steps {
-                script {
-                    dir ('ci-scripts') {
-                        echo "\u2705 \u001B[32m${testStageName}\u001B[0m"
-                        // If not present picking a default XML file
-                        if (params.pythonTestXmlFile == null) {
-                            // picking default
-                            testXMLFile = 'xml_files/enb_usrpB210_band7_50PRB.xml'
-                            echo "Test XML file(default):   ${testXMLFile}"
-                            mainPythonAllXmlFiles += "--XMLTestFile=" + testXMLFile + " "
-                        } else {
-                            String[] myXmlTestSuite = testXMLFile.split("\\r?\\n")
-                            for (xmlFile in myXmlTestSuite) {
-                                if (fileExists(xmlFile)) {
-                                    mainPythonAllXmlFiles += "--XMLTestFile=" + xmlFile + " "
-                                    echo "Test XML file         :   ${xmlFile}"
-                                }
-                            }
-                        }
-                        withCredentials([
-                            [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password'],
-                            [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB1_Credentials}", usernameVariable: 'eNB1_Username', passwordVariable: 'eNB1_Password'],
-                            [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB2_Credentials}", usernameVariable: 'eNB2_Username', passwordVariable: 'eNB2_Password'],
-                            [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password'],
-                            [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.ADB_Credentials}", usernameVariable: 'ADB_Username', passwordVariable: 'ADB_Password']
-                        ]) {
-                            sh "python3 main.py --mode=InitiateHtml --ranRepository=${eNB_Repository} --ranBranch=${eNB_Branch} --ranCommitID=${eNB_CommitID} --ranAllowMerge=${eNB_AllowMergeRequestProcess} --ranTargetBranch=${eNB_TargetBranch} --ADBIPAddress=${params.ADB_IPAddress} --ADBUserName=${ADB_Username} --ADBPassword=${ADB_Password} ${mainPythonAllXmlFiles}"
-                            String[] myXmlTestSuite = testXMLFile.split("\\r?\\n")
-                            for (xmlFile in myXmlTestSuite) {
-                                if (fileExists(xmlFile)) {
-                                    try {
-                                        sh "python3 main.py --mode=TesteNB --ranRepository=${eNB_Repository} --ranBranch=${eNB_Branch} --ranCommitID=${eNB_CommitID} --ranAllowMerge=${eNB_AllowMergeRequestProcess} --ranTargetBranch=${eNB_TargetBranch} --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath} --eNB1IPAddress=${params.eNB1_IPAddress} --eNB1UserName=${eNB1_Username} --eNB1Password=${eNB1_Password} --eNB1SourceCodePath=${params.eNB1_SourceCodePath} --eNB2IPAddress=${params.eNB2_IPAddress} --eNB2UserName=${eNB2_Username} --eNB2Password=${eNB2_Password} --eNB2SourceCodePath=${params.eNB2_SourceCodePath} --EPCIPAddress=${params.EPC_IPAddress} --EPCType=${params.EPC_Type} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --ADBIPAddress=${params.ADB_IPAddress} --ADBUserName=${ADB_Username} --ADBPassword=${ADB_Password} --XMLTestFile=${xmlFile}"
-                                    } catch (Exception e) {
-                                        currentBuild.result = 'FAILURE'
-                                        buildStageStatus = false
-                                    }
-                                }
-                            }
-                            sh "python3 main.py --mode=FinalizeHtml --finalStatus=${buildStageStatus} --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password}"
-                        }
-                    }
+      }
+    }
+    stage ("Build and Test") {
+      steps {
+        script {
+          dir ('ci-scripts') {
+            echo "\u2705 \u001B[32m${testStageName}\u001B[0m"
+            // If not present, pick a default XML file
+            if (params.pythonTestXmlFile == null) {
+              // picking default
+              testXMLFile = 'xml_files/enb_usrpB210_band7_50PRB.xml'
+              echo "Test XML file(default):   ${testXMLFile}"
+              mainPythonAllXmlFiles += "--XMLTestFile=" + testXMLFile + " "
+            } else {
+              String[] myXmlTestSuite = testXMLFile.split("\\r?\\n")
+              for (xmlFile in myXmlTestSuite) {
+                if (fileExists(xmlFile)) {
+                  mainPythonAllXmlFiles += "--XMLTestFile=" + xmlFile + " "
+                  echo "Test XML file         :   ${xmlFile}"
+                }
+              }
+            }
+            withCredentials([
+              [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password'],
+              [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB1_Credentials}", usernameVariable: 'eNB1_Username', passwordVariable: 'eNB1_Password'],
+              [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB2_Credentials}", usernameVariable: 'eNB2_Username', passwordVariable: 'eNB2_Password'],
+              [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password'],
+              [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.ADB_Credentials}", usernameVariable: 'ADB_Username', passwordVariable: 'ADB_Password']
+            ]) {
+              sh "python3 main.py --mode=InitiateHtml --ranRepository=${eNB_Repository} --ranBranch=${eNB_Branch} --ranCommitID=${eNB_CommitID} --ranAllowMerge=${eNB_AllowMergeRequestProcess} --ranTargetBranch=${eNB_TargetBranch} --ADBIPAddress=${params.ADB_IPAddress} --ADBUserName=${ADB_Username} --ADBPassword=${ADB_Password} ${mainPythonAllXmlFiles}"
+              String[] myXmlTestSuite = testXMLFile.split("\\r?\\n")
+              for (xmlFile in myXmlTestSuite) {
+                if (fileExists(xmlFile)) {
+                  try {
+                    sh "python3 main.py --mode=TesteNB --ranRepository=${eNB_Repository} --ranBranch=${eNB_Branch} --ranCommitID=${eNB_CommitID} --ranAllowMerge=${eNB_AllowMergeRequestProcess} --ranTargetBranch=${eNB_TargetBranch} --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath} --eNB1IPAddress=${params.eNB1_IPAddress} --eNB1UserName=${eNB1_Username} --eNB1Password=${eNB1_Password} --eNB1SourceCodePath=${params.eNB1_SourceCodePath} --eNB2IPAddress=${params.eNB2_IPAddress} --eNB2UserName=${eNB2_Username} --eNB2Password=${eNB2_Password} --eNB2SourceCodePath=${params.eNB2_SourceCodePath} --EPCIPAddress=${params.EPC_IPAddress} --EPCType=${params.EPC_Type} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --ADBIPAddress=${params.ADB_IPAddress} --ADBUserName=${ADB_Username} --ADBPassword=${ADB_Password} --XMLTestFile=${xmlFile}"
+                  } catch (Exception e) {
+                    currentBuild.result = 'FAILURE'
+                    buildStageStatus = false
+                  }
                 }
+              }
+              sh "python3 main.py --mode=FinalizeHtml --finalStatus=${buildStageStatus} --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password}"
             }
+          }
         }
-        stage('Log Collection') {
-            parallel {
-                stage('Log Collection (eNB - Build)') {
-                    steps {
-                        withCredentials([
-                             [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
-                        ]) {
-                            echo '\u2705 \u001B[32mLog Collection (eNB - Build)\u001B[0m'
-                            sh "python3 ci-scripts/main.py --mode=LogCollectBuild --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath}"
+      }
+    }
+    stage('Log Collection') {
+      parallel {
+        stage('Log Collection (eNB - Build)') {
+          steps {
+            withCredentials([
+               [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
+            ]) {
+              echo '\u2705 \u001B[32mLog Collection (eNB - Build)\u001B[0m'
+              sh "python3 ci-scripts/main.py --mode=LogCollectBuild --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath}"
 
-                            echo '\u2705 \u001B[32mLog Transfer (eNB - Build)\u001B[0m'
-                            sh "sshpass -p \'${eNB_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${eNB_Username}@${params.eNB_IPAddress}:${eNB_SourceCodePath}/cmake_targets/build.log.zip ./build.log.${env.BUILD_ID}.zip || true"
-                        }
-                        script {
-                            if(fileExists("build.log.${env.BUILD_ID}.zip")) {
-                                archiveArtifacts "build.log.${env.BUILD_ID}.zip"
-                            }
-                        }
-                    }
-                }
-                stage('Log Collection (eNB - Run)') {
-                    steps {
-                        withCredentials([
-                             [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
-                        ]) {
-                            echo '\u2705 \u001B[32mLog Collection (eNB - Run)\u001B[0m'
-                            sh "python3 ci-scripts/main.py --mode=LogCollecteNB --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath}"
+              echo '\u2705 \u001B[32mLog Transfer (eNB - Build)\u001B[0m'
+              sh "sshpass -p \'${eNB_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${eNB_Username}@${params.eNB_IPAddress}:${eNB_SourceCodePath}/cmake_targets/build.log.zip ./build.log.${env.BUILD_ID}.zip || true"
+            }
+            script {
+              if(fileExists("build.log.${env.BUILD_ID}.zip")) {
+                archiveArtifacts "build.log.${env.BUILD_ID}.zip"
+              }
+            }
+          }
+        }
+        stage('Log Collection (eNB - Run)') {
+          steps {
+            withCredentials([
+               [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.eNB_Credentials}", usernameVariable: 'eNB_Username', passwordVariable: 'eNB_Password']
+            ]) {
+              echo '\u2705 \u001B[32mLog Collection (eNB - Run)\u001B[0m'
+              sh "python3 ci-scripts/main.py --mode=LogCollecteNB --eNBIPAddress=${params.eNB_IPAddress} --eNBUserName=${eNB_Username} --eNBPassword=${eNB_Password} --eNBSourceCodePath=${params.eNB_SourceCodePath}"
 
-                            echo '\u2705 \u001B[32mLog Transfer (eNB - Run)\u001B[0m'
-                            sh "sshpass -p \'${eNB_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${eNB_Username}@${params.eNB_IPAddress}:${eNB_SourceCodePath}/cmake_targets/enb.log.zip ./enb.log.${env.BUILD_ID}.zip || true"
-                        }
-                        script {
-                            if(fileExists("enb.log.${env.BUILD_ID}.zip")) {
-                                archiveArtifacts "enb.log.${env.BUILD_ID}.zip"
-                            }
-                        }
-                    }
+              echo '\u2705 \u001B[32mLog Transfer (eNB - Run)\u001B[0m'
+              sh "sshpass -p \'${eNB_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${eNB_Username}@${params.eNB_IPAddress}:${eNB_SourceCodePath}/cmake_targets/enb.log.zip ./enb.log.${env.BUILD_ID}.zip || true"
+            }
+            script {
+              if(fileExists("enb.log.${env.BUILD_ID}.zip")) {
+                archiveArtifacts "enb.log.${env.BUILD_ID}.zip"
+              }
+            }
+          }
+        }
+        stage('Log Collection (CN)') {
+          // Bypassing this stage if EPC server is not defined
+          when {
+            expression { params.EPC_IPAddress != "none" }
+          }
+          steps {
+            script {
+              withCredentials([
+                   [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
+              ]) {
+                echo '\u2705 \u001B[32mLog Collection (HSS)\u001B[0m'
+                sh "python3 ci-scripts/main.py --mode=LogCollectHSS --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
+                if (params.EPC_Type != 'OAICN5G') {
+                  sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/hss.log.zip ./hss.log.${env.BUILD_ID}.zip || true"
                 }
-                stage('Log Collection (CN)') {
-                    steps {
-                        withCredentials([
-                             [$class: 'UsernamePasswordMultiBinding', credentialsId: "${params.EPC_Credentials}", usernameVariable: 'EPC_Username', passwordVariable: 'EPC_Password']
-                        ]) {
-                            echo '\u2705 \u001B[32mLog Transfer (CN)\u001B[0m'
-                            sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/logs/oai-cn5g.log.zip ./oai-cn5g.log.${env.BUILD_ID}.zip || true"
-                        }
-                        script {
-                            if(fileExists("oai-cn5g.log.${env.BUILD_ID}.zip")) {
-                                archiveArtifacts "oai-cn5g.log.${env.BUILD_ID}.zip"
-                            }
-                            if(fileExists("ci-scripts/test_results.html")) {
-                                sh "mv ci-scripts/test_results.html test_results-${JOB_NAME}.html"
-                                sh "sed -i -e 's#TEMPLATE_JOB_NAME#${JOB_NAME}#' -e 's@build #TEMPLATE_BUILD_ID@build #${BUILD_ID}@' -e 's#Build-ID: TEMPLATE_BUILD_ID#Build-ID: <a href=\"${BUILD_URL}\">${BUILD_ID}</a>#' -e 's#TEMPLATE_STAGE_NAME#${testStageName}#' test_results-${JOB_NAME}.html"
-                                archiveArtifacts "test_results-${JOB_NAME}.html"
-                            }
-                        }
-                    }
+                echo '\u2705 \u001B[32mLog Collection (MME or AMF)\u001B[0m'
+                sh "python3 ci-scripts/main.py --mode=LogCollectMME --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
+                if (params.EPC_Type == 'OAICN5G') {
+                  sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/mme.log.zip ./amf.log.${env.BUILD_ID}.zip || true"
+                } else {
+                  sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/mme.log.zip ./mme.log.${env.BUILD_ID}.zip || true"
                 }
-                stage ("SQL Collect"){
-                agent {label DataBaseHost}
-                    steps {
-                        script {
-                            if (currentBuild.result=='FAILURE') {StatusForDb = 'FAIL'} else {StatusForDb = 'PASS'}
-                            sh "python3 /home/oaicicd/mysql/sql_connect.py ${JOB_NAME} ${params.eNB_MR} ${params.eNB_Branch} ${env.BUILD_ID} ${env.BUILD_URL} ${StatusForDb} ''"
-                        }
-                    }
+                echo '\u2705 \u001B[32mLog Collection (SPGW or SMF/UPF)\u001B[0m'
+                sh "python3 ci-scripts/main.py --mode=LogCollectSPGW --EPCIPAddress=${params.EPC_IPAddress} --EPCUserName=${EPC_Username} --EPCPassword=${EPC_Password} --EPCSourceCodePath=${params.EPC_SourceCodePath} --EPCType=${params.EPC_Type}"
+                if (params.EPC_Type == 'OAICN5G') {
+                  sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/spgw.log.zip ./smf-upf.log.${env.BUILD_ID}.zip || true"
+                } else {
+                  sh "sshpass -p \'${EPC_Password}\' scp -o 'StrictHostKeyChecking no' -o 'ConnectTimeout 10' ${EPC_Username}@${params.EPC_IPAddress}:${EPC_SourceCodePath}/scripts/spgw.log.zip ./spgw.log.${env.BUILD_ID}.zip || true"
                 }
+              }
+              if(fileExists("hss.log.${env.BUILD_ID}.zip")) {
+                archiveArtifacts "hss.log.${env.BUILD_ID}.zip"
+              }
+              if(fileExists("mme.log.${env.BUILD_ID}.zip")) {
+                archiveArtifacts "mme.log.${env.BUILD_ID}.zip"
+              }
+              if(fileExists("spgw.log.${env.BUILD_ID}.zip")) {
+                archiveArtifacts "spgw.log.${env.BUILD_ID}.zip"
+              }
+              if(fileExists("amf.log.${env.BUILD_ID}.zip")) {
+                archiveArtifacts "amf.log.${env.BUILD_ID}.zip"
+              }
+              if(fileExists("smf-upf.log.${env.BUILD_ID}.zip")) {
+                archiveArtifacts "smf-upf.log.${env.BUILD_ID}.zip"
+              }
+              echo '\u2705 \u001B[32mLog Collection for CoreNetwork Done!\u001B[0m'
             }
+          }
         }
-    } 
-
-    post {
-        always {
+        stage ("SQL Collect") {
+          when {
+            expression { params.DataBaseHost != "none" }
+          }
+          agent {label DataBaseHost}
+          steps {
             script {
-                if (params.pipelineZipsConsoleLog != null) {
-                    if (params.pipelineZipsConsoleLog) {
-                        echo "Archiving Jenkins console log"
-                        sh "wget --no-check-certificate --no-proxy ${env.JENKINS_URL}/job/${env.JOB_NAME}/${env.BUILD_ID}/consoleText -O consoleText.log || true"
-                        sh "zip -m consoleText.log.${env.BUILD_ID}.zip consoleText.log || true"
-                        if(fileExists("consoleText.log.${env.BUILD_ID}.zip")) {
-                            archiveArtifacts "consoleText.log.${env.BUILD_ID}.zip"
-                        }
-                    }
-                }
+              if (currentBuild.result == 'FAILURE') { StatusForDb = 'FAIL' } else { StatusForDb = 'PASS' }
+              sh "python3 /home/oaicicd/mysql/sql_connect.py ${JOB_NAME} ${params.eNB_MR} ${params.eNB_Branch} ${env.BUILD_ID} ${env.BUILD_URL} ${StatusForDb} ''"
+            }
+          }
+        }
+      }
+    }
+  }
+
+  post {
+    always {
+      script {
+        if(fileExists("ci-scripts/test_results.html")) {
+          sh "mv ci-scripts/test_results.html test_results-${JOB_NAME}.html"
+          sh "sed -i -e 's#TEMPLATE_JOB_NAME#${JOB_NAME}#' -e 's@build #TEMPLATE_BUILD_ID@build #${BUILD_ID}@' -e 's#Build-ID: TEMPLATE_BUILD_ID#Build-ID: <a href=\"${BUILD_URL}\">${BUILD_ID}</a>#' -e 's#TEMPLATE_STAGE_NAME#${testStageName}#' test_results-${JOB_NAME}.html"
+          archiveArtifacts "test_results-${JOB_NAME}.html"
+        }
+        if (params.pipelineZipsConsoleLog != null) {
+          if (params.pipelineZipsConsoleLog) {
+            echo "Archiving Jenkins console log"
+            sh "wget --no-check-certificate --no-proxy ${env.JENKINS_URL}/job/${env.JOB_NAME}/${env.BUILD_ID}/consoleText -O consoleText.log || true"
+            sh "zip -m consoleText.log.${env.BUILD_ID}.zip consoleText.log || true"
+            if(fileExists("consoleText.log.${env.BUILD_ID}.zip")) {
+              archiveArtifacts "consoleText.log.${env.BUILD_ID}.zip"
             }
+          }
         }
+      }
     }
+  }
 }
diff --git a/ci-scripts/Jenkinsfile-trig-nsa b/ci-scripts/Jenkinsfile-trig-nsa
index 40d74f4da0e9c886ab1d8d09b4792d34a7861a17..27e27c842613188822a4ef29ce1c0e16e432d015 100644
--- a/ci-scripts/Jenkinsfile-trig-nsa
+++ b/ci-scripts/Jenkinsfile-trig-nsa
@@ -53,7 +53,7 @@ pipeline {
                             string(name: 'eNB_Branch', value: String.valueOf(SRC_BRANCH)),
                             string(name: 'eNB_CommitID', value: String.valueOf(COMMIT_ID)),
                             string(name: 'eNB_TargetBranch', value: String.valueOf(TARGET_BRANCH)),
-                            booleanParam(name: 'eNB_AllowMergeRequestProcess', value: Boolean.valueOf(ALLOW_MERGE))
+                            booleanParam(name: 'eNB_mergeRequest', value: Boolean.valueOf(ALLOW_MERGE))
                         ]
                         //calling NSA 2x2
                         build job: "RAN-NSA-2x2-Module-OAIEPC", wait : true, propagate : false, parameters: [
@@ -61,7 +61,7 @@ pipeline {
                             string(name: 'eNB_Branch', value: String.valueOf(SRC_BRANCH)),
                             string(name: 'eNB_CommitID', value: String.valueOf(COMMIT_ID)),
                             string(name: 'eNB_TargetBranch', value: String.valueOf(TARGET_BRANCH)),
-                            booleanParam(name: 'eNB_AllowMergeRequestProcess', value: Boolean.valueOf(ALLOW_MERGE))
+                            booleanParam(name: 'eNB_mergeRequest', value: Boolean.valueOf(ALLOW_MERGE))
                         ]
                         //calling LTE 2x2
                         build job: "RAN-LTE-2x2-Module-OAIEPC", wait : true, propagate : false, parameters: [
@@ -69,7 +69,7 @@ pipeline {
                             string(name: 'eNB_Branch', value: String.valueOf(SRC_BRANCH)),
                             string(name: 'eNB_CommitID', value: String.valueOf(COMMIT_ID)),
                             string(name: 'eNB_TargetBranch', value: String.valueOf(TARGET_BRANCH)),
-                            booleanParam(name: 'eNB_AllowMergeRequestProcess', value: Boolean.valueOf(ALLOW_MERGE))
+                            booleanParam(name: 'eNB_mergeRequest', value: Boolean.valueOf(ALLOW_MERGE))
                         ]
                         //calling SA 
                         build job: "RAN-SA-Module-CN5G", wait : true, propagate : false, parameters: [
@@ -77,7 +77,7 @@ pipeline {
                             string(name: 'eNB_Branch', value: String.valueOf(SRC_BRANCH)),
                             string(name: 'eNB_CommitID', value: String.valueOf(COMMIT_ID)),
                             string(name: 'eNB_TargetBranch', value: String.valueOf(TARGET_BRANCH)),
-                            booleanParam(name: 'eNB_AllowMergeRequestProcess', value: Boolean.valueOf(ALLOW_MERGE))
+                            booleanParam(name: 'eNB_mergeRequest', value: Boolean.valueOf(ALLOW_MERGE))
                         ]
 
                     }
diff --git a/ci-scripts/cls_containerize.py b/ci-scripts/cls_containerize.py
index d0669b7665f3e5b9692ee6fa336227a5583ddbf4..dac24080627394564bf19b0f44bd2ff8e74b8c6c 100644
--- a/ci-scripts/cls_containerize.py
+++ b/ci-scripts/cls_containerize.py
@@ -101,10 +101,43 @@ class Containerize():
 		self.cliContName = ''
 		self.cliOptions = ''
 
+		self.imageToCopy = ''
+		self.registrySvrId = ''
+		self.testSvrId = ''
+
 #-----------------------------------------------------------
 # Container management functions
 #-----------------------------------------------------------
 
+	def _createWorkspace(self, sshSession, password, sourcePath):
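+		# Clone or refresh the repository on the remote server, check out the
+		# commit under test and, for merge requests, merge the target branch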
+		# on RedHat/CentOS .git extension is mandatory
+		result = re.search(r'([a-zA-Z0-9:\-./])+\.git', self.ranRepository)
+		if result is not None:
+			full_ran_repo_name = self.ranRepository.replace('git/', 'git')
+		else:
+			full_ran_repo_name = self.ranRepository + '.git'
+		sshSession.command('mkdir -p ' + sourcePath, '\$', 5)
+		sshSession.command('cd ' + sourcePath, '\$', 5)
+		sshSession.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + full_ran_repo_name + ' .; else stdbuf -o0 git fetch --prune; fi', '\$', 600)
+		# Raphael: here add a check if git clone or git fetch went smoothly
+		sshSession.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
+		sshSession.command('git config user.name "OAI Jenkins"', '\$', 5)
+
+		sshSession.command('echo ' + password + ' | sudo -S git clean -x -d -ff', '\$', 30)
+		sshSession.command('mkdir -p cmake_targets/log', '\$', 5)
+		# if the commit ID is provided use it to point to it
+		if self.ranCommitID != '':
+			sshSession.command('git checkout -f ' + self.ranCommitID, '\$', 30)
+		# if the branch is not develop, then it is a merge request and we need to do
+		# the potential merge. Note that merge conflicts should already have been checked earlier
+		if (self.ranAllowMerge):
+			if self.ranTargetBranch == '':
+				if (self.ranBranch != 'develop') and (self.ranBranch != 'origin/develop'):
+					sshSession.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
+			else:
+				logging.debug('Merging with the target branch: ' + self.ranTargetBranch)
+				sshSession.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
+
 	def BuildImage(self, HTML):
 		if self.ranRepository == '' or self.ranBranch == '' or self.ranCommitID == '':
 			HELP.GenericHelp(CONST.Version)
@@ -173,53 +206,28 @@ class Containerize():
 	
 		self.testCase_id = HTML.testCase_id
 	
-		# on RedHat/CentOS .git extension is mandatory
-		result = re.search('([a-zA-Z0-9\:\-\.\/])+\.git', self.ranRepository)
-		if result is not None:
-			full_ran_repo_name = self.ranRepository.replace('git/', 'git')
-		else:
-			full_ran_repo_name = self.ranRepository + '.git'
-		mySSH.command('mkdir -p ' + lSourcePath, '\$', 5)
-		mySSH.command('cd ' + lSourcePath, '\$', 5)
-		mySSH.command('if [ ! -e .git ]; then stdbuf -o0 git clone ' + full_ran_repo_name + ' .; else stdbuf -o0 git fetch --prune; fi', '\$', 600)
-		# Raphael: here add a check if git clone or git fetch went smoothly
-		mySSH.command('git config user.email "jenkins@openairinterface.org"', '\$', 5)
-		mySSH.command('git config user.name "OAI Jenkins"', '\$', 5)
+		self._createWorkspace(mySSH, lPassWord, lSourcePath)
 
-		mySSH.command('echo ' + lPassWord + ' | sudo -S git clean -x -d -ff', '\$', 30)
-		mySSH.command('mkdir -p cmake_targets/log', '\$', 5)
-		# if the commit ID is provided use it to point to it
-		if self.ranCommitID != '':
-			mySSH.command('git checkout -f ' + self.ranCommitID, '\$', 30)
-		# if the branch is not develop, then it is a merge request and we need to do 
-		# the potential merge. Note that merge conflicts should already been checked earlier
-		imageTag = 'develop'
+		# if asterix, copy the entitlement and subscription manager configurations
+		if self.host == 'Red Hat':
+			mySSH.command('mkdir -p tmp/ca/', '\$', 5)
+			mySSH.command('mkdir -p tmp/entitlement/', '\$', 5)
+			mySSH.command('sudo cp /etc/rhsm/ca/redhat-uep.pem tmp/ca/', '\$', 5)
+			mySSH.command('sudo cp /etc/pki/entitlement/*.pem tmp/entitlement/', '\$', 5)
+
+		sharedimage = 'ran-build'
 		sharedTag = 'develop'
 		forceSharedImageBuild = False
+		imageTag = 'develop'
 		if (self.ranAllowMerge):
 			imageTag = 'ci-temp'
-			if self.ranTargetBranch == '':
-				if (self.ranBranch != 'develop') and (self.ranBranch != 'origin/develop'):
-					mySSH.command('git merge --ff origin/develop -m "Temporary merge for CI"', '\$', 5)
-			else:
-				logging.debug('Merging with the target branch: ' + self.ranTargetBranch)
-				mySSH.command('git merge --ff origin/' + self.ranTargetBranch + ' -m "Temporary merge for CI"', '\$', 5)
-				mySSH.command('git diff HEAD..origin/develop -- docker/Dockerfile.ran' + self.dockerfileprefix + ' | grep -i INDEX', '\$', 5)
+			if self.ranTargetBranch == 'develop':
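+				# rebuild the shared ran-build image only if the MR touches its Dockerfile;
+				# an "index" line in the git diff output indicates such a change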
+				mySSH.command('git diff HEAD..origin/develop -- docker/Dockerfile.ran' + self.dockerfileprefix + ' | grep --colour=never -i INDEX', '\$', 5)
 				result = re.search('index', mySSH.getBefore())
 				if result is not None:
 					forceSharedImageBuild = True
 					sharedTag = 'ci-temp'
-		else:
-			forceSharedImageBuild = True
-
- 		# if asterix, copy the entitlement and subscription manager configurations
-		if self.host == 'Red Hat':
-			mySSH.command('mkdir -p  tmp/ca/', '\$', 5)
-			mySSH.command('mkdir -p tmp/entitlement/', '\$', 5) 
-			mySSH.command('sudo cp /etc/rhsm/ca/redhat-uep.pem tmp/ca/', '\$', 5)
-			mySSH.command('sudo cp /etc/pki/entitlement/*.pem tmp/entitlement/', '\$', 5)
 
-		sharedimage = 'ran-build'
 		# Let's remove any previous run artifacts if still there
 		mySSH.command(self.cli + ' image prune --force', '\$', 30)
 		if forceSharedImageBuild:
@@ -397,6 +405,56 @@ class Containerize():
 			HTML.CreateHtmlTabFooter(False)
 			sys.exit(1)
 
+	def Copy_Image_to_Test_Server(self, HTML):
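+		# Transfer a locally built image to a test server: docker-save/gzip on the
+		# registry server, copy through the executor, then docker-load on the target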
+		imageTag = 'develop'
+		if (self.ranAllowMerge):
+			imageTag = 'ci-temp'
+
+		lSsh = SSH.SSHConnection()
+		# Going to the Docker Registry server
+		if self.registrySvrId == '0':
+			lIpAddr = self.eNBIPAddress
+			lUserName = self.eNBUserName
+			lPassWord = self.eNBPassword
+		elif self.registrySvrId == '1':
+			lIpAddr = self.eNB1IPAddress
+			lUserName = self.eNB1UserName
+			lPassWord = self.eNB1Password
+		elif self.registrySvrId == '2':
+			lIpAddr = self.eNB2IPAddress
+			lUserName = self.eNB2UserName
+			lPassWord = self.eNB2Password
+		lSsh.open(lIpAddr, lUserName, lPassWord)
+		lSsh.command('docker save ' + self.imageToCopy + ':' + imageTag + ' | gzip > ' + self.imageToCopy + '-' + imageTag + '.tar.gz', '\$', 60)
+		lSsh.copyin(lIpAddr, lUserName, lPassWord, '~/' + self.imageToCopy + '-' + imageTag + '.tar.gz', '.')
+		lSsh.command('rm ' + self.imageToCopy + '-' + imageTag + '.tar.gz', '\$', 60)
+		lSsh.close()
+
+		# Going to the Test Server
+		if self.testSvrId == '0':
+			lIpAddr = self.eNBIPAddress
+			lUserName = self.eNBUserName
+			lPassWord = self.eNBPassword
+		elif self.testSvrId == '1':
+			lIpAddr = self.eNB1IPAddress
+			lUserName = self.eNB1UserName
+			lPassWord = self.eNB1Password
+		elif self.testSvrId == '2':
+			lIpAddr = self.eNB2IPAddress
+			lUserName = self.eNB2UserName
+			lPassWord = self.eNB2Password
+		lSsh.open(lIpAddr, lUserName, lPassWord)
+		lSsh.copyout(lIpAddr, lUserName, lPassWord, './' + self.imageToCopy + '-' + imageTag + '.tar.gz', '~')
+		lSsh.command('docker rmi ' + self.imageToCopy + ':' + imageTag, '\$', 10)
+		lSsh.command('docker load < ' + self.imageToCopy + '-' + imageTag + '.tar.gz', '\$', 60)
+		lSsh.command('rm ' + self.imageToCopy + '-' + imageTag + '.tar.gz', '\$', 60)
+		lSsh.close()
+
+		if os.path.isfile('./' + self.imageToCopy + '-' + imageTag + '.tar.gz'):
+			os.remove('./' + self.imageToCopy + '-' + imageTag + '.tar.gz')
+
+		HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
+
 	def DeployObject(self, HTML, EPC):
 		if self.eNB_serverId[self.eNB_instance] == '0':
 			lIpAddr = self.eNBIPAddress
@@ -417,35 +475,33 @@ class Containerize():
 			HELP.GenericHelp(CONST.Version)
 			sys.exit('Insufficient Parameter')
 		logging.debug('\u001B[1m Deploying OAI Object on server: ' + lIpAddr + '\u001B[0m')
+
 		mySSH = SSH.SSHConnection()
 		mySSH.open(lIpAddr, lUserName, lPassWord)
-		# Putting the CPUs in a good state, we do that only on a few servers
-		mySSH.command('hostname', '\$', 5)
-		result = re.search('obelix|asterix',  mySSH.getBefore())
-		if result is not None:
-			mySSH.command('if command -v cpupower &> /dev/null; then echo ' + lPassWord + ' | sudo -S cpupower idle-set -D 0; fi', '\$', 5)
-			time.sleep(5)
 		
+		self._createWorkspace(mySSH, lPassWord, lSourcePath)
+
 		mySSH.command('cd ' + lSourcePath + '/' + self.yamlPath[self.eNB_instance], '\$', 5)
 		mySSH.command('cp docker-compose.yml ci-docker-compose.yml', '\$', 5)
 		imageTag = 'develop'
 		if (self.ranAllowMerge):
 			imageTag = 'ci-temp'
 		mySSH.command('sed -i -e "s/image: oai-enb:latest/image: oai-enb:' + imageTag + '/" ci-docker-compose.yml', '\$', 2)
+		mySSH.command('sed -i -e "s/image: oai-gnb:latest/image: oai-gnb:' + imageTag + '/" ci-docker-compose.yml', '\$', 2)
 		localMmeIpAddr = EPC.MmeIPAddress
 		mySSH.command('sed -i -e "s/CI_MME_IP_ADDR/' + localMmeIpAddr + '/" ci-docker-compose.yml', '\$', 2)
-		if self.flexranCtrlDeployed:
-			mySSH.command('sed -i -e \'s/FLEXRAN_ENABLED:.*/FLEXRAN_ENABLED: "yes"/\' ci-docker-compose.yml', '\$', 2)
-			mySSH.command('sed -i -e "s/CI_FLEXRAN_CTL_IP_ADDR/' + self.flexranCtrlIpAddress + '/" ci-docker-compose.yml', '\$', 2)
-		else:
-			mySSH.command('sed -i -e "s/FLEXRAN_ENABLED:.*$/FLEXRAN_ENABLED: \"no\"/" ci-docker-compose.yml', '\$', 2)
-			mySSH.command('sed -i -e "s/CI_FLEXRAN_CTL_IP_ADDR/127.0.0.1/" ci-docker-compose.yml', '\$', 2)
+#		if self.flexranCtrlDeployed:
+#			mySSH.command('sed -i -e "s/FLEXRAN_ENABLED:.*/FLEXRAN_ENABLED: \'yes\'/" ci-docker-compose.yml', '\$', 2)
+#			mySSH.command('sed -i -e "s/CI_FLEXRAN_CTL_IP_ADDR/' + self.flexranCtrlIpAddress + '/" ci-docker-compose.yml', '\$', 2)
+#		else:
+#			mySSH.command('sed -i -e "s/FLEXRAN_ENABLED:.*$/FLEXRAN_ENABLED: \'no\'/" ci-docker-compose.yml', '\$', 2)
+#			mySSH.command('sed -i -e "s/CI_FLEXRAN_CTL_IP_ADDR/127.0.0.1/" ci-docker-compose.yml', '\$', 2)
 		# Currently support only one
-		mySSH.command('docker-compose --file ci-docker-compose.yml config --services | sed -e "s@^@service=@"', '\$', 2)
+		mySSH.command('docker-compose --file ci-docker-compose.yml config --services | sed -e "s@^@service=@" 2>&1', '\$', 10)
 		result = re.search('service=(?P<svc_name>[a-zA-Z0-9\_]+)', mySSH.getBefore())
 		if result is not None:
 			svcName = result.group('svc_name')
-			mySSH.command('docker-compose --file ci-docker-compose.yml up -d ' + svcName, '\$', 2)
+			mySSH.command('docker-compose --file ci-docker-compose.yml up -d ' + svcName, '\$', 10)
 
 		# Checking Status
 		mySSH.command('docker-compose --file ci-docker-compose.yml config', '\$', 5)
@@ -459,7 +515,7 @@ class Containerize():
 			time.sleep(5)
 			cnt = 0
 			while (cnt < 3):
-				mySSH.command('docker inspect --format=\'{{.State.Health.Status}}\' ' + containerName, '\$', 5)
+				mySSH.command('docker inspect --format="{{.State.Health.Status}}" ' + containerName, '\$', 5)
 				unhealthyNb = mySSH.getBefore().count('unhealthy')
 				healthyNb = mySSH.getBefore().count('healthy') - unhealthyNb
 				startingNb = mySSH.getBefore().count('starting')
@@ -528,12 +584,9 @@ class Containerize():
 			time.sleep(5)
 			mySSH.command('docker logs ' + containerName + ' > ' + lSourcePath + '/cmake_targets/' + self.eNB_logFile[self.eNB_instance], '\$', 30)
 			mySSH.command('docker rm -f ' + containerName, '\$', 30)
+		# Forcing the down now to remove the networks and any artifacts
+		mySSH.command('docker-compose --file ci-docker-compose.yml down', '\$', 5)
 
-		# Putting the CPUs back in a idle state, we do that only on a few servers
-		mySSH.command('hostname', '\$', 5)
-		result = re.search('obelix|asterix',  mySSH.getBefore())
-		if result is not None:
-			mySSH.command('if command -v cpupower &> /dev/null; then echo ' + lPassWord + ' | sudo -S cpupower idle-set -E; fi', '\$', 5)
 		mySSH.close()
 
 		# Analyzing log file!
@@ -841,3 +894,114 @@ class Containerize():
 		else:
 			self.exitStatus = 1
 			HTML.CreateHtmlTestRowQueue(self.cliOptions, 'KO', 1, html_queue)
+
+	def CheckAndAddRoute(self, svrName, ipAddr, userName, password):
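+		# Ensure the static routes between the CI servers exist and that IPv4
+		# forwarding and the iptables FORWARD policy let the traffic through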
+		logging.debug('Checking IP routing on ' + svrName)
+		mySSH = SSH.SSHConnection()
+		if svrName == 'porcepix':
+			mySSH.open(ipAddr, userName, password)
+			# Check if route to asterix gnb exists
+			mySSH.command('ip route | grep --colour=never "192.168.68.64/26"', '\$', 10)
+			result = re.search('192.168.18.194', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.64/26 via 192.168.18.194 dev eno1', '\$', 10)
+			# Check if route to obelix enb exists
+			mySSH.command('ip route | grep --colour=never "192.168.68.128/26"', '\$', 10)
+			result = re.search('192.168.18.193', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.128/26 via 192.168.18.193 dev eno1', '\$', 10)
+			# Check if route to nepes gnb exists
+			mySSH.command('ip route | grep --colour=never "192.168.68.192/26"', '\$', 10)
+			result = re.search('192.168.18.209', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.192/26 via 192.168.18.209 dev eno1', '\$', 10)
+			# Check if forwarding is enabled
+			mySSH.command('sysctl net.ipv4.conf.all.forwarding', '\$', 10)
+			result = re.search('net.ipv4.conf.all.forwarding = 1', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S sysctl net.ipv4.conf.all.forwarding=1', '\$', 10)
+			# Check if iptables forwarding is accepted
+			mySSH.command('echo ' + password + ' | sudo -S iptables -L', '\$', 10)
+			result = re.search('Chain FORWARD .*policy ACCEPT', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S iptables -P FORWARD ACCEPT', '\$', 10)
+			mySSH.close()
+		if svrName == 'asterix':
+			mySSH.open(ipAddr, userName, password)
+			# Check if route to porcepix epc exists
+			mySSH.command('ip route | grep --colour=never "192.168.61.192/26"', '\$', 10)
+			result = re.search('192.168.18.210', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.61.192/26 via 192.168.18.210 dev em1', '\$', 10)
+			# Check if route to porcepix cn5g exists
+			mySSH.command('ip route | grep --colour=never "192.168.70.128/26"', '\$', 10)
+			result = re.search('192.168.18.210', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.70.128/26 via 192.168.18.210 dev em1', '\$', 10)
+			# Check if X2 route to obelix enb exists
+			mySSH.command('ip route | grep --colour=never "192.168.68.128/26"', '\$', 10)
+			result = re.search('192.168.18.193', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.128/26 via 192.168.18.193 dev em1', '\$', 10)
+			# Check if forwarding is enabled
+			mySSH.command('sysctl net.ipv4.conf.all.forwarding', '\$', 10)
+			result = re.search('net.ipv4.conf.all.forwarding = 1', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S sysctl net.ipv4.conf.all.forwarding=1', '\$', 10)
+			# Check if iptables forwarding is accepted
+			mySSH.command('echo ' + password + ' | sudo -S iptables -L', '\$', 10)
+			result = re.search('Chain FORWARD .*policy ACCEPT', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S iptables -P FORWARD ACCEPT', '\$', 10)
+			mySSH.close()
+		if svrName == 'obelix':
+			mySSH.open(ipAddr, userName, password)
+			# Check if route to porcepix epc exists
+			mySSH.command('ip route | grep --colour=never "192.168.61.192/26"', '\$', 10)
+			result = re.search('192.168.18.210', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.61.192/26 via 192.168.18.210 dev eno1', '\$', 10)
+			# Check if X2 route to asterix gnb exists
+			mySSH.command('ip route | grep --colour=never "192.168.68.64/26"', '\$', 10)
+			result = re.search('192.168.18.194', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.64/26 via 192.168.18.194 dev eno1', '\$', 10)
+			# Check if X2 route to nepes gnb exists
+			mySSH.command('ip route | grep --colour=never "192.168.68.192/26"', '\$', 10)
+			result = re.search('192.168.18.209', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.192/26 via 192.168.18.209 dev eno1', '\$', 10)
+			# Check if forwarding is enabled
+			mySSH.command('sysctl net.ipv4.conf.all.forwarding', '\$', 10)
+			result = re.search('net.ipv4.conf.all.forwarding = 1', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S sysctl net.ipv4.conf.all.forwarding=1', '\$', 10)
+			# Check if iptables forwarding is accepted
+			mySSH.command('echo ' + password + ' | sudo -S iptables -L', '\$', 10)
+			result = re.search('Chain FORWARD .*policy ACCEPT', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S iptables -P FORWARD ACCEPT', '\$', 10)
+			mySSH.close()
+		if svrName == 'nepes':
+			mySSH.open(ipAddr, userName, password)
+			# Check if route to porcepix epc exists
+			mySSH.command('ip route | grep --colour=never "192.168.61.192/26"', '\$', 10)
+			result = re.search('192.168.18.210', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.61.192/26 via 192.168.18.210 dev enp0s31f6', '\$', 10)
+			# Check if X2 route to obelix enb exists
+			mySSH.command('ip route | grep --colour=never "192.168.68.128/26"', '\$', 10)
+			result = re.search('192.168.18.193', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S ip route add 192.168.68.128/26 via 192.168.18.193 dev enp0s31f6', '\$', 10)
+			# Check if forwarding is enabled
+			mySSH.command('sysctl net.ipv4.conf.all.forwarding', '\$', 10)
+			result = re.search('net.ipv4.conf.all.forwarding = 1', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S sysctl net.ipv4.conf.all.forwarding=1', '\$', 10)
+			# Check if iptables forwarding is accepted
+			mySSH.command('echo ' + password + ' | sudo -S iptables -L', '\$', 10)
+			result = re.search('Chain FORWARD .*policy ACCEPT', mySSH.getBefore())
+			if result is None:
+				mySSH.command('echo ' + password + ' | sudo -S iptables -P FORWARD ACCEPT', '\$', 10)
+			mySSH.close()
diff --git a/ci-scripts/cls_module_ue.py b/ci-scripts/cls_module_ue.py
index 74ae9c207e7e0e2bcc97e4bf642f0b915602fbec..e006eb6aae0b2e8593ee8ca2d27a42b2a2c1e40f 100644
--- a/ci-scripts/cls_module_ue.py
+++ b/ci-scripts/cls_module_ue.py
@@ -63,7 +63,7 @@ class Module_UE:
 	#if not it will be started
 	def CheckCMProcess(self,CNType):
 		HOST=self.HostUsername+'@'+self.HostIPAddress
-		COMMAND="ps aux | grep " + self.Process['Name'] + " | grep -v grep "
+		COMMAND="ps aux | grep --colour=never " + self.Process['Name'] + " | grep -v grep "
 		logging.debug(COMMAND)
 		ssh = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
 		result = ssh.stdout.readlines()
@@ -81,7 +81,7 @@ class Module_UE:
 			#checking the process
 			time.sleep(5)
 			HOST=self.HostUsername+'@'+self.HostIPAddress
-			COMMAND="ps aux | grep " + self.Process['Name'] + " | grep -v grep "
+			COMMAND="ps aux | grep --colour=never " + self.Process['Name'] + " | grep -v grep "
 			logging.debug(COMMAND)
 			ssh = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
 			result = ssh.stdout.readlines()
@@ -108,7 +108,7 @@ class Module_UE:
 		response= []
 		tentative = 3 
 		while (len(response)==0) and (tentative>0):
-			COMMAND="ip a show dev " + self.UENetwork + " | grep inet | grep " + self.UENetwork
+			COMMAND="ip a show dev " + self.UENetwork + " | grep --colour=never inet | grep " + self.UENetwork
 			logging.debug(COMMAND)
 			ssh = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
 			response = ssh.stdout.readlines()
@@ -136,7 +136,7 @@ class Module_UE:
 		response= []
 		tentative = 3 
 		while (len(response)==0) and (tentative>0):
-			COMMAND="ip a show dev " + self.UENetwork + " | grep mtu"
+			COMMAND="ip a show dev " + self.UENetwork + " | grep --colour=never mtu"
 			logging.debug(COMMAND)
 			ssh = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],shell=False,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
 			response = ssh.stdout.readlines()
diff --git a/ci-scripts/cls_oaicitest.py b/ci-scripts/cls_oaicitest.py
index 92a4e8637e7303cbcd9ec2f3b7987d4b49e0194c..ab7d249f7e05dee1b120d657a38c800e95d6f48f 100644
--- a/ci-scripts/cls_oaicitest.py
+++ b/ci-scripts/cls_oaicitest.py
@@ -192,16 +192,16 @@ class OaiCiTest():
 			result = re.search('LAST_BUILD_INFO', SSH.getBefore())
 			if result is not None:
 				mismatch = False
-				SSH.command('grep SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
+				SSH.command('grep --colour=never SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
 				result = re.search(self.ranCommitID, SSH.getBefore())
 				if result is None:
 					mismatch = True
-				SSH.command('grep MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
+				SSH.command('grep --colour=never MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
 				if self.ranAllowMerge:
 					result = re.search('YES', SSH.getBefore())
 					if result is None:
 						mismatch = True
-					SSH.command('grep TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
+					SSH.command('grep --colour=never TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
 					if self.ranTargetBranch == '':
 						result = re.search('develop', SSH.getBefore())
 					else:
@@ -451,13 +451,13 @@ class OaiCiTest():
 		SSH = sshconnection.SSHConnection()
 		SSH.open(self.UEIPAddress, self.UEUserName, self.UEPassword)
 		# b2xx_fx3_utils reset procedure
-		SSH.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 90)
+		SSH.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 180)
 		result = re.search('type: b200', SSH.getBefore())
 		if result is not None:
 			logging.debug('Found a B2xx device --> resetting it')
 			SSH.command('echo ' + self.UEPassword + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10)
 			# Reloading FPGA bin firmware
-			SSH.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 90)
+			SSH.command('echo ' + self.UEPassword + ' | sudo -S uhd_find_devices', '\$', 180)
 		result = re.search('type: n3xx', str(SSH.getBefore()))
 		if result is not None:
 			logging.debug('Found a N3xx device --> resetting it')
@@ -660,7 +660,7 @@ class OaiCiTest():
 		SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
 		count = 0
 		while count < 5:
-			SSH.command('echo ' + self.ADBPassword + ' | sudo -S lsof | grep ttyUSB0', '\$', 10)
+			SSH.command('echo ' + self.ADBPassword + ' | sudo -S lsof | grep --colour=never ttyUSB0', '\$', 10)
 			result = re.search('picocom', SSH.getBefore())
 			if result is None:
 				count = 10
@@ -1328,7 +1328,7 @@ class OaiCiTest():
 		SSH = sshconnection.SSHConnection()
 		SSH.open(self.ADBIPAddress, self.ADBUserName, self.ADBPassword)
 		if self.ADBCentralized:
-			SSH.command('lsusb | egrep "Future Technology Devices International, Ltd FT2232C" | sed -e "s#:.*##" -e "s# #_#g"', '\$', 15)
+			SSH.command('lsusb | egrep --colour=never "Future Technology Devices International, Ltd FT2232C" | sed -e "s#:.*##" -e "s# #_#g"', '\$', 15)
 			#self.CatMDevices = re.findall("\\\\r\\\\n([A-Za-z0-9_]+)",SSH.getBefore())
 			self.CatMDevices = re.findall("\\\\r\\\\n([A-Za-z0-9_]+)",SSH.getBefore())
 		else:
@@ -1583,7 +1583,7 @@ class OaiCiTest():
 					if re.match('OAI-Rel14-Docker', EPC.Type, re.IGNORECASE):
 						Target = EPC.MmeIPAddress
 					elif re.match('OAICN5G', EPC.Type, re.IGNORECASE):
-						Target = '8.8.8.8'
+						Target = EPC.MmeIPAddress
 					else:
 						Target = EPC.IPAddress
 					#ping from module NIC rather than IP address to make sure round trip is over the air	
@@ -2325,7 +2325,7 @@ class OaiCiTest():
 				server_filename = 'iperf_server_' + self.testCase_id + '_' + self.ue_id + '.log'
 				SSH.command('docker exec -it prod-trf-gen /bin/bash -c "killall --signal SIGKILL iperf"', '\$', 5)
 				iperf_cmd = 'echo $USER; nohup bin/iperf -s -u 2>&1 > ' + server_filename
-				cmd = 'docker exec -it prod-trf-gen /bin/bash -c \"' + iperf_cmd + '\"' 
+				cmd = 'docker exec -d prod-trf-gen /bin/bash -c \"' + iperf_cmd + '\"'
 				SSH.command(cmd,'\$',5)
 				SSH.close()
 
@@ -3601,7 +3601,7 @@ class OaiCiTest():
 				UhdVersion = result.group('uhd_version')
 				logging.debug('UHD Version is: ' + UhdVersion)
 				HTML.UhdVersion[idx]=UhdVersion
-		SSH.command('echo ' + Password + ' | sudo -S uhd_find_devices', '\$', 90)
+		SSH.command('echo ' + Password + ' | sudo -S uhd_find_devices', '\$', 180)
 		usrp_boards = re.findall('product: ([0-9A-Za-z]+)\\\\r\\\\n', SSH.getBefore())
 		count = 0
 		for board in usrp_boards:
diff --git a/ci-scripts/cls_physim1.py b/ci-scripts/cls_physim1.py
index 131aa01c4c90babbf9af457c6dabd443843d4326..2052d98c2c89e483254a1a36bc0683183ba693e9 100644
--- a/ci-scripts/cls_physim1.py
+++ b/ci-scripts/cls_physim1.py
@@ -140,7 +140,7 @@ class PhySim:
 				logging.debug('oai-physim size is unknown')
 
 		# logging to OC Cluster and then switch to corresponding project
-		mySSH.command(f'oc login -u {ocUserName} -p {ocPassword}', '\$', 6)
+		mySSH.command(f'oc login -u {ocUserName} -p {ocPassword}', '\$', 30)
 		if mySSH.getBefore().count('Login successful.') == 0:
 			logging.error('\u001B[1m OC Cluster Login Failed\u001B[0m')
 			mySSH.close()
@@ -149,7 +149,7 @@ class PhySim:
 			return
 		else:
 			logging.debug('\u001B[1m   Login to OC Cluster Successfully\u001B[0m')
-		mySSH.command(f'oc project {ocProjectName}', '\$', 6)
+		mySSH.command(f'oc project {ocProjectName}', '\$', 30)
 		if mySSH.getBefore().count(f'Already on project "{ocProjectName}"') == 0 and mySSH.getBefore().count(f'Now using project "{self.OCProjectName}"') == 0:
 			logging.error(f'\u001B[1m Unable to access OC project {ocProjectName}\u001B[0m')
 			mySSH.close()
@@ -160,7 +160,7 @@ class PhySim:
 			logging.debug(f'\u001B[1m   Now using project {ocProjectName}\u001B[0m')
 
 		# Tag the image and push to the OC cluster
-		mySSH.command('oc whoami -t | sudo podman login -u ' + ocUserName + ' --password-stdin https://default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/ --tls-verify=false', '\$', 6)
+		mySSH.command('oc whoami -t | sudo podman login -u ' + ocUserName + ' --password-stdin https://default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/ --tls-verify=false', '\$', 30)
 		if mySSH.getBefore().count('Login Succeeded!') == 0:
 			logging.error('\u001B[1m Podman Login to OC Cluster Registry Failed\u001B[0m')
 			mySSH.close()
@@ -170,7 +170,7 @@ class PhySim:
 		else:
 			logging.debug('\u001B[1m Podman Login to OC Cluster Registry Successfully\u001B[0m')
 		time.sleep(2)
-		mySSH.command('oc create -f openshift/oai-physim-image-stream.yml', '\$', 6)
+		mySSH.command('oc create -f openshift/oai-physim-image-stream.yml', '\$', 30)
 		if mySSH.getBefore().count('(AlreadyExists):') == 0 and mySSH.getBefore().count('created') == 0:
 			logging.error(f'\u001B[1m Image Stream "oai-physim" Creation Failed on OC Cluster {ocProjectName}\u001B[0m')
 			mySSH.close()
@@ -180,9 +180,9 @@ class PhySim:
 		else:
 			logging.debug(f'\u001B[1m   Image Stream "oai-physim" created on OC project {ocProjectName}\u001B[0m')
 		time.sleep(2)
-		mySSH.command(f'sudo podman tag oai-physim:{imageTag} default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 6)
+		mySSH.command(f'sudo podman tag oai-physim:{imageTag} default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 30)
 		time.sleep(2)
-		mySSH.command(f'sudo podman push default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag} --tls-verify=false', '\$', 30)
+		mySSH.command(f'sudo podman push default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag} --tls-verify=false', '\$', 180)
 		if mySSH.getBefore().count('Storing signatures') == 0:
 			logging.error('\u001B[1m Image "oai-physim" push to OC Cluster Registry Failed\u001B[0m')
 			mySSH.close()
@@ -195,18 +195,18 @@ class PhySim:
 		# Using helm charts deployment
 		time.sleep(5)
 		mySSH.command(f'sed -i -e "s#TAG#{imageTag}#g" ./charts/physims/values.yaml', '\$', 6)
-		mySSH.command('helm install physim ./charts/physims/ | tee -a cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 6)
+		mySSH.command('helm install physim ./charts/physims/ | tee -a cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 30)
 		if mySSH.getBefore().count('STATUS: deployed') == 0:
 			logging.error('\u001B[1m Deploying PhySim Failed using helm chart on OC Cluster\u001B[0m')
-			mySSH.command('helm uninstall physim >> cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 6)
+			mySSH.command('helm uninstall physim >> cmake_targets/log/physim_helm_summary.txt 2>&1', '\$', 30)
 			isFinished1 = False
 			while(isFinished1 == False):
 				time.sleep(20)
 				mySSH.command('oc get pods -l app.kubernetes.io/instance=physim', '\$', 6, resync=True)
 				if re.search('No resources found', mySSH.getBefore()):
 					isFinished1 = True
-			mySSH.command(f'sudo podman rmi default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 6)
-			mySSH.command('oc delete is oai-physim', '\$', 6)
+			mySSH.command(f'sudo podman rmi default-route-openshift-image-registry.apps.5glab.nsa.eurecom.fr/{self.OCProjectName}/oai-physim:{imageTag}', '\$', 30)
+			mySSH.command('oc delete is oai-physim', '\$', 30)
 			mySSH.close()
 			self.AnalyzeLogFile_phySim(HTML)
 			RAN.prematureExit = True
@@ -217,7 +217,7 @@ class PhySim:
 		count = 0
 		while(count < 2 and isRunning == False):
 			time.sleep(60)
-			mySSH.command('oc get pods -o wide -l app.kubernetes.io/instance=physim | tee -a cmake_targets/log/physim_pods_summary.txt', '\$', 6, resync=True)
+			mySSH.command('oc get pods -o wide -l app.kubernetes.io/instance=physim | tee -a cmake_targets/log/physim_pods_summary.txt', '\$', 30, resync=True)
 			if mySSH.getBefore().count('Running') == 12:
 				logging.debug('\u001B[1m Running the physim test Scenarios\u001B[0m')
 				isRunning = True
diff --git a/ci-scripts/conf_files/gnb.band78.sa.fr1.106PRB.2x2.usrpn310.conf b/ci-scripts/conf_files/gnb.band78.sa.fr1.106PRB.2x2.usrpn310.conf
index c889ba2b89d904f9d62726a82f43361b7429c855..0e8466e2dff4f0764b5884dc99276e45e0630da5 100644
--- a/ci-scripts/conf_files/gnb.band78.sa.fr1.106PRB.2x2.usrpn310.conf
+++ b/ci-scripts/conf_files/gnb.band78.sa.fr1.106PRB.2x2.usrpn310.conf
@@ -255,7 +255,7 @@ MACRLCs = (
     tr_n_preference  = "local_RRC";
 #    pusch_TargetSNRx10 = 200;
 #    pucch_TargetSNRx10 = 150;
-    ulsch_max_slots_inactivity=20;
+    ulsch_max_frame_inactivity = 1;
   }
 );
 
diff --git a/ci-scripts/epc.py b/ci-scripts/epc.py
index 8376e07fd58343543f680551f059e1565a0aab4e..fdf77ba0070a479a57e8ec8b404b3a9180a7e397 100644
--- a/ci-scripts/epc.py
+++ b/ci-scripts/epc.py
@@ -53,7 +53,7 @@ import constants as CONST
 class EPCManagement():
 
 	def __init__(self):
-		
+
 		self.IPAddress = ''
 		self.UserName = ''
 		self.Password = ''
@@ -62,7 +62,6 @@ class EPCManagement():
 		self.PcapFileName = ''
 		self.testCase_id = ''
 		self.MmeIPAddress = ''
-		self.AmfIPAddress = ''
 		self.containerPrefix = 'prod'
 		self.mmeConfFile = 'mme.conf'
 		self.yamlPath = ''
@@ -228,6 +227,7 @@ class EPCManagement():
 			sys.exit('Insufficient EPC Parameters')
 		mySSH = SSH.SSHConnection()
 		mySSH.open(self.IPAddress, self.UserName, self.Password)
+		html_cell = '<pre style="background-color:white">\n'
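+		# Deployment details are accumulated in this preformatted HTML cell
+		# and attached to the test report row at the end of the method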
 		if re.match('ltebox', self.Type, re.IGNORECASE):
 			logging.debug('Using the SABOX simulated HSS')
 			mySSH.command('if [ -d ' + self.SourceCodePath + '/scripts ]; then echo ' + self.Password + ' | sudo -S rm -Rf ' + self.SourceCodePath + '/scripts ; fi', '\$', 5)
@@ -238,16 +238,48 @@ class EPCManagement():
 			logging.debug('Using the sabox')
 			mySSH.command('cd /opt/ltebox/tools', '\$', 5)
 			mySSH.command('echo ' + self.Password + ' | sudo -S ./start_sabox', '\$', 5)
+			html_cell += 'N/A\n'
 		elif re.match('OAICN5G', self.Type, re.IGNORECASE):
 			logging.debug('Starting OAI CN5G')
 			mySSH.command('if [ -d ' + self.SourceCodePath + '/scripts ]; then echo ' + self.Password + ' | sudo -S rm -Rf ' + self.SourceCodePath + '/scripts ; fi', '\$', 5)
 			mySSH.command('mkdir -p ' + self.SourceCodePath + '/scripts', '\$', 5)
 			mySSH.command('cd /opt/oai-cn5g-fed/docker-compose', '\$', 5)
 			mySSH.command('./core-network.sh start nrf spgwu', '\$', 60)
+			time.sleep(2)
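+			# All six CN5G containers are expected to be "Up (healthy)"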
+			mySSH.command('docker-compose -p 5gcn ps -a', '\$', 60)
+			if mySSH.getBefore().count('Up (healthy)') != 6:
+				logging.error('Not all containers are healthy')
+			else:
+				logging.debug('OK')
+			mySSH.command('docker-compose config | grep --colour=never image', '\$', 10)
+			listOfImages = mySSH.getBefore()
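+			# Parse each "image: <name>:<tag>" line (mysql excluded) and add
+			# the image size and creation date to the report cell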
+			for imageLine in listOfImages.split('\\r\\n'):
+				res1 = re.search('image: (?P<name>[a-zA-Z0-9\-]+):(?P<tag>[a-zA-Z0-9\-]+)', str(imageLine))
+				res2 = re.search('mysql', str(imageLine))
+				if res1 is not None and res2 is None:
+					html_cell += res1.group('name') + ':' + res1.group('tag') + ' '
+					nbChars = len(res1.group('name')) + len(res1.group('tag')) + 2
+					while (nbChars < 32):
+						html_cell += ' '
+						nbChars += 1
+					mySSH.command('docker image inspect --format="Size = {{.Size}} bytes" ' + res1.group('name') + ':' + res1.group('tag'), '\$', 10)
+					res3 = re.search('Size *= *(?P<size>[0-9\-]*) *bytes', mySSH.getBefore())
+					if res3 is not None:
+						imageSize = int(res3.group('size'))
+						imageSize = int(imageSize/(1024*1024))
+						html_cell += str(imageSize) + ' MBytes '
+					mySSH.command('docker image inspect --format="Date = {{.Created}}" ' + res1.group('name') + ':' + res1.group('tag'), '\$', 10)
+					res4 = re.search('Date *= *(?P<date>[0-9\-]*)T', mySSH.getBefore())
+					if res4 is not None:
+						html_cell += '(' + res4.group('date') + ')'
+					html_cell += '\n'
 		else:
 			logging.error('This option should not occur!')
+		html_cell += '</pre>'
 		mySSH.close()
-		HTML.CreateHtmlTestRow(self.Type, 'OK', CONST.ALL_PROCESSES_OK)
+		html_queue = SimpleQueue()
+		html_queue.put(html_cell)
+		HTML.CreateHtmlTestRowQueue(self.Type, 'OK', 1, html_queue)
 
 	def SetAmfIPAddress(self):
 		# Not an error if we don't need an 5GCN
@@ -371,7 +403,7 @@ class EPCManagement():
 		elif re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE):
 			mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGINT oai_hss || true', '\$', 5)
 			time.sleep(2)
-			mySSH.command('stdbuf -o0  ps -aux | grep hss | grep -v grep', '\$', 5)
+			mySSH.command('stdbuf -o0  ps -aux | grep --colour=never hss | grep -v grep', '\$', 5)
 			result = re.search('oai_hss -j', mySSH.getBefore())
 			if result is not None:
 				mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGKILL oai_hss || true', '\$', 5)
@@ -379,7 +411,7 @@ class EPCManagement():
 		elif re.match('OAI', self.Type, re.IGNORECASE):
 			mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGINT run_hss oai_hss || true', '\$', 5)
 			time.sleep(2)
-			mySSH.command('stdbuf -o0  ps -aux | grep hss | grep -v grep', '\$', 5)
+			mySSH.command('stdbuf -o0  ps -aux | grep --colour=never hss | grep -v grep', '\$', 5)
 			result = re.search('\/bin\/bash .\/run_', mySSH.getBefore())
 			if result is not None:
 				mySSH.command('echo ' + self.Password + ' | sudo -S killall --signal SIGKILL run_hss oai_hss || true', '\$', 5)
@@ -465,6 +497,7 @@ class EPCManagement():
 	def Terminate5GCN(self, HTML):
 		mySSH = SSH.SSHConnection()
 		mySSH.open(self.IPAddress, self.UserName, self.Password)
+		message = ''
 		if re.match('ltebox', self.Type, re.IGNORECASE):
 			logging.debug('Terminating SA BOX')
 			mySSH.command('cd /opt/ltebox/tools', '\$', 5)
@@ -475,15 +508,31 @@ class EPCManagement():
 			time.sleep(1)
 			mySSH.command('echo ' + self.Password + ' | sudo -S screen -S simulated_5g_hss -X quit', '\$', 5)
 		elif re.match('OAICN5G', self.Type, re.IGNORECASE):
-			self.LogCollectOAICN5G()
+			logging.debug('OAI CN5G Collecting Log files to workspace')
+			mySSH.command('echo ' + self.Password + ' | sudo -S rm -rf ' + self.SourceCodePath + '/logs', '\$', 5)
+			mySSH.command('mkdir ' + self.SourceCodePath + '/logs','\$', 5)
+			containers_list=['oai-smf','oai-spgwu','oai-amf','oai-nrf']
+			for c in containers_list:
+				mySSH.command('docker logs ' + c + ' > ' + self.SourceCodePath + '/logs/' + c + '.log', '\$', 5)
+
 			logging.debug('Terminating OAI CN5G')
 			mySSH.command('cd /opt/oai-cn5g-fed/docker-compose', '\$', 5)
-			mySSH.command('docker-compose down', '\$', 5)
 			mySSH.command('./core-network.sh stop nrf spgwu', '\$', 60)
+			time.sleep(2)
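+			# Count "Tracking area update request" messages in the CN5G capture for the report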
+			mySSH.command('tshark -r /tmp/oai-cn5g.pcap | egrep --colour=never "Tracking area update"', '\$', 30)
+			result = re.search('Tracking area update request', mySSH.getBefore())
+			if result is not None:
+				message = 'UE requested ' + str(mySSH.getBefore().count('Tracking area update request')) + ' Tracking area update request(s)'
+			else:
+				message = 'No Tracking area update request'
+			logging.debug(message)
 		else:
 			logging.error('This should not happen!')
 		mySSH.close()
-		HTML.CreateHtmlTestRow('N/A', 'OK', CONST.ALL_PROCESSES_OK)
+		html_queue = SimpleQueue()
+		html_cell = '<pre style="background-color:white">' + message + '</pre>'
+		html_queue.put(html_cell)
+		HTML.CreateHtmlTestRowQueue(self.Type, 'OK', 1, html_queue)
 
 	def DeployEpc(self, HTML):
 		logging.debug('Trying to deploy')
@@ -529,6 +578,10 @@ class EPCManagement():
 			mySSH.copyout(self.IPAddress, self.UserName, self.Password, './' + self.yamlPath + '/mme_fd.sprint.conf', self.SourceCodePath + '/scripts')
 			mySSH.copyout(self.IPAddress, self.UserName, self.Password, './' + self.yamlPath + '/redis_extern.conf', self.SourceCodePath + '/scripts')
 			mySSH.command('chmod a+x ' + self.SourceCodePath + '/scripts/entrypoint.sh', '\$', 5)
+		else:
+			mySSH.copyout(self.IPAddress, self.UserName, self.Password, './' + self.yamlPath + '/entrypoint.sh', self.SourceCodePath + '/scripts')
+			mySSH.copyout(self.IPAddress, self.UserName, self.Password, './' + self.yamlPath + '/mme.conf', self.SourceCodePath + '/scripts')
+			mySSH.command('chmod 775 ' + self.SourceCodePath + '/scripts/entrypoint.sh', '\$', 60)
 		mySSH.command('wget --quiet --tries=3 --retry-connrefused https://raw.githubusercontent.com/OPENAIRINTERFACE/openair-hss/develop/src/hss_rel14/db/oai_db.cql', '\$', 30)
 		mySSH.command('docker-compose down', '\$', 60)
 		mySSH.command('docker-compose up -d db_init', '\$', 60)
@@ -572,6 +625,30 @@ class EPCManagement():
 			listOfContainers += ' prod-trf-gen'
 			expectedHealthyContainers += 1
 
+		mySSH.command('docker-compose config | grep --colour=never image', '\$', 10)
+		html_cell = '<pre style="background-color:white">\n'
+		listOfImages = mySSH.getBefore()
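+		# Collect name, tag, size and creation date of each EPC image
+		# (cassandra/redis excluded) into the report cell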
+		for imageLine in listOfImages.split('\\r\\n'):
+			res1 = re.search('image: (?P<name>[a-zA-Z0-9\-]+):(?P<tag>[a-zA-Z0-9\-]+)', str(imageLine))
+			res2 = re.search('cassandra|redis', str(imageLine))
+			if res1 is not None and res2 is None:
+				html_cell += res1.group('name') + ':' + res1.group('tag') + ' '
+				nbChars = len(res1.group('name')) + len(res1.group('tag')) + 2
+				while (nbChars < 32):
+					html_cell += ' '
+					nbChars += 1
+				mySSH.command('docker image inspect --format="Size = {{.Size}} bytes" ' + res1.group('name') + ':' + res1.group('tag'), '\$', 10)
+				res3 = re.search('Size *= *(?P<size>[0-9\-]*) *bytes', mySSH.getBefore())
+				if res3 is not None:
+					imageSize = int(res3.group('size'))
+					imageSize = int(imageSize/(1024*1024))
+					html_cell += str(imageSize) + ' MBytes '
+				mySSH.command('docker image inspect --format="Date = {{.Created}}" ' + res1.group('name') + ':' + res1.group('tag'), '\$', 10)
+				res4 = re.search('Date *= *(?P<date>[0-9\-]*)T', mySSH.getBefore())
+				if res4 is not None:
+					html_cell += '(' + res4.group('date') + ')'
+				html_cell += '\n'
+		html_cell += '</pre>'
 		# Checking if all are healthy
 		cnt = 0
 		while (cnt < 3):
@@ -587,6 +664,8 @@ class EPCManagement():
 		logging.debug(' -- ' + str(healthyNb) + ' healthy container(s)')
 		logging.debug(' -- ' + str(unhealthyNb) + ' unhealthy container(s)')
 		logging.debug(' -- ' + str(startingNb) + ' still starting container(s)')
+		html_queue = SimpleQueue()
+		html_queue.put(html_cell)
 		if healthyNb == expectedHealthyContainers:
 			mySSH.command('docker exec -d prod-oai-hss /bin/bash -c "nohup tshark -i any -f \'port 9042 or port 3868\' -w /tmp/hss_check_run.pcap 2>&1 > /dev/null"', '\$', 5)
 			if self.isMagmaUsed:
@@ -598,11 +677,11 @@ class EPCManagement():
 			mySSH.command('docker exec -d prod-oai-spgwu-tiny /bin/bash -c "nohup tshark -i any -f \'port 8805\'  -w /tmp/spgwu_check_run.pcap 2>&1 > /dev/null"', '\$', 10)
 			mySSH.close()
 			logging.debug('Deployment OK')
-			HTML.CreateHtmlTestRow(self.Type, 'OK', CONST.ALL_PROCESSES_OK)
+			HTML.CreateHtmlTestRowQueue(self.Type, 'OK', 1, html_queue)
 		else:
 			mySSH.close()
 			logging.debug('Deployment went wrong')
-			HTML.CreateHtmlTestRow(self.Type, 'KO', CONST.INVALID_PARAMETER)
+			HTML.CreateHtmlTestRowQueue(self.Type, 'KO', 1, html_queue)
 
 	def UndeployEpc(self, HTML):
 		logging.debug('Trying to undeploy')
@@ -639,6 +718,13 @@ class EPCManagement():
 			mySSH.command('docker cp prod-magma-mme:/tmp/mme_check_run.pcap mme_' + self.testCase_id + '.pcap', '\$', 60)
 		else:
 			mySSH.command('docker cp prod-oai-mme:/tmp/mme_check_run.pcap mme_' + self.testCase_id + '.pcap', '\$', 60)
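+		# Count Tracking area update requests in the MME capture for the report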
+		mySSH.command('tshark -r mme_' + self.testCase_id + '.pcap | egrep --colour=never "Tracking area update"', '\$', 60)
+		result = re.search('Tracking area update request', mySSH.getBefore())
+		if result is not None:
+			message = 'UE requested ' + str(mySSH.getBefore().count('Tracking area update request')) + ' Tracking area update request(s)'
+		else:
+			message = 'No Tracking area update request'
+		logging.debug(message)
 		mySSH.command('docker cp prod-oai-spgwc:/tmp/spgwc_check_run.pcap spgwc_' + self.testCase_id + '.pcap', '\$', 60)
 		mySSH.command('docker cp prod-oai-spgwu-tiny:/tmp/spgwu_check_run.pcap spgwu_' + self.testCase_id + '.pcap', '\$', 60)
 		# Remove all
@@ -665,12 +751,15 @@ class EPCManagement():
 		mySSH.command('docker inspect --format=\'{{.Name}}\' prod-oai-public-net prod-oai-private-net', '\$', 10)
 		noMoreNetworkNb = mySSH.getBefore().count('No such object')
 		mySSH.close()
+		html_queue = SimpleQueue()
+		html_cell = '<pre style="background-color:white">' + message + '</pre>'
+		html_queue.put(html_cell)
 		if noMoreContainerNb == nbContainers and noMoreNetworkNb == 2:
 			logging.debug('Undeployment OK')
-			HTML.CreateHtmlTestRow(self.Type, 'OK', CONST.ALL_PROCESSES_OK)
+			HTML.CreateHtmlTestRowQueue(self.Type, 'OK', 1, html_queue)
 		else:
 			logging.debug('Undeployment went wrong')
-			HTML.CreateHtmlTestRow(self.Type, 'KO', CONST.INVALID_PARAMETER)
+			HTML.CreateHtmlTestRowQueue(self.Type, 'KO', 1, html_queue)
 
 	def LogCollectHSS(self):
 		mySSH = SSH.SSHConnection()
@@ -689,6 +778,8 @@ class EPCManagement():
 				mySSH.command('docker cp ' + self.containerPrefix + '-oai-hss:/openair-hss/hss_check_run.log .', '\$', 60)
 				mySSH.command('docker cp ' + self.containerPrefix + '-oai-hss:/tmp/hss_check_run.pcap .', '\$', 60)
 				mySSH.command('zip hss.log.zip hss_check_run.*', '\$', 60)
+		elif re.match('OAICN5G', self.Type, re.IGNORECASE):
+			logging.debug('LogCollect is bypassed for this variant')
 		elif re.match('OAI', self.Type, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE):
 			mySSH.command('zip hss.log.zip hss*.log', '\$', 60)
 			mySSH.command('echo ' + self.Password + ' | sudo -S rm hss*.log', '\$', 5)
@@ -719,6 +810,11 @@ class EPCManagement():
 				mySSH.command('docker cp ' + self.containerPrefix + '-oai-mme:/openair-mme/mme_check_run.log .', '\$', 60)
 				mySSH.command('docker cp ' + self.containerPrefix + '-oai-mme:/tmp/mme_check_run.pcap .', '\$', 60)
 				mySSH.command('zip mme.log.zip mme_check_run.*', '\$', 60)
+		elif re.match('OAICN5G', self.Type, re.IGNORECASE):
+			mySSH.command('cd ' + self.SourceCodePath + '/logs','\$', 5)
+			mySSH.command('cp -f /tmp/oai-cn5g.pcap .','\$', 30)
+			mySSH.command('zip mme.log.zip oai-amf.log oai-nrf.log oai-cn5g.pcap','\$', 30)
+			mySSH.command('mv mme.log.zip ' + self.SourceCodePath + '/scripts','\$', 30)
 		elif re.match('OAI', self.Type, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE):
 			mySSH.command('zip mme.log.zip mme*.log', '\$', 60)
 			mySSH.command('echo ' + self.Password + ' | sudo -S rm mme*.log', '\$', 5)
@@ -748,6 +844,10 @@ class EPCManagement():
 				mySSH.command('docker cp ' + self.containerPrefix + '-oai-spgwc:/tmp/spgwc_check_run.pcap .', '\$', 60)
 				mySSH.command('docker cp ' + self.containerPrefix + '-oai-spgwu-tiny:/tmp/spgwu_check_run.pcap .', '\$', 60)
 				mySSH.command('zip spgw.log.zip spgw*_check_run.*', '\$', 60)
+		elif re.match('OAICN5G', self.Type, re.IGNORECASE):
+			mySSH.command('cd ' + self.SourceCodePath + '/logs','\$', 5)
+			mySSH.command('zip spgw.log.zip oai-smf.log oai-spgwu.log','\$', 30)
+			mySSH.command('mv spgw.log.zip ' + self.SourceCodePath + '/scripts','\$', 30)
 		elif re.match('OAI', self.Type, re.IGNORECASE) or re.match('OAI-Rel14-CUPS', self.Type, re.IGNORECASE):
 			mySSH.command('zip spgw.log.zip spgw*.log', '\$', 60)
 			mySSH.command('echo ' + self.Password + ' | sudo -S rm spgw*.log', '\$', 5)
@@ -757,17 +857,3 @@ class EPCManagement():
 		else:
 			logging.error('This option should not occur!')
 		mySSH.close()
-
-	def LogCollectOAICN5G(self):
-		mySSH = SSH.SSHConnection()
-		mySSH.open(self.IPAddress, self.UserName, self.Password)
-		logging.debug('OAI CN5G Collecting Log files to workspace')
-		mySSH.command('echo ' + self.Password + ' | sudo rm -rf ' + self.SourceCodePath + '/logs', '\$', 5)
-		mySSH.command('mkdir ' + self.SourceCodePath + '/logs','\$', 5)	
-		containers_list=['oai-smf','oai-spgwu','oai-amf','oai-nrf']
-		for c in containers_list:
-			mySSH.command('docker logs ' + c + ' > ' + self.SourceCodePath + '/logs/' + c + '.log', '\$', 5)
-		mySSH.command('cd ' + self.SourceCodePath + '/logs', '\$', 5)		
-		mySSH.command('zip oai-cn5g.log.zip *.log', '\$', 60)
-		mySSH.close()
-
diff --git a/ci-scripts/main.py b/ci-scripts/main.py
index 9923d515a47e7568fa333817ecde12408d0085fd..409647e216b7253d405522fe78111ebdf5c08c0b 100644
--- a/ci-scripts/main.py
+++ b/ci-scripts/main.py
@@ -410,6 +410,17 @@ def GetParametersFromXML(action):
 		if (string_field is not None):
 			CONTAINERS.cliOptions = string_field
 
+	elif action == 'Copy_Image_to_Test':
+		string_field = test.findtext('image_name')
+		if (string_field is not None):
+			CONTAINERS.imageToCopy = string_field
+		string_field = test.findtext('registry_svr_id')
+		if (string_field is not None):
+			CONTAINERS.registrySvrId = string_field
+		string_field = test.findtext('test_svr_id')
+		if (string_field is not None):
+			CONTAINERS.testSvrId = string_field
+
 	else: # ie action == 'Run_PhySim':
 		ldpc.runargs = test.findtext('physim_run_args')
 		
@@ -734,6 +745,22 @@ elif re.match('^TesteNB$', mode, re.IGNORECASE) or re.match('^TestUE$', mode, re
 	HTML.SethtmlUEConnected(len(CiTestObj.UEDevices) + len(CiTestObj.CatMDevices))
 	HTML.CreateHtmlTabHeader()
 
+	# On CI bench w/ containers, we need to validate if IP routes are set
+	if EPC.IPAddress == '192.168.18.210':
+		CONTAINERS.CheckAndAddRoute('porcepix', EPC.IPAddress, EPC.UserName, EPC.Password)
+	if CONTAINERS.eNBIPAddress == '192.168.18.194':
+		CONTAINERS.CheckAndAddRoute('asterix', CONTAINERS.eNBIPAddress, CONTAINERS.eNBUserName, CONTAINERS.eNBPassword)
+	if CONTAINERS.eNB1IPAddress == '192.168.18.194':
+		CONTAINERS.CheckAndAddRoute('asterix', CONTAINERS.eNB1IPAddress, CONTAINERS.eNB1UserName, CONTAINERS.eNB1Password)
+	if CONTAINERS.eNBIPAddress == '192.168.18.193':
+		CONTAINERS.CheckAndAddRoute('obelix', CONTAINERS.eNBIPAddress, CONTAINERS.eNBUserName, CONTAINERS.eNBPassword)
+	if CONTAINERS.eNB1IPAddress == '192.168.18.193':
+		CONTAINERS.CheckAndAddRoute('obelix', CONTAINERS.eNB1IPAddress, CONTAINERS.eNB1UserName, CONTAINERS.eNB1Password)
+	if CONTAINERS.eNBIPAddress == '192.168.18.209':
+		CONTAINERS.CheckAndAddRoute('nepes', CONTAINERS.eNBIPAddress, CONTAINERS.eNBUserName, CONTAINERS.eNBPassword)
+	if CONTAINERS.eNB1IPAddress == '192.168.18.209':
+		CONTAINERS.CheckAndAddRoute('nepes', CONTAINERS.eNB1IPAddress, CONTAINERS.eNB1UserName, CONTAINERS.eNB1Password)
+
 	CiTestObj.FailReportCnt = 0
 	RAN.prematureExit=True
 	HTML.startTime=int(round(time.time() * 1000))
@@ -853,6 +880,8 @@ elif re.match('^TesteNB$', mode, re.IGNORECASE) or re.match('^TestUE$', mode, re
 					HTML=ldpc.Run_PhySim(HTML,CONST,id)
 				elif action == 'Build_Image':
 					CONTAINERS.BuildImage(HTML)
+				elif action == 'Copy_Image_to_Test':
+					CONTAINERS.Copy_Image_to_Test_Server(HTML)
 				elif action == 'Deploy_Object':
 					CONTAINERS.DeployObject(HTML, EPC)
 				elif action == 'Undeploy_Object':
diff --git a/ci-scripts/ran.py b/ci-scripts/ran.py
index 9acb1de34e3b834b6e6094a8f601d78a3f12d8b8..74f13d031b8d57c3107c1eaccac444359b87e214 100644
--- a/ci-scripts/ran.py
+++ b/ci-scripts/ran.py
@@ -165,16 +165,16 @@ class RANManagement():
 			result = re.search('LAST_BUILD_INFO', mySSH.getBefore())
 			if result is not None:
 				mismatch = False
-				mySSH.command('grep SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
+				mySSH.command('grep --colour=never SRC_COMMIT LAST_BUILD_INFO.txt', '\$', 2)
 				result = re.search(self.ranCommitID, mySSH.getBefore())
 				if result is None:
 					mismatch = True
-				mySSH.command('grep MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
+				mySSH.command('grep --colour=never MERGED_W_TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
 				if (self.ranAllowMerge):
 					result = re.search('YES', mySSH.getBefore())
 					if result is None:
 						mismatch = True
-					mySSH.command('grep TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
+					mySSH.command('grep --colour=never TGT_BRANCH LAST_BUILD_INFO.txt', '\$', 2)
 					if self.ranTargetBranch == '':
 						result = re.search('develop', mySSH.getBefore())
 					else:
@@ -423,13 +423,13 @@ class RANManagement():
 		# do not reset board twice in IF4.5 case
 		result = re.search('^rru|^enb|^du.band', str(config_file))
 		if result is not None:
-			mySSH.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 90)
+			mySSH.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 180)
 			result = re.search('type: b200', mySSH.getBefore())
 			if result is not None:
 				logging.debug('Found a B2xx device --> resetting it')
 				mySSH.command('echo ' + lPassWord + ' | sudo -S b2xx_fx3_utils --reset-device', '\$', 10)
 				# Reloading FPGA bin firmware
-				mySSH.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 90)
+				mySSH.command('echo ' + lPassWord + ' | sudo -S uhd_find_devices', '\$', 180)
 		# Make a copy and adapt to EPC / eNB IP addresses
 		mySSH.command('cp ' + full_config_file + ' ' + ci_full_config_file, '\$', 5)
 		localMmeIpAddr = EPC.MmeIPAddress
@@ -446,7 +446,7 @@ class RANManagement():
 		else:
 			mySSH.command('sed -i -e \'s/FLEXRAN_ENABLED.*;/FLEXRAN_ENABLED        = "no";/\' ' + ci_full_config_file, '\$', 2);
 		self.eNBmbmsEnables[int(self.eNB_instance)] = False
-		mySSH.command('grep enable_enb_m2 ' + ci_full_config_file, '\$', 2);
+		mySSH.command('grep --colour=never enable_enb_m2 ' + ci_full_config_file, '\$', 2);
 		result = re.search('yes', mySSH.getBefore())
 		if result is not None:
 			self.eNBmbmsEnables[int(self.eNB_instance)] = True
@@ -593,8 +593,12 @@ class RANManagement():
 				lPassWord = self.eNBPassword
 			mySSH = SSH.SSHConnection()
 			mySSH.open(lIpAddr, lUserName, lPassWord)
-			mySSH.command('stdbuf -o0 ps -aux | grep --color=never ' + self.air_interface[self.eNB_instance] + ' | grep -v grep', '\$', 5)
-			result = re.search(self.air_interface[self.eNB_instance], mySSH.getBefore())
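+			# The air interface may be unset for this instance; fall back to a
+			# generic 'softmodem' pattern so the process check still works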
+			if self.air_interface[self.eNB_instance] == '':
+				pattern = 'softmodem'
+			else:
+				pattern = self.air_interface[self.eNB_instance]
+			mySSH.command('stdbuf -o0 ps -aux | grep --color=never ' + pattern + ' | grep -v grep', '\$', 5)
+			result = re.search(pattern, mySSH.getBefore())
 			if result is None:
 				logging.debug('\u001B[1;37;41m eNB Process Not Found! \u001B[0m')
 				status_queue.put(CONST.ENB_PROCESS_FAILED)
@@ -734,8 +738,8 @@ class RANManagement():
 		mySSH.command('echo ' + self.eNBPassword + ' | sudo -S mv /tmp/enb_*.pcap .','\$',20)
 		mySSH.command('echo ' + self.eNBPassword + ' | sudo -S mv /tmp/gnb_*.pcap .','\$',20)
 		mySSH.command('echo ' + self.eNBPassword + ' | sudo -S rm -f enb.log.zip', '\$', 5)
-		mySSH.command('echo ' + self.eNBPassword + ' | sudo -S zip enb.log.zip enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *monitor.pickle *monitor.png', '\$', 60)
-		mySSH.command('echo ' + self.eNBPassword + ' | sudo -S rm enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *.pickle *.png', '\$', 5)
+		mySSH.command('echo ' + self.eNBPassword + ' | sudo -S zip enb.log.zip enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *monitor.pickle *monitor*.png', '\$', 60)
+		mySSH.command('echo ' + self.eNBPassword + ' | sudo -S rm enb*.log core* enb_*record.raw enb_*.pcap gnb_*.pcap enb_*txt physim_*.log *stats.log *monitor.pickle *monitor*.png', '\$', 5)
 		mySSH.close()
 
 	def AnalyzeLogFile_eNB(self, eNBlogFile, HTML):
@@ -792,23 +796,11 @@ class RANManagement():
 		pb_receiving_samples_cnt = 0
 		#count "removing UE" msg
 		removing_ue = 0
+		#count "X2AP-PDU"
+		x2ap_pdu = 0
 		#NSA specific log markers
 		nsa_markers ={'SgNBReleaseRequestAcknowledge': [],'FAILURE': [], 'scgFailureInformationNR-r15': [], 'SgNBReleaseRequest': []}
 	
-		#the datalog config file has to be loaded
-		datalog_rt_stats_file='datalog_rt_stats.yaml'
-		if (os.path.isfile(datalog_rt_stats_file)):
-			yaml_file=datalog_rt_stats_file
-		elif (os.path.isfile('ci-scripts/'+datalog_rt_stats_file)):
-			yaml_file='ci-scripts/'+datalog_rt_stats_file
-		else:
-			logging.error("Datalog RT stats yaml file cannot be found")
-			sys.exit("Datalog RT stats yaml file cannot be found")
-
-		with open(yaml_file,'r') as f:
-			datalog_rt_stats = yaml.load(f,Loader=yaml.FullLoader)
-		rt_keys = datalog_rt_stats['Ref'] #we use the keys from the Ref field  
-
 		line_cnt=0 #log file line counter
 		for line in enb_log_file.readlines():
 			line_cnt+=1
@@ -975,15 +967,7 @@ class RANManagement():
 				if result is not None:
 					#remove 1- all useless char before relevant info (ulsch or dlsch) 2- trailing char
 					dlsch_ulsch_stats[k]=re.sub(r'^.*\]\s+', r'' , line.rstrip())
-			#real time statistics for gNB
-			for k in rt_keys:
-				result = re.search(k, line)     
-				if result is not None:
-					#remove 1- all useless char before relevant info  2- trailing char
-					line=line.replace('[0m','')
-					tmp=re.match(rf'^.*?(\b{k}\b.*)',line.rstrip()) #from python 3.6 we can use literal string interpolation for the variable k, using rf' in the regex
-					if tmp!=None: #with ULULULUULULULLLL at the head of the line, we skip it
-						real_time_stats[k]=tmp.group(1)
+
 
 			#count "problem receiving samples" msg
 			result = re.search('\[PHY\]\s+problem receiving samples', str(line))
@@ -993,7 +977,10 @@ class RANManagement():
 			result = re.search('\[MAC\]\s+Removing UE', str(line))
 			if result is not None:
 				removing_ue += 1
-
+			#count "X2AP-PDU"
+			result = re.search('X2AP-PDU', str(line))
+			if result is not None:
+				x2ap_pdu += 1
 			#nsa markers logging
 			for k in nsa_markers:
 				result = re.search(k, line)
@@ -1001,7 +988,55 @@ class RANManagement():
 					nsa_markers[k].append(line_cnt)					
 
 		enb_log_file.close()
-		logging.debug('   File analysis completed')
+
+
+		#the following part takes the *_stats.log files as source (not the stdout log file)
+
+		#the datalog config file has to be loaded
+		datalog_rt_stats_file='datalog_rt_stats.yaml'
+		if (os.path.isfile(datalog_rt_stats_file)):
+			yaml_file=datalog_rt_stats_file
+		elif (os.path.isfile('ci-scripts/'+datalog_rt_stats_file)):
+			yaml_file='ci-scripts/'+datalog_rt_stats_file
+		else:
+			logging.error("Datalog RT stats yaml file cannot be found")
+			sys.exit("Datalog RT stats yaml file cannot be found")
+
+		with open(yaml_file,'r') as f:
+			datalog_rt_stats = yaml.load(f,Loader=yaml.FullLoader)
+		rt_keys = datalog_rt_stats['Ref'] #we use the keys from the Ref field  
+
+		if (os.path.isfile('./nrL1_stats.log')) and (os.path.isfile('./nrMAC_stats.log')):
+			stat_files_present=True
+		else:
+			stat_files_present=False
+			logging.debug("NR Stats files for RT analysis not found")
+		if stat_files_present:
+			nrL1_stats = open('./nrL1_stats.log', 'r')
+			nrMAC_stats = open('./nrMAC_stats.log', 'r')
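+			# For each configured key keep the last matching line found in the stats files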
+			for line in nrL1_stats.readlines():
+				for k in rt_keys:
+					result = re.search(k, line)
+					if result is not None:
+						#remove 1- all useless char before relevant info  2- trailing char
+						tmp=re.match(rf'^.*?(\b{k}\b.*)',line.rstrip()) #from python 3.6 we can use literal string interpolation for the variable k, using rf' in the regex
+						if tmp is not None:
+							real_time_stats[k]=tmp.group(1)
+			for line in nrMAC_stats.readlines():
+				for k in rt_keys:
+					result = re.search(k, line)
+					if result is not None:
+						#remove 1- all useless char before relevant info  2- trailing char
+						tmp=re.match(rf'^.*?(\b{k}\b.*)',line.rstrip()) #from python 3.6 we can use literal string interpolation for the variable k, using rf' in the regex
+						if tmp is not None:
+							real_time_stats[k]=tmp.group(1)
+			nrL1_stats.close()
+			nrMAC_stats.close()
+
+		#stdout log file and stat log files analysis completed
+		logging.debug('   File analysis (stdout, stats) completed')
+
+		#post processing depending on the node type
 		if (self.air_interface[self.eNB_instance] == 'lte-softmodem') or (self.air_interface[self.eNB_instance] == 'ocp-enb'):
 			nodeB_prefix = 'e'
 		else:
@@ -1087,6 +1122,11 @@ class RANManagement():
 			htmlMsg = statMsg+'\n'
 			logging.debug(statMsg)
 			htmleNBFailureMsg += htmlMsg
+			#X2AP-PDU log
+			statMsg = 'X2AP-PDU msg count = ' + str(x2ap_pdu)
+			htmlMsg = statMsg+'\n'
+			logging.debug(statMsg)
+			htmleNBFailureMsg += htmlMsg
 			#nsa markers
 			statMsg = 'logfile line count = ' + str(line_cnt)			
 			htmlMsg = statMsg+'\n'
diff --git a/ci-scripts/sshconnection.py b/ci-scripts/sshconnection.py
index b85c40a0bea56e7b8ce2bf4b7d7a3ddf47f55ce1..65e9c961d0a00a00e5a0f48cdc719aeac4bca98d 100644
--- a/ci-scripts/sshconnection.py
+++ b/ci-scripts/sshconnection.py
@@ -60,7 +60,8 @@ class SSHConnection():
 		connect_status = False
 		while count < 4:
 			self.ssh = pexpect.spawn('ssh -o PubkeyAuthentication=no {}@{}'.format(username,ipaddress))
-			self.ssh.timeout = 5
+			# Longer timeout at connection due to asterix slowness
+			self.ssh.timeout = 25
 			self.sshresponse = self.ssh.expect(['Are you sure you want to continue connecting (yes/no)?', 'password:', 'Last login', pexpect.EOF, pexpect.TIMEOUT])
 			if self.sshresponse == 0:
 				self.ssh.sendline('yes')
diff --git a/ci-scripts/stats_monitor.py b/ci-scripts/stats_monitor.py
index 877194de5944bf6932aa239e6942bbed9f2b2479..933bf13203c7fe577d68f52b7438f7087683beb0 100755
--- a/ci-scripts/stats_monitor.py
+++ b/ci-scripts/stats_monitor.py
@@ -17,9 +17,15 @@ class StatMonitor():
     def __init__(self,cfg_file):
         with open(cfg_file,'r') as file:
             self.d = yaml.load(file)
-        for node in self.d:
-            for metric in self.d[node]:
-                self.d[node][metric]=[]
+        for node in self.d: #so far we have enb or gnb as nodes
+            for metric_l1 in self.d[node]: #first level of metric keys
+                if metric_l1!="graph": #graph is a reserved word to configure graph paging, so it is disregarded
+                    if self.d[node][metric_l1] is None: #first level is None -> create array
+                        self.d[node][metric_l1]=[]
+                    else: #first level is not None -> there is a second level -> create array
+                        for metric_l2 in self.d[node][metric_l1]:
+                            self.d[node][metric_l1][metric_l2]=[]
+
 
 
     def process_gnb (self,node_type,output):
@@ -36,6 +42,11 @@ class StatMonitor():
                 percentage=float(result.group(2))/float(result.group(1))
                 self.d[node_type]['ulsch_err_perc_round_1'].append(percentage)
 
+            for k in self.d[node_type]['rt']:
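+                # assumed stats line format: "<key>: <t1> us; <count>; <t2> us;"
+                # the third captured field (t2, in us) is the value recorded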
+                result=re.match(rf'^.*\b{k}\b:\s+([0-9\.]+) us;\s+([0-9]+);\s+([0-9\.]+) us;',tmp)
+                if result is not None:
+                    self.d[node_type]['rt'][k].append(float(result.group(3)))
+
 
     def process_enb (self,node_type,output):
         for line in output:
@@ -62,23 +73,37 @@ class StatMonitor():
 
 
     def graph(self,node_type):
-        col = 1
-        figure, axis = plt.subplots(len(self.d[node_type]), col ,figsize=(10, 10))
-        i=0
-        for metric in self.d[node_type]:
-            major_ticks = np.arange(0, len(self.d[node_type][metric])+1, 1)
-            axis[i].set_xticks(major_ticks)
-            axis[i].set_xticklabels([])
-            axis[i].plot(self.d[node_type][metric],marker='o')
-            axis[i].set_xlabel('time')
-            axis[i].set_ylabel(metric)
-            axis[i].set_title(metric)
-            i+=1
-
-        plt.tight_layout()
-        # Combine all the operations and display
-        plt.savefig(node_type+'_stats_monitor.png')
-        plt.show()
+        for page in self.d[node_type]['graph']: #work out a set of graphs per page
+            col = 1
+            figure, axis = plt.subplots(len(self.d[node_type]['graph'][page]), col ,figsize=(10, 10))
+            i=0
+            for m in self.d[node_type]['graph'][page]: #metric may refer to 1 level or 2 levels
+                metric_path=m.split('.')
+                if len(metric_path)==1:#1 level
+                    metric_l1=metric_path[0]
+                    major_ticks = np.arange(0, len(self.d[node_type][metric_l1])+1, 1)
+                    axis[i].set_xticks(major_ticks)
+                    axis[i].set_xticklabels([])
+                    axis[i].plot(self.d[node_type][metric_l1],marker='o')
+                    axis[i].set_xlabel('time')
+                    axis[i].set_ylabel(metric_l1)
+                    axis[i].set_title(metric_l1)
+
+                else:#2 levels
+                    metric_l1=metric_path[0]
+                    metric_l2=metric_path[1]
+                    major_ticks = np.arange(0, len(self.d[node_type][metric_l1][metric_l2])+1, 1)
+                    axis[i].set_xticks(major_ticks)
+                    axis[i].set_xticklabels([])
+                    axis[i].plot(self.d[node_type][metric_l1][metric_l2],marker='o')
+                    axis[i].set_xlabel('time')
+                    axis[i].set_ylabel(metric_l2)
+                    axis[i].set_title(metric_l2)
+                i+=1
+
+            plt.tight_layout()
+            #save as png
+            plt.savefig(node_type+'_stats_monitor_'+page+'.png')
 
 
 if __name__ == "__main__":
@@ -88,7 +113,7 @@ if __name__ == "__main__":
     mon=StatMonitor(cfg_filename)
 
     #collecting stats when modem process is stopped
-    CMD='ps aux | grep mode | grep -v grep'
+    CMD='ps aux | grep modem | grep -v grep'
     process=subprocess.Popen(CMD, shell=True, stdout=subprocess.PIPE)
     output = process.stdout.readlines()
     while len(output)!=0 :
diff --git a/ci-scripts/stats_monitor_conf.yaml b/ci-scripts/stats_monitor_conf.yaml
index 6c0a2b0225f22130043a3344fd8f1ceb3b8e866b..8760c067fccf2cd7cf7f8a7f10cae9547d863511 100644
--- a/ci-scripts/stats_monitor_conf.yaml
+++ b/ci-scripts/stats_monitor_conf.yaml
@@ -2,10 +2,50 @@ enb :
   PHR:
   bler:
   mcsoff:
-  mcs:  
+  mcs:
+  graph:
+    page1:
+      PHR:
+      bler:
+      mcsoff:
+      mcs:
 
 gnb :
   dlsch_err:
   dlsch_err_perc_round_1:
   ulsch_err:
-  ulsch_err_perc_round_1:
\ No newline at end of file
+  ulsch_err_perc_round_1:
+  rt :
+    feprx:
+    feptx_prec:
+    feptx_ofdm:
+    feptx_total:
+    L1 Tx processing thread 0:
+    L1 Tx processing thread 1:
+    DLSCH encoding:
+    L1 Rx processing:
+    PUSCH inner-receiver:
+    PUSCH decoding:
+    DL & UL scheduling timing stats:
+    UL Indication:
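+  # each page below is rendered to its own gnb_stats_monitor_<page>.png;
+  # dotted names (e.g. rt.feprx) refer to second-level metrics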
+  graph:
+    page1:
+      dlsch_err:
+      dlsch_err_perc_round_1:
+      ulsch_err:
+      ulsch_err_perc_round_1:
+    page2:
+      rt.feprx:
+      rt.feptx_prec:
+      rt.feptx_ofdm:
+      rt.feptx_total:
+    page3:
+      rt.L1 Tx processing thread 0:
+      rt.L1 Tx processing thread 1:
+      rt.DLSCH encoding:
+      rt.L1 Rx processing:
+    page4:
+      rt.PUSCH inner-receiver:
+      rt.PUSCH decoding:
+      rt.DL & UL scheduling timing stats:
+      rt.UL Indication:
\ No newline at end of file
diff --git a/ci-scripts/xml_class_list.yml b/ci-scripts/xml_class_list.yml
index d8e1b26a215ddabafbea38471a0c6f0b34e71f86..9e33ee468de70d1ef0eb6b1484e02a6f539f5204 100755
--- a/ci-scripts/xml_class_list.yml
+++ b/ci-scripts/xml_class_list.yml
@@ -37,6 +37,7 @@
   - IdleSleep
   - Perform_X2_Handover
   - Build_Image
+  - Copy_Image_to_Test
   - Deploy_Object
   - Undeploy_Object
   - Cppcheck_Analysis
diff --git a/ci-scripts/xml_files/container_nsa_b200_quectel.xml b/ci-scripts/xml_files/container_nsa_b200_quectel.xml
new file mode 100644
index 0000000000000000000000000000000000000000..76610c2656200688b1d2b84cec23ccea525e9e5d
--- /dev/null
+++ b/ci-scripts/xml_files/container_nsa_b200_quectel.xml
@@ -0,0 +1,153 @@
+<!--
+
+ Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The OpenAirInterface Software Alliance licenses this file to You under
+ the OAI Public License, Version 1.1  (the "License"); you may not use this file
+ except in compliance with the License.
+ You may obtain a copy of the License at
+
+      http://www.openairinterface.org/?page_id=698
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ For more information about the OpenAirInterface (OAI) Software Alliance:
+      contact@openairinterface.org
+
+-->
+<testCaseList>
+	<htmlTabRef>TEST-NSA-FR1-TM1-B200</htmlTabRef>
+	<htmlTabName>NSA SanityCheck with QUECTEL</htmlTabName>
+	<htmlTabIcon>tasks</htmlTabIcon>
+	<repeatCount>1</repeatCount>
+	<TestCaseRequestedList>
+ 000001
+ 010002
+ 030000
+ 030101
+ 000001
+ 030102
+ 000001
+ 010000
+ 000001
+ 050000
+ 050001
+ 070000
+ 070001
+ 010002
+ 000001
+ 030202
+ 030201
+	</TestCaseRequestedList>
+	<TestCaseExclusionList></TestCaseExclusionList>
+
+	<testCase id="010000">
+		<class>Initialize_UE</class>
+		<desc>Initialize Quectel</desc>
+		<id>idefix</id>
+		<UE_Trace>yes</UE_Trace>
+	</testCase>
+
+	<testCase id="010002">
+		<class>Terminate_UE</class>
+		<desc>Terminate Quectel</desc>
+		<id>idefix</id>
+	</testCase>
+
+	<testCase id="030000">
+		<class>Copy_Image_to_Test</class>
+		<desc>Copy gNB image to test server</desc>
+		<image_name>oai-gnb</image_name>
+		<registry_svr_id>0</registry_svr_id>
+		<test_svr_id>1</test_svr_id>
+	</testCase>
+
+	<testCase id="030101">
+		<class>Deploy_Object</class>
+		<desc>Deploy eNB (FDD/Band7/5MHz/B200) in a container</desc>
+		<yaml_path>ci-scripts/yaml_files/nsa_b200_enb</yaml_path>
+		<eNB_instance>0</eNB_instance>
+		<eNB_serverId>0</eNB_serverId>
+	</testCase>
+
+	<testCase id="030102">
+		<class>Deploy_Object</class>
+		<desc>Deploy gNB (TDD/Band78/40MHz/B200) in a container</desc>
+		<yaml_path>ci-scripts/yaml_files/nsa_b200_gnb</yaml_path>
+		<eNB_instance>1</eNB_instance>
+		<eNB_serverId>1</eNB_serverId>
+	</testCase>
+
+	<testCase id="000001">
+		<class>IdleSleep</class>
+		<desc>Sleep</desc>
+		<idle_sleep_time_in_sec>5</idle_sleep_time_in_sec>
+	</testCase>
+
+	<testCase id="000002">
+		<class>IdleSleep</class>
+		<desc>Sleep</desc>
+		<idle_sleep_time_in_sec>20</idle_sleep_time_in_sec>
+	</testCase>
+
+	<testCase id="050000">
+		<class>Ping</class>
+		<desc>Ping: 20pings in 20sec</desc>
+		<id>idefix</id>
+		<ping_args>-c 20</ping_args>
+		<ping_packetloss_threshold>1</ping_packetloss_threshold>
+	</testCase>
+
+	<testCase id="050001">
+		<class>Ping</class>
+		<desc>Ping: 100pings in 20sec</desc>
+		<id>idefix</id>
+		<ping_args>-c 100 -i 0.2</ping_args>
+		<ping_packetloss_threshold>1</ping_packetloss_threshold>
+	</testCase>
+
+	<testCase id="070000">
+		<class>Iperf</class>
+		<desc>iperf (DL/20Mbps/UDP)(60 sec)(single-ue profile)</desc>
+		<iperf_args>-u -b 20M -t 60</iperf_args>
+		<direction>DL</direction>
+		<id>idefix</id>
+		<iperf_packetloss_threshold>3</iperf_packetloss_threshold>
+		<iperf_profile>single-ue</iperf_profile>
+	</testCase>
+
+	<testCase id="070001">
+		<class>Iperf</class>
+		<desc>iperf (UL/2Mbps/UDP)(60 sec)(single-ue profile)</desc>
+		<iperf_args>-u -b 2M -t 60</iperf_args>
+		<direction>UL</direction>
+		<id>idefix</id>
+		<iperf_packetloss_threshold>1</iperf_packetloss_threshold>
+		<iperf_profile>single-ue</iperf_profile>
+	</testCase>
+
+	<testCase id="030201">
+		<class>Undeploy_Object</class>
+		<desc>Undeploy eNB</desc>
+		<yaml_path>ci-scripts/yaml_files/nsa_b200_enb</yaml_path>
+		<eNB_instance>0</eNB_instance>
+		<eNB_serverId>0</eNB_serverId>
+	</testCase>
+
+	<testCase id="030202">
+		<class>Undeploy_Object</class>
+		<desc>Undeploy gNB</desc>
+		<yaml_path>ci-scripts/yaml_files/nsa_b200_gnb</yaml_path>
+		<eNB_instance>1</eNB_instance>
+		<eNB_serverId>1</eNB_serverId>
+	</testCase>
+
+</testCaseList>
+
diff --git a/ci-scripts/xml_files/fr1_nsa_2x2_quectel.xml b/ci-scripts/xml_files/fr1_nsa_2x2_quectel.xml
index 16dfd1e377ba0416ba1ee8fb2b722b9151b006c3..bdca76379d02cf670c50930b4ae0bf3cc761b944 100644
--- a/ci-scripts/xml_files/fr1_nsa_2x2_quectel.xml
+++ b/ci-scripts/xml_files/fr1_nsa_2x2_quectel.xml
@@ -113,8 +113,8 @@
 
 	<testCase id="070000">
 		<class>Iperf</class>
-		<desc>iperf (DL/20Mbps/UDP)(60 sec)(single-ue profile)</desc>
-		<iperf_args>-u -b 20M -t 60</iperf_args>
+		<desc>iperf (DL/60Mbps/UDP)(60 sec)(single-ue profile)</desc>
+		<iperf_args>-u -b 60M -t 60</iperf_args>
 		<direction>DL</direction>
 		<id>nrmodule2_quectel</id>
 		<iperf_packetloss_threshold>5</iperf_packetloss_threshold>
diff --git a/ci-scripts/xml_files/fr1_nsa_quectel.xml b/ci-scripts/xml_files/fr1_nsa_quectel.xml
index 6c11a66d912bd29a94075f7db2819a4a5185ed4a..2d0ff53525dcbe313e9b49296fdac105878f164d 100644
--- a/ci-scripts/xml_files/fr1_nsa_quectel.xml
+++ b/ci-scripts/xml_files/fr1_nsa_quectel.xml
@@ -110,8 +110,8 @@
 
 	<testCase id="070000">
 		<class>Iperf</class>
-		<desc>iperf (DL/20Mbps/UDP)(60 sec)(single-ue profile)</desc>
-		<iperf_args>-u -b 20M -t 60</iperf_args>
+		<desc>iperf (DL/60Mbps/UDP)(60 sec)(single-ue profile)</desc>
+		<iperf_args>-u -b 60M -t 60</iperf_args>
 		<direction>DL</direction>
 		<id>idefix</id>
 		<iperf_packetloss_threshold>3</iperf_packetloss_threshold>
diff --git a/ci-scripts/xml_files/fr1_sa_quectel.xml b/ci-scripts/xml_files/fr1_sa_quectel.xml
index b401465f80960d9dc9358632f9f06ef90f4ea294..f4473710cfbc8fe2f963e045fb4415580403d7e2 100644
--- a/ci-scripts/xml_files/fr1_sa_quectel.xml
+++ b/ci-scripts/xml_files/fr1_sa_quectel.xml
@@ -98,8 +98,8 @@
 
 	<testCase id="070000">
 		<class>Iperf</class>
-		<desc>iperf (DL/20Mbps/UDP)(60 sec)(single-ue profile)</desc>
-		<iperf_args>-u -b 20M -t 60</iperf_args>
+		<desc>iperf (DL/60Mbps/UDP)(60 sec)(single-ue profile)</desc>
+		<iperf_args>-u -b 60M -t 60</iperf_args>
 		<direction>DL</direction>
 		<id>nrmodule2_quectel</id>
 		<iperf_packetloss_threshold>5</iperf_packetloss_threshold>
diff --git a/ci-scripts/yaml_files/fr1_epc_20897/docker-compose.yml b/ci-scripts/yaml_files/fr1_epc_20897/docker-compose.yml
index 0005ce8844ad3c5e5aa696aadf63ed4a43b76515..a3675dc78e10366cf72c794a7832e2701082f119 100644
--- a/ci-scripts/yaml_files/fr1_epc_20897/docker-compose.yml
+++ b/ci-scripts/yaml_files/fr1_epc_20897/docker-compose.yml
@@ -111,6 +111,9 @@ services:
             TAC_LB_SGW_TEST_0: '03'
             TAC_HB_SGW_TEST_0: '00'
             SGW_IPV4_ADDRESS_FOR_S11_TEST_0: 0.0.0.0
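+        # mount the CI-tailored mme.conf and entrypoint.sh over the image defaults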
+        volumes:
+            - ./mme.conf:/openair-mme/etc/mme.conf
+            - ./entrypoint.sh:/openair-mme/bin/entrypoint.sh
         healthcheck:
             test: /bin/bash -c "pgrep oai_mme"
             interval: 10s
diff --git a/ci-scripts/yaml_files/fr1_epc_20897/entrypoint.sh b/ci-scripts/yaml_files/fr1_epc_20897/entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..27c6c401a62f0b77253f77a182d579fd983834b9
--- /dev/null
+++ b/ci-scripts/yaml_files/fr1_epc_20897/entrypoint.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+set -euo pipefail
+
+# First see if all interfaces are up
+ifconfig
+
+# S10 might be on loopback --> needs bring-up
+if [[ "$MME_INTERFACE_NAME_FOR_S10" == *"lo:"* ]]
+then
+    ifconfig ${MME_INTERFACE_NAME_FOR_S10} ${MME_IPV4_ADDRESS_FOR_S10} up
+fi
+
+LIST_OF_NETWORKS=`ifconfig -s | egrep -v "^Iface|^lo" | cut -d' ' -f1`
+
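+# Interface names inside the container are not guaranteed; detect which
+# interface actually carries each configured MME IP address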
+for if_name in $LIST_OF_NETWORKS
+do
+    IF_IP_ADDR=`ifconfig $if_name | grep inet | sed -e "s# *inet#inet#" | cut -d' ' -f2`
+    if [[ "${IF_IP_ADDR}" == "${MME_IPV4_ADDRESS_FOR_S1_MME}" ]]; then
+        echo "S1C is on $if_name"
+	MME_INTERFACE_NAME_FOR_S1_MME=$if_name
+    fi
+    if [[ "${IF_IP_ADDR}" == "${MME_IPV4_ADDRESS_FOR_S10}" ]]; then
+        echo "S10 is on $if_name"
+	MME_INTERFACE_NAME_FOR_S10=$if_name
+    fi
+    if [[ "${IF_IP_ADDR}" == "${MME_IPV4_ADDRESS_FOR_S11}" ]]; then
+        echo "S11 is on $if_name"
+	MME_INTERFACE_NAME_FOR_S11=$if_name
+    fi
+done
+
+CONFIG_DIR="/openair-mme/etc"
+
+for c in ${CONFIG_DIR}/mme_fd.conf; do
+    #echo "entrypoint.sh process config file $c"
+    sed -i -e "s#@TAC-LB#@TAC_LB#" -e "s#TAC-HB_#TAC_HB_#" ${c}
+    # grep variable names (format: @VAR@) from the template to be rendered
+    VARS=$(grep -oP '@[a-zA-Z0-9_]+@' ${c} | sort | uniq | xargs)
+    #echo "entrypoint.sh process vars $VARS"
+
+    # create sed expressions for substituting each occurrence of @VAR@
+    # with the value of the environment variable "VAR"
+    EXPRESSIONS=""
+    for v in ${VARS}; do
+        #echo "var is $v"
+        NEW_VAR=`echo $v | sed -e "s#@##g"`
+        #echo "NEW_VAR is $NEW_VAR"
+        if [[ "${!NEW_VAR}x" == "x" ]]; then
+            echo "Error: Environment variable '${NEW_VAR}' is not set." \
+                "Config file '$(basename $c)' requires all of $VARS."
+            exit 1
+        fi
+        # Some fields require CIDR format
+        if [[ "${NEW_VAR}" == "MME_IPV4_ADDRESS_FOR_S1_MME" ]] || \
+           [[ "${NEW_VAR}" == "MME_IPV4_ADDRESS_FOR_S11" ]] || \
+           [[ "${NEW_VAR}" == "MME_IPV4_ADDRESS_FOR_S10" ]]; then
+            EXPRESSIONS="${EXPRESSIONS};s|${v}|${!NEW_VAR}/24|g"
+        else
+            EXPRESSIONS="${EXPRESSIONS};s|${v}|${!NEW_VAR}|g"
+        fi
+    done
+    EXPRESSIONS="${EXPRESSIONS#';'}"
+
+    # render template and inline replace config file
+    sed -i "${EXPRESSIONS}" ${c}
+done
+
+pushd /openair-mme/scripts
+./check_mme_s6a_certificate ${PREFIX} ${MME_FQDN}
+popd
+
+exec "$@"
diff --git a/ci-scripts/yaml_files/fr1_epc_20897/mme.conf b/ci-scripts/yaml_files/fr1_epc_20897/mme.conf
new file mode 100644
index 0000000000000000000000000000000000000000..9dcb44ef2a892c6702780c56ffb3a2aba4e9a658
--- /dev/null
+++ b/ci-scripts/yaml_files/fr1_epc_20897/mme.conf
@@ -0,0 +1,133 @@
+MME : 
+{
+    REALM                                     = "openairinterface.org";                      # YOUR REALM HERE
+    INSTANCE                                  = 1;                     # 0 is the default
+    PID_DIRECTORY                             = "/var/run";              # /var/run is the default
+    MAX_S1_ENB                                = 64;
+    MAX_UE                                    = 4096;
+    RELATIVE_CAPACITY                         = 10;
+    EMERGENCY_ATTACH_SUPPORTED                     = "no";
+    UNAUTHENTICATED_IMSI_SUPPORTED                 = "no";
+    DUMMY_HANDOVER_FORWARDING_ENABLED              = "yes";
+    EPS_NETWORK_FEATURE_SUPPORT_IMS_VOICE_OVER_PS_SESSION_IN_S1      = "no";    # DO NOT CHANGE
+    EPS_NETWORK_FEATURE_SUPPORT_EMERGENCY_BEARER_SERVICES_IN_S1_MODE = "no";    # DO NOT CHANGE
+    EPS_NETWORK_FEATURE_SUPPORT_LOCATION_SERVICES_VIA_EPC            = "no";    # DO NOT CHANGE
+    EPS_NETWORK_FEATURE_SUPPORT_EXTENDED_SERVICE_REQUEST             = "no";    # DO NOT CHANGE
+
+    # Display statistics about whole system (expressed in seconds)
+    MME_STATISTIC_TIMER                       = 10;
+    MME_MOBILITY_COMPLETION_TIMER             = 2; # Amount of time in seconds the source MME waits to release resources after HANDOVER/TAU is complete (with or without success).
+    MME_S10_HANDOVER_COMPLETION_TIMER         = 2; # Amount of time in seconds the target MME waits to check if a handover/TAU process has completed successfully.
+
+    IP_CAPABILITY = "IPV4V6";
+    INTERTASK_INTERFACE :
+    {
+        ITTI_QUEUE_SIZE            = 2000000;
+    };
+
+    S6A :
+    {
+        S6A_CONF                   = "/openair-mme/etc/mme_fd.conf";
+        HSS_HOSTNAME               = "hss.openairinterface.org";                              # THE HSS FQDN ex: hss.epc.mnc001.mcc001.3gppnetwork.org
+        HSS_REALM                  = "openairinterface.org";                             # THE HSS REALM ex: epc.mnc001.mcc001.3gppnetwork.org
+    };
+
+    SCTP :
+    {
+        SCTP_INSTREAMS  = 8;
+        SCTP_OUTSTREAMS = 8;
+    };
+
+    S1AP : 
+    {
+        S1AP_OUTCOME_TIMER = 10;
+    };
+
+    GUMMEI_LIST = ( 
+         {MCC="208" ; MNC="97"; MME_GID="32768" ; MME_CODE="3"; }                   # YOUR GUMMEI CONFIG HERE
+    );
+
+    TAI_LIST = ( 
+         {MCC="208" ; MNC="97";  TAC = "1"; },                       # YOUR TAI CONFIG HERE
+         {MCC="208" ; MNC="97";  TAC = "2"; },                       # YOUR TAI CONFIG HERE
+         {MCC="208" ; MNC="97";  TAC = "3"; }                        # YOUR TAI CONFIG HERE
+    );
+
+    NAS :
+    {
+        ORDERED_SUPPORTED_INTEGRITY_ALGORITHM_LIST = [ "EIA2" , "EIA1" , "EIA0" ];
+        ORDERED_SUPPORTED_CIPHERING_ALGORITHM_LIST = [ "EEA0" , "EEA1" , "EEA2" ];
+        T3402                                 =  12
+        T3412                                 =  0
+        T3422                                 =  6
+        T3450                                 =  6
+        T3460                                 =  6
+        T3470                                 =  6
+        T3485                                 =  3
+        T3486                                 =  3
+        T3489                                 =  4
+        T3495                                 =  3
+        NAS_FORCE_TAU			      =  0
+        STRICT_FILLER_BITS_CHECK              = "yes";
+    };
+
+    NETWORK_INTERFACES : 
+    {
+        # MME bound interface for S1-C or S1-MME communication (S1AP); can be an ethernet or virtual ethernet interface, wireless interfaces are not advised
+        MME_INTERFACE_NAME_FOR_S1_MME   = "eth0";    # YOUR NETWORK CONFIG HERE
+        MME_IPV4_ADDRESS_FOR_S1_MME     = "192.168.61.195/24";      # CIDR, YOUR NETWORK CONFIG HERE
+#        MME_IPV6_ADDRESS_FOR_S1_MME           = "fd00::191/118";               # YOUR NETWORK CONFIG HERE
+        # MME bound interface for S11 communication (GTPV2-C)
+        MME_INTERFACE_NAME_FOR_S11      = "eth0";       # YOUR NETWORK CONFIG HERE
+        MME_IPV4_ADDRESS_FOR_S11        = "192.168.61.195/24";         # CIDR, YOUR NETWORK CONFIG HERE
+#        MME_IPV6_ADDRESS_FOR_S11          = "fd00:0:0:4::191/64";
+        MME_PORT_FOR_S11                  = 2123;                            # YOUR NETWORK CONFIG HERE
+
+
+        #S10 Interface
+        MME_INTERFACE_NAME_FOR_S10      = "lo";       # YOUR NETWORK CONFIG HERE
+        MME_IPV4_ADDRESS_FOR_S10        = "127.0.0.10/24";         # CIDR, YOUR NETWORK CONFIG HERE
+#        MME_IPV6_ADDRESS_FOR_S10          = "fd00:0:0:4::191/64";
+        MME_PORT_FOR_S10                = 2123;                                 # YOUR NETWORK CONFIG HERE
+    };
+
+    LOGGING :
+    {
+        # OUTPUT choice in { "CONSOLE", "`path to file`", "`IPv4@`:`TCP port num`" }
+        # `path to file` must start with '.' or '/'
+        # if TCP stream choice, then you can easily dump the traffic on the remote or local host: nc -l `TCP port num` > received.txt
+        OUTPUT            = "CONSOLE";
+        THREAD_SAFE       = "no";                                               # THREAD_SAFE choice in { "yes", "no" }, safe to leave at 'no'
+        COLOR             = "yes";                                              # COLOR choice in { "yes", "no" }, enables ANSI color codes
+        # Log level choice in { "EMERGENCY", "ALERT", "CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG", "TRACE"}
+        SCTP_LOG_LEVEL    = "TRACE";
+        S10_LOG_LEVEL     = "TRACE";
+        S11_LOG_LEVEL     = "TRACE";
+        # NEW LOGS FOR MCE	
+        SM_LOG_LEVEL      = "TRACE";
+        MCE_APP_LOG_LEVEL = "TRACE";
+        M2AP_LOG_LEVEL    = "TRACE";
+        GTPV2C_LOG_LEVEL  = "TRACE";
+        UDP_LOG_LEVEL     = "DEBUG";
+        S1AP_LOG_LEVEL    = "DEBUG";
+        NAS_LOG_LEVEL     = "TRACE";
+        MME_APP_LOG_LEVEL = "TRACE";
+        S6A_LOG_LEVEL     = "TRACE";
+        UTIL_LOG_LEVEL    = "ERROR";
+        MSC_LOG_LEVEL     = "ERROR";
+        ITTI_LOG_LEVEL    = "ERROR";
+        ASN1_VERBOSITY    = "annoying";
+    };
+
+
+#    WRR_LIST_SELECTION = (
+#        {ID="tac-lb03.tac-hb00.tac.epc.mnc001.mcc001.3gppnetwork.org" ;        SGW_IP_ADDRESS_FOR_S11="192.168.61.196";},
+#        {ID="tac-lb01.tac-hb00.tac.epc.mnc097.mcc208.3gppnetwork.org" ; SGW_IP_ADDRESS_FOR_S11="192.168.61.196";},
+#        {ID="tac-lb02.tac-hb00.tac.epc.mnc097.mcc208.3gppnetwork.org" ; MME_IP_ADDRESS_FOR_S10="0.0.0.0";},
+#        {ID="tac-lb03.tac-hb00.tac.epc.mnc097.mcc208.3gppnetwork.org" ; MME_IP_ADDRESS_FOR_S10="0.0.0.0";}
+#    );
+    WRR_LIST_SELECTION = (
+        {ID="tac-lb01.tac-hb00.tac.epc.mnc097.mcc208.3gppnetwork.org" ; SGW_IP_ADDRESS_FOR_S11="192.168.61.196";}
+    );
+};
+
diff --git a/ci-scripts/yaml_files/magma_nsa_20897/mme.conf b/ci-scripts/yaml_files/magma_nsa_20897/mme.conf
index e41fbe63046d350b175351e9448ae427e7f3719b..242b780a40dfd2406ecb004ae8d06d5780f117f9 100644
--- a/ci-scripts/yaml_files/magma_nsa_20897/mme.conf
+++ b/ci-scripts/yaml_files/magma_nsa_20897/mme.conf
@@ -56,9 +56,7 @@ MME :
 
     # ------- MME served TAIs
     TAI_LIST = (
-         {MCC="208" ; MNC="97";  TAC = "1"; },
-         {MCC="208" ; MNC="97";  TAC = "2"; },
-         {MCC="208" ; MNC="97";  TAC = "3"; }
+         {MCC="208" ; MNC="97";  TAC = "1"; }
     );
 
     TAC_LIST = (
diff --git a/ci-scripts/yaml_files/nsa_b200_enb/docker-compose.yml b/ci-scripts/yaml_files/nsa_b200_enb/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..60e82de50a3016f853cbb7e3e755bafa57a461e3
--- /dev/null
+++ b/ci-scripts/yaml_files/nsa_b200_enb/docker-compose.yml
@@ -0,0 +1,53 @@
+version: '3.8'
+
+services:
+    enb_mono_fdd:
+        image: oai-enb:latest
+        privileged: true
+        container_name: nsa-b200-enb
+        environment:
+            USE_FDD_MONO: 'yes'
+            USE_B2XX: 'yes'
+            ENB_NAME: eNB-in-docker
+            MCC: '222'
+            MNC: '01'
+            MNC_LENGTH: 2
+            TAC: 1
+            UTRA_BAND_ID: 7
+            DL_FREQUENCY_IN_MHZ: 2680
+            UL_FREQUENCY_OFFSET_IN_MHZ: 120
+            NID_CELL: 0
+            NB_PRB: 25
+            ENABLE_MEASUREMENT_REPORTS: 'yes'
+            ENABLE_X2: 'yes'
+            MME_S1C_IP_ADDRESS: 192.168.18.210
+            ENB_S1C_IF_NAME: eth0
+            ENB_S1C_IP_ADDRESS: 192.168.68.130
+            ENB_S1U_IF_NAME: eth0
+            ENB_S1U_IP_ADDRESS: 192.168.68.130
+            ENB_X2_IP_ADDRESS: 192.168.68.130
+            RRC_INACTIVITY_THRESHOLD: 0
+            FLEXRAN_ENABLED: 'no'
+            FLEXRAN_INTERFACE_NAME: eth0
+            FLEXRAN_IPV4_ADDRESS: 192.168.18.210
+            THREAD_PARALLEL_CONFIG: PARALLEL_RU_L1_TRX_SPLIT
+        volumes:
+            - /dev:/dev
+        networks:
+            public_net:
+                ipv4_address: 192.168.68.130
+        healthcheck:
+            # pgrep does NOT work
+            test: /bin/bash -c "ps aux | grep -v grep | grep -c softmodem"
+            interval: 10s
+            timeout: 5s
+            retries: 5
+
+networks:
+    public_net:
+        name: nsa-b200-enb-net
+        ipam:
+            config:
+                - subnet: 192.168.68.128/26
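+                # disjoint from the gNB subnet (192.168.68.192/26), so the eNB (.130) and gNB (.194) X2 endpoints never clash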
+        driver_opts:
+            com.docker.network.bridge.name: "nsa-enb-net"
diff --git a/ci-scripts/yaml_files/nsa_b200_gnb/docker-compose.yml b/ci-scripts/yaml_files/nsa_b200_gnb/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..692f830e5dd649a28f75d0b1648a983ca891b452
--- /dev/null
+++ b/ci-scripts/yaml_files/nsa_b200_gnb/docker-compose.yml
@@ -0,0 +1,50 @@
+version: '3.8'
+
+services:
+    gnb_mono_tdd:
+        image: oai-gnb:latest
+        privileged: true
+        container_name: nsa-b200-gnb
+        environment:
+            USE_NSA_TDD_MONO: 'yes'
+            USE_B2XX: 'yes'
+            GNB_NAME: gNB-in-docker
+            MCC: '222'
+            MNC: '01'
+            MNC_LENGTH: 2
+            TAC: 1
+            ENABLE_X2: 'yes'
+            ENB_X2_IP_ADDRESS: 192.168.68.130
+            MME_S1C_IP_ADDRESS: 192.168.18.210
+            GNB_S1C_IF_NAME: eth0
+            GNB_S1C_IP_ADDRESS: 192.168.68.194
+            GNB_S1U_IF_NAME: eth0
+            GNB_S1U_IP_ADDRESS: 192.168.68.194
+            GNB_X2_IP_ADDRESS: 192.168.68.194
+            RRC_INACTIVITY_THRESHOLD: 0
+            FLEXRAN_ENABLED: 'no'
+            FLEXRAN_INTERFACE_NAME: eth0
+            FLEXRAN_IPV4_ADDRESS: 192.168.18.210
+            THREAD_PARALLEL_CONFIG: PARALLEL_RU_L1_TRX_SPLIT
+            USE_ADDITIONAL_OPTIONS: '-E -q'
+        volumes:
+            - /dev:/dev
+        networks:
+            public_net:
+                ipv4_address: 192.168.68.194
+        #entrypoint: /bin/bash -c "sleep infinity"
+        healthcheck:
+            # pgrep does NOT work
+            test: /bin/bash -c "ps aux | grep -v grep | grep -c softmodem"
+            interval: 10s
+            timeout: 5s
+            retries: 5
+
+networks:
+    public_net:
+        name: nsa-b200-gnb-net
+        ipam:
+            config:
+                - subnet: 192.168.68.192/26
+        driver_opts:
+            com.docker.network.bridge.name: "nsa-gnb-net"
diff --git a/doc/FEATURE_SET.md b/doc/FEATURE_SET.md
index 4105d17511ba50be9ee9318e0c9d391645315431..9443fbd56e67fae3fc2e0e48d3fe479740768030 100644
--- a/doc/FEATURE_SET.md
+++ b/doc/FEATURE_SET.md
@@ -104,7 +104,7 @@ The MAC layer implements a subset of the **3GPP 36.321** release v8.6 in support
 - RLC interface (AM, UM)
 - UL power control
 - Link adaptation
-- Connected DRX (CDRX) support for FDD LTE UE. Compatible with R13 from 3GPP. Support for Cat-M1 UE comming soon.  
+- Connected DRX (CDRX) support for FDD LTE UE. Compatible with R13 from 3GPP. Support for Cat-M1 UE coming soon.
 
 ## eNB RLC Layer ##
 
@@ -206,7 +206,7 @@ The Physical layer implements **3GPP 36.211**, **36.212**, **36.213** and provid
 - PRACH preamble format 0
 - All downlink (DL) channels are supported: PSS, SSS, PBCH, PCFICH, PHICH, PDCCH, PDSCH, PMCH
 - All uplink (UL) channels are supported: PRACH, PUSCH, PUCCH (format 1/1a/1b), SRS, DRS
-- LTE MBMS-dedicated cell (feMBMS) procedures subset for LTE release 14 (experimental)  
+- LTE MBMS-dedicated cell (feMBMS) procedures subset for LTE release 14 (experimental)
 - LTE non-MBSFN subframe (feMBMS) Carrier Adquistion Subframe-CAS procedures (PSS/SSS/PBCH/PDSH) (experimental)
 - LTE MBSFN MBSFN subframe channel (feMBMS): PMCH (CS@1.25KHz) (channel estimation for 25MHz bandwidth) (experimental) 
 
@@ -313,6 +313,7 @@ The following features are valid for the gNB and the 5G-NR UE.
 - MAC downlink scheduler
   - phy-test scheduler (fixed allocation and usable also without UE)
   - regular scheduler with dynamic allocation
+  - MCS adaptation from HARQ BLER
 - MAC header generation (including timing advance)
 - ACK / NACK handling and HARQ procedures for downlink
 - MAC uplink scheduler
@@ -398,7 +399,7 @@ The following features are valid for the gNB and the 5G-NR UE.
   - Creates TUN interface to PDCP to inject and receive user-place traffic
   - No connection to the core network
 * Supporting Standalone (SA) mode:
-  - UE can register with the 5G Core Network, establish a PDU Session and exchange user-plane traffic  
+  - UE can register with the 5G Core Network, establish a PDU Session and exchange user-plane traffic
 
 ##  NR UE PHY Layer ##
 
@@ -484,7 +485,7 @@ The following features are valid for the gNB and the 5G-NR UE.
    - Interfaces with PDCP, MAC
 
 **UE PDCP**
-* Tx/Rx operations according to 38.323 Rel.16  
+* Tx/Rx operations according to 38.323 Rel.16
    - Integrity protection and ciphering procedures
    - Sequence number management, SDU dicard and in-order delivery
    - Radio bearer establishment/handling and association with PDCP entities
diff --git a/executables/nr-gnb.c b/executables/nr-gnb.c
index d659739ffae8cd0ffd8e6e8ea138938008abe41f..f83fac2737c7d6511f8272dcfa968f7d051397ca 100644
--- a/executables/nr-gnb.c
+++ b/executables/nr-gnb.c
@@ -108,6 +108,7 @@ time_stats_t softmodem_stats_rx_sf; // total rx time
 
 //#define TICK_TO_US(ts) (ts.diff)
 #define TICK_TO_US(ts) (ts.trials==0?0:ts.diff/ts.trials)
+#define L1STATSSTRLEN 16384
 
 
 void tx_func(void *param) {
@@ -329,46 +330,63 @@ void rx_func(void *param) {
        );
 #endif
 }
-static void *process_stats_thread(void *param) {
-
-  PHY_VARS_gNB *gNB  = (PHY_VARS_gNB *)param;
+static void dump_L1_meas_stats(PHY_VARS_gNB *gNB, RU_t *ru, char *output) {
+  int stroff = 0;
+  stroff += print_meas_log(gNB->phy_proc_tx_0, "L1 Tx processing thread 0", NULL, NULL, output);
+  stroff += print_meas_log(gNB->phy_proc_tx_1, "L1 Tx processing thread 1", NULL, NULL, output+stroff);
+  stroff += print_meas_log(&gNB->dlsch_encoding_stats, "DLSCH encoding", NULL, NULL, output+stroff);
+  stroff += print_meas_log(&gNB->phy_proc_rx, "L1 Rx processing", NULL, NULL, output+stroff);
+  stroff += print_meas_log(&gNB->ul_indication_stats, "UL Indication", NULL, NULL, output+stroff);
+  stroff += print_meas_log(&gNB->rx_pusch_stats, "PUSCH inner-receiver", NULL, NULL, output+stroff);
+  stroff += print_meas_log(&gNB->ulsch_decoding_stats, "PUSCH decoding", NULL, NULL, output+stroff);
+  if (ru->feprx) stroff += print_meas_log(&ru->ofdm_demod_stats,"feprx",NULL,NULL, output+stroff);
+
+  if (ru->feptx_ofdm) {
+    stroff += print_meas_log(&ru->precoding_stats,"feptx_prec",NULL,NULL, output+stroff);
+    stroff += print_meas_log(&ru->txdataF_copy_stats,"txdataF_copy",NULL,NULL, output+stroff);
+    stroff += print_meas_log(&ru->ofdm_mod_stats,"feptx_ofdm",NULL,NULL, output+stroff);
+    stroff += print_meas_log(&ru->ofdm_total_stats,"feptx_total",NULL,NULL, output+stroff);
+  }
 
-  reset_meas(&gNB->dlsch_encoding_stats);
-  reset_meas(&gNB->phy_proc_rx);
-  reset_meas(&gNB->ul_indication_stats);
-  reset_meas(&gNB->rx_pusch_stats);
-  reset_meas(&gNB->ulsch_decoding_stats);
+  if (ru->fh_north_asynch_in) stroff += print_meas_log(&ru->rx_fhaul,"rx_fhaul",NULL,NULL, output+stroff);
 
-  wait_sync("process_stats_thread");
+  stroff += print_meas_log(&ru->tx_fhaul,"tx_fhaul",NULL,NULL, output+stroff);
 
-  while(!oai_exit)
-  {
-    sleep(1);
-    print_meas(gNB->phy_proc_tx_0, "L1 Tx processing thread 0", NULL, NULL);
-    print_meas(gNB->phy_proc_tx_1, "L1 Tx processing thread 1", NULL, NULL);
-    print_meas(&gNB->dlsch_encoding_stats, "DLSCH encoding", NULL, NULL);
-    print_meas(&gNB->phy_proc_rx, "L1 Rx processing", NULL, NULL);
-    print_meas(&gNB->ul_indication_stats, "UL Indication", NULL, NULL);
-    print_meas(&gNB->rx_pusch_stats, "PUSCH inner-receiver", NULL, NULL);
-    print_meas(&gNB->ulsch_decoding_stats, "PUSCH decoding", NULL, NULL);
+  if (ru->fh_north_out) {
+    stroff += print_meas_log(&ru->compression,"compression",NULL,NULL, output+stroff);
+    stroff += print_meas_log(&ru->transport,"transport",NULL,NULL, output+stroff);
   }
-  return(NULL);
 }
 
 void *nrL1_stats_thread(void *param) {
   PHY_VARS_gNB     *gNB      = (PHY_VARS_gNB *)param;
+  RU_t *ru = RC.ru[0];
+  char output[L1STATSSTRLEN];
+  memset(output,0,L1STATSSTRLEN);
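+  // note: dump_L1_meas_stats() renders into this fixed buffer; L1STATSSTRLEN (16 kB) is assumed to be large enough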
   wait_sync("L1_stats_thread");
   FILE *fd;
+  fd=fopen("nrL1_stats.log","w");
+  AssertFatal(fd!=NULL,"Cannot open nrL1_stats.log\n");
+
+  reset_meas(gNB->phy_proc_tx_0);
+  reset_meas(gNB->phy_proc_tx_1);
+  reset_meas(&gNB->dlsch_encoding_stats);
+  reset_meas(&gNB->phy_proc_rx);
+  reset_meas(&gNB->ul_indication_stats);
+  reset_meas(&gNB->rx_pusch_stats);
+  reset_meas(&gNB->ulsch_decoding_stats);
+
   while (!oai_exit) {
     sleep(1);
-    fd=fopen("nrL1_stats.log","w");
-    AssertFatal(fd!=NULL,"Cannot open nrL1_stats.log\n");
     dump_nr_I0_stats(fd,gNB);
     dump_pdsch_stats(fd,gNB);
     dump_pusch_stats(fd,gNB);
-    //    nr_dump_uci_stats(fd,eNB,eNB->proc.L1_proc_tx.frame_tx);
-    fclose(fd);
+    dump_L1_meas_stats(gNB, ru, output);
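+    // keep a single open handle: flush and rewind so the file always holds the latest snapshot (rewound, not truncated)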
+    fprintf(fd,"%s\n",output);
+    fflush(fd);
+    fseek(fd,0,SEEK_SET);
   }
+  fclose(fd);
   return(NULL);
 }
 
@@ -429,11 +447,10 @@ void init_gNB_Tpool(int inst) {
   initNotifiedFIFO(gNB->resp_RU_tx);
   notifiedFIFO_elt_t *msgRUTx = newNotifiedFIFO_elt(sizeof(processingData_RU_t),0,gNB->resp_RU_tx,ru_tx_func);
   processingData_RU_t *msgData = (processingData_RU_t*)msgRUTx->msgData;
-  msgData->next_slot = sf_ahead*gNB->frame_parms.slots_per_subframe; // first Tx slot
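+  // the fixed offset may land on an uplink slot in TDD; start on the next downlink slot instead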
+  int first_tx_slot = sf_ahead*gNB->frame_parms.slots_per_subframe;
+  msgData->next_slot = get_next_downlink_slot(gNB, &gNB->gNB_config, 0, first_tx_slot-1);
   pushNotifiedFIFO(gNB->resp_RU_tx,msgRUTx); // to unblock the process in the beginning
 
-  // Stats measurement thread
-  if(opp_enabled == 1) threadCreate(&proc->process_stats_thread, process_stats_thread,(void *)gNB, "time_meas", -1, OAI_PRIORITY_RT_LOW);
   threadCreate(&proc->L1_stats_thread,nrL1_stats_thread,(void*)gNB,"L1_stats",-1,OAI_PRIORITY_RT_LOW);
 
 }
diff --git a/executables/nr-ru.c b/executables/nr-ru.c
index c90750d39aa37793710943c048f7d1e99345b189..3a2589632bde85268a9c5a5f76eb2604cb7f26e8 100644
--- a/executables/nr-ru.c
+++ b/executables/nr-ru.c
@@ -1508,7 +1508,6 @@ void init_RU_proc(RU_t *ru) {
     if (ru->feptx_ofdm) nr_init_feptx_thread(ru);
   }
 
-  if (opp_enabled == 1) threadCreate(&ru->ru_stats_thread,ru_stats_thread,(void *)ru, "emulateRF", -1, OAI_PRIORITY_RT_LOW);
 }
 
 void kill_NR_RU_proc(int inst) {
diff --git a/openair1/PHY/TOOLS/time_meas.c b/openair1/PHY/TOOLS/time_meas.c
index c62fbed10630ef627417a1318ea02c2ec5375e3b..996f7fb2777007bf9cf6970d0c70220420f3c116 100644
--- a/openair1/PHY/TOOLS/time_meas.c
+++ b/openair1/PHY/TOOLS/time_meas.c
@@ -131,6 +131,49 @@ void print_meas(time_stats_t *ts,
   }
 }
 
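+/* Variant of print_meas() that renders into the caller-supplied buffer instead
+ * of printing; returns the number of bytes written so successive calls can be
+ * chained via output+stroff (see dump_L1_meas_stats() in executables/nr-gnb.c). */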
+int print_meas_log(time_stats_t *ts,
+                const char *name,
+                time_stats_t *total_exec_time,
+                time_stats_t *sf_exec_time,
+                char *output)
+{
+  int stroff = 0;
+  static int first_time = 0;
+  static double cpu_freq_GHz = 0.0;
+
+  if (cpu_freq_GHz == 0.0)
+    cpu_freq_GHz = get_cpu_freq_GHz();
+
+  if (first_time == 0) {
+    first_time=1;
+
+    if ((total_exec_time == NULL) || (sf_exec_time== NULL))
+      stroff += sprintf(output, "%25s  %25s  %25s  %25s %25s %6f\n","Name","Total","Per Trials",   "Num Trials","CPU_F_GHz", cpu_freq_GHz);
+    else
+      stroff += sprintf(output+stroff, "%25s  %25s  %25s  %20s %15s %6f\n","Name","Total","Average/Frame","Trials",    "CPU_F_GHz", cpu_freq_GHz);
+  }
+
+  if (ts->trials>0) {
+    //printf("%20s: total: %10.3f ms, average: %10.3f us (%10d trials)\n", name, ts->diff/cpu_freq_GHz/1000000.0, ts->diff/ts->trials/cpu_freq_GHz/1000.0, ts->trials);
+    if ((total_exec_time == NULL) || (sf_exec_time== NULL)) {
+      stroff += sprintf(output+stroff, "%25s:  %15.3f us; %15d; %15.3f us;\n",
+              name,
+              (ts->diff/ts->trials/cpu_freq_GHz/1000.0),
+              ts->trials,
+              ts->max/cpu_freq_GHz/1000.0);
+    } else {
+      stroff += sprintf(output+stroff, "%25s:  %15.3f ms (%5.2f%%); %15.3f us (%5.2f%%); %15d;\n",
+              name,
+              (ts->diff/cpu_freq_GHz/1000000.0),
+              ((ts->diff/cpu_freq_GHz/1000000.0)/(total_exec_time->diff/cpu_freq_GHz/1000000.0))*100,  // percentage
+              (ts->diff/ts->trials/cpu_freq_GHz/1000.0),
+              ((ts->diff/ts->trials/cpu_freq_GHz/1000.0)/(sf_exec_time->diff/sf_exec_time->trials/cpu_freq_GHz/1000.0))*100,  // percentage
+              ts->trials);
+    }
+  }
+  return stroff;
+}
+
 double get_time_meas_us(time_stats_t *ts)
 {
   static double cpu_freq_GHz = 0.0;
diff --git a/openair1/PHY/TOOLS/time_meas.h b/openair1/PHY/TOOLS/time_meas.h
index f78ea55530a294fc87ebcddc0948d2f53b6e7adb..1ef2935917d5dbeb1a72fbb7456ffd1abef5ae33 100644
--- a/openair1/PHY/TOOLS/time_meas.h
+++ b/openair1/PHY/TOOLS/time_meas.h
@@ -88,6 +88,7 @@ static inline void stop_meas(time_stats_t *ts) __attribute__((always_inline));
 
 void print_meas_now(time_stats_t *ts, const char *name, FILE *file_name);
 void print_meas(time_stats_t *ts, const char *name, time_stats_t *total_exec_time, time_stats_t *sf_exec_time);
+int print_meas_log(time_stats_t *ts, const char *name, time_stats_t *total_exec_time, time_stats_t *sf_exec_time, char *output);
 double get_time_meas_us(time_stats_t *ts);
 double get_cpu_freq_GHz(void);
 
diff --git a/openair2/GNB_APP/MACRLC_nr_paramdef.h b/openair2/GNB_APP/MACRLC_nr_paramdef.h
index 7858cd6c3259151334036c2dba75f55c2c6e5ef1..b17fced7b723a0f5ff768d33f697327707f8c57c 100644
--- a/openair2/GNB_APP/MACRLC_nr_paramdef.h
+++ b/openair2/GNB_APP/MACRLC_nr_paramdef.h
@@ -60,6 +60,11 @@
 #define CONFIG_STRING_MACRLC_PUCCHTARGETSNRX10             "pucch_TargetSNRx10"
 #define CONFIG_STRING_MACRLC_PUCCHFAILURETHRES             "pucch_FailureThres"
 #define CONFIG_STRING_MACRLC_PUSCHFAILURETHRES             "pusch_FailureThres"
+#define CONFIG_STRING_MACRLC_DL_BLER_TARGET_UPPER          "dl_bler_target_upper"
+#define CONFIG_STRING_MACRLC_DL_BLER_TARGET_LOWER          "dl_bler_target_lower"
+#define CONFIG_STRING_MACRLC_DL_RD2_BLER_THRESHOLD         "dl_rd2_bler_threshold"
+#define CONFIG_STRING_MACRLC_DL_MAX_MCS                    "dl_max_mcs"
+
 
 /*-------------------------------------------------------------------------------------------------------------------------------------------------------*/
 /*                                            MacRLC  configuration parameters                                                                           */
@@ -88,6 +93,10 @@
 {CONFIG_STRING_MACRLC_PUCCHTARGETSNRX10,                 NULL,     0,          iptr:NULL,           defintval:150,             TYPE_INT,      0},        \
 {CONFIG_STRING_MACRLC_PUCCHFAILURETHRES,                 NULL,     0,          iptr:NULL,           defintval:10,              TYPE_INT,      0},        \
 {CONFIG_STRING_MACRLC_PUSCHFAILURETHRES,                 NULL,     0,          iptr:NULL,           defintval:10,              TYPE_INT,      0},        \
+{CONFIG_STRING_MACRLC_DL_BLER_TARGET_UPPER,   "Upper threshold of BLER to decrease DL MCS",   0, dblptr:NULL,  defdblval:0.15,  TYPE_DOUBLE,  0},        \
+{CONFIG_STRING_MACRLC_DL_BLER_TARGET_LOWER,   "Lower threshold of BLER to increase DL MCS",   0, dblptr:NULL,  defdblval:0.05,  TYPE_DOUBLE,  0},        \
+{CONFIG_STRING_MACRLC_DL_RD2_BLER_THRESHOLD,  "Threshold of RD2/RETX2 BLER to decrease DL MCS", 0, dblptr:NULL,  defdblval:0.01,  TYPE_DOUBLE,  0},      \
+{CONFIG_STRING_MACRLC_DL_MAX_MCS,             "Maximum DL MCS that should be used", 0, u8ptr:NULL,  defintval:28,  TYPE_UINT8,  0},      \
 }
 #define MACRLC_CC_IDX                                          0
 #define MACRLC_TRANSPORT_N_PREFERENCE_IDX                      1
@@ -111,5 +120,10 @@
 #define MACRLC_PUCCHTARGETSNRX10_IDX                           19
 #define MACRLC_PUCCHFAILURETHRES_IDX                           20 
 #define MACRLC_PUSCHFAILURETHRES_IDX                           21
+#define MACRLC_DL_BLER_TARGET_UPPER_IDX                        22
+#define MACRLC_DL_BLER_TARGET_LOWER_IDX                        23
+#define MACRLC_DL_RD2_BLER_THRESHOLD_IDX                       24
+#define MACRLC_DL_MAX_MCS_IDX                                  25
+
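+/* Illustrative gNB .conf usage (values shown are the defaults):
+ *   MACRLCs = ( { ...
+ *                 dl_bler_target_upper  = 0.15;
+ *                 dl_bler_target_lower  = 0.05;
+ *                 dl_rd2_bler_threshold = 0.01;
+ *                 dl_max_mcs            = 28;
+ *               } );
+ */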
 /*---------------------------------------------------------------------------------------------------------------------------------------------------------*/
 #endif
diff --git a/openair2/GNB_APP/gnb_config.c b/openair2/GNB_APP/gnb_config.c
index e36db87997712d853d391cf4bbc2b510e3bea6cb..c9f3f020bb0f3e445f05cc8d46e45b641678d247 100644
--- a/openair2/GNB_APP/gnb_config.c
+++ b/openair2/GNB_APP/gnb_config.c
@@ -772,6 +772,10 @@ void RCconfig_nr_macrlc() {
         AssertFatal(1==0,"MACRLC %d: %s unknown southbound midhaul\n",j,*(MacRLC_ParamList.paramarray[j][MACRLC_TRANSPORT_S_PREFERENCE_IDX].strptr));
       } 
       RC.nrmac[j]->ulsch_max_frame_inactivity = *(MacRLC_ParamList.paramarray[j][MACRLC_ULSCH_MAX_FRAME_INACTIVITY].uptr);
+      RC.nrmac[j]->dl_bler_target_upper = *(MacRLC_ParamList.paramarray[j][MACRLC_DL_BLER_TARGET_UPPER_IDX].dblptr);
+      RC.nrmac[j]->dl_bler_target_lower = *(MacRLC_ParamList.paramarray[j][MACRLC_DL_BLER_TARGET_LOWER_IDX].dblptr);
+      RC.nrmac[j]->dl_rd2_bler_threshold = *(MacRLC_ParamList.paramarray[j][MACRLC_DL_RD2_BLER_THRESHOLD_IDX].dblptr);
+      RC.nrmac[j]->dl_max_mcs = *(MacRLC_ParamList.paramarray[j][MACRLC_DL_MAX_MCS_IDX].u8ptr);
       RC.nrmac[j]->num_ulprbbl = num_prbbl;
       LOG_I(NR_MAC,"Blacklisted PRBS %d\n",num_prbbl);
       memcpy(RC.nrmac[j]->ulprbbl,prbbl,275*sizeof(prbbl[0]));
diff --git a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c
index a5fd6a3c91f3574106d221798e6c8899654150cf..53a86f43e53eac3a8a2f00819789700d19efe0c8 100644
--- a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c
+++ b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c
@@ -382,6 +382,74 @@ int nr_write_ce_dlsch_pdu(module_id_t module_idP,
   return offset;
 }
 
+#define BLER_UPDATE_FRAME 10
+#define BLER_FILTER 0.9f
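+/* Adapt the DL MCS from HARQ statistics: at most every BLER_UPDATE_FRAME frames,
+ * fold the retransmission rates observed in the window into an exponential
+ * moving average (BLER_FILTER is the weight of the history) and step the MCS
+ * against the configured dl_bler_target_lower/upper thresholds. */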
+int get_mcs_from_bler(module_id_t mod_id, int CC_id, frame_t frame, sub_frame_t slot, int UE_id)
+{
+  gNB_MAC_INST *nrmac = RC.nrmac[mod_id];
+  const NR_ServingCellConfigCommon_t *scc = nrmac->common_channels[CC_id].ServingCellConfigCommon;
+  const int n = nr_slots_per_frame[*scc->ssbSubcarrierSpacing];
+
+  NR_DL_bler_stats_t *bler_stats = &nrmac->UE_info.UE_sched_ctrl[UE_id].dl_bler_stats;
+  /* first call: everything is zero. Initialize to sensible default */
+  if (bler_stats->last_frame_slot == 0 && bler_stats->mcs == 0) {
+    bler_stats->last_frame_slot = frame * n + slot;
+    bler_stats->mcs = 9;
+    bler_stats->bler = (nrmac->dl_bler_target_lower + nrmac->dl_bler_target_upper) / 2;
+    bler_stats->rd2_bler = nrmac->dl_rd2_bler_threshold;
+  }
+  const int now = frame * n + slot;
+  int diff = now - bler_stats->last_frame_slot;
+  if (diff < 0) // wrap around
+    diff += 1024 * n;
+
+  const uint8_t old_mcs = bler_stats->mcs;
+  const NR_mac_stats_t *stats = &nrmac->UE_info.mac_stats[UE_id];
+  // TODO put back this condition when relevant
+  /*const int dret3x = stats->dlsch_rounds[3] - bler_stats->dlsch_rounds[3];
+  if (dret3x > 0) {
+     if there is a third retransmission, decrease MCS for stabilization and
+     restart averaging window to stabilize transmission 
+    bler_stats->last_frame_slot = now;
+    bler_stats->mcs = max(9, bler_stats->mcs - 1);
+    memcpy(bler_stats->dlsch_rounds, stats->dlsch_rounds, sizeof(stats->dlsch_rounds));
+    LOG_D(MAC, "%4d.%2d: %d retx in 3rd round, setting MCS to %d and restarting window\n", frame, slot, dret3x, bler_stats->mcs);
+    return bler_stats->mcs;
+  }*/
+  if (diff < BLER_UPDATE_FRAME * n)
+    return old_mcs; // no update
+
+  // last update is longer than x frames ago
+  const int dtx = stats->dlsch_rounds[0] - bler_stats->dlsch_rounds[0];
+  const int dretx = stats->dlsch_rounds[1] - bler_stats->dlsch_rounds[1];
+  const int dretx2 = stats->dlsch_rounds[2] - bler_stats->dlsch_rounds[2];
+  const float bler_window = dtx > 0 ? (float) dretx / dtx : bler_stats->bler;
+  const float rd2_bler_wnd = dtx > 0 ? (float) dretx2 / dtx : bler_stats->rd2_bler;
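+  // exponential moving average; the rd2 statistic keeps a much shorter memory (history weight BLER_FILTER / 4)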
+  bler_stats->bler = BLER_FILTER * bler_stats->bler + (1 - BLER_FILTER) * bler_window;
+  bler_stats->rd2_bler = BLER_FILTER / 4 * bler_stats->rd2_bler + (1 - BLER_FILTER / 4) * rd2_bler_wnd;
+
+  int new_mcs = old_mcs;
+  // TODO put back this condition when relevant
+  /* first ensure that number of 2nd retx is below threshold. If this is the
+   * case, use 1st retx to adjust faster 
+  if (bler_stats->rd2_bler > nrmac->dl_rd2_bler_threshold && old_mcs > 6) {
+    new_mcs -= 2;
+  } else if (bler_stats->rd2_bler < nrmac->dl_rd2_bler_threshold) {*/
+  if (bler_stats->bler < nrmac->dl_bler_target_lower && old_mcs < nrmac->dl_max_mcs && dtx > 9)
+    new_mcs += 1;
+  else if (bler_stats->bler > nrmac->dl_bler_target_upper && old_mcs > 6)
+    new_mcs -= 1;
+  // else we are within threshold boundaries
+
+  bler_stats->last_frame_slot = now;
+  bler_stats->mcs = new_mcs;
+  memcpy(bler_stats->dlsch_rounds, stats->dlsch_rounds, sizeof(stats->dlsch_rounds));
+  LOG_D(MAC, "%4d.%2d MCS %d -> %d (dtx %d, dretx %d, BLER wnd %.3f avg %.6f, dretx2 %d, RD2 BLER wnd %.3f avg %.6f)\n",
+        frame, slot, old_mcs, new_mcs, dtx, dretx, bler_window, bler_stats->bler, dretx2, rd2_bler_wnd, bler_stats->rd2_bler);
+  return new_mcs;
+}
+
 void nr_store_dlsch_buffer(module_id_t module_id,
                            frame_t frame,
                            sub_frame_t slot) {
@@ -630,8 +698,8 @@ void pf_dl(module_id_t module_id,
         continue;
 
       /* Calculate coeff */
-      sched_pdsch->mcs = 9;
       ps->nrOfLayers = 1;
+      sched_pdsch->mcs = get_mcs_from_bler(module_id, /* CC_id = */ 0, frame, slot, UE_id);
       uint32_t tbs = pf_tbs[ps->mcsTableIdx][sched_pdsch->mcs];
       coeff_ue[UE_id] = (float) tbs / thr_ue[UE_id];
       LOG_D(NR_MAC,"b %d, thr_ue[%d] %f, tbs %d, coeff_ue[%d] %f\n",
diff --git a/openair2/LAYER2/NR_MAC_gNB/main.c b/openair2/LAYER2/NR_MAC_gNB/main.c
index 6e13e4bcc6225cf523037588450bb305a3e41ec2..0089263620f56f2beb76764ac63f32d7f09b8470 100644
--- a/openair2/LAYER2/NR_MAC_gNB/main.c
+++ b/openair2/LAYER2/NR_MAC_gNB/main.c
@@ -81,22 +81,28 @@ void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen)
 
   for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) {
 
-    stroff+=sprintf(output+stroff,"UE ID %d RNTI %04x (%d/%d) PH %d dB PCMAX %d dBm\n",
+    const NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id];
+    NR_mac_stats_t *stats = &UE_info->mac_stats[UE_id];
+    const int avg_rsrp = stats->num_rsrp_meas > 0 ? stats->cumul_rsrp / stats->num_rsrp_meas : 0;
+    stroff+=sprintf(output+stroff,"UE ID %d RNTI %04x (%d/%d) PH %d dB PCMAX %d dBm, average RSRP %d (%d meas)\n",
       UE_id,
       UE_info->rnti[UE_id],
       num++,
       UE_info->num_UEs,
-      UE_info->UE_sched_ctrl[UE_id].ph,
-      UE_info->UE_sched_ctrl[UE_id].pcmax);
+      sched_ctrl->ph,
+      sched_ctrl->pcmax,
+      avg_rsrp,
+      stats->num_rsrp_meas);
 
-    NR_mac_stats_t *stats = &UE_info->mac_stats[UE_id];
-    const int avg_rsrp = stats->num_rsrp_meas > 0 ? stats->cumul_rsrp / stats->num_rsrp_meas : 0;
-    stroff+=sprintf(output+stroff,"UE %d: dlsch_rounds %d/%d/%d/%d, dlsch_errors %d, pucch0_DTX %d average RSRP %d (%d meas)\n",
+    stroff+=sprintf(output+stroff,"UE %d: dlsch_rounds %d/%d/%d/%d, dlsch_errors %d, pucch0_DTX %d, BLER %.5f MCS %d\n",
           UE_id,
           stats->dlsch_rounds[0], stats->dlsch_rounds[1],
-          stats->dlsch_rounds[2], stats->dlsch_rounds[3], stats->dlsch_errors,
+          stats->dlsch_rounds[2], stats->dlsch_rounds[3],
+          stats->dlsch_errors,
           stats->pucch0_DTX,
-          avg_rsrp, stats->num_rsrp_meas);
+          sched_ctrl->dl_bler_stats.bler,
+          sched_ctrl->dl_bler_stats.mcs);
+
     stats->num_rsrp_meas = 0;
     stats->cumul_rsrp = 0 ;
     stroff+=sprintf(output+stroff,"UE %d: dlsch_total_bytes %d\n", UE_id, stats->dlsch_total_bytes);
@@ -121,7 +127,7 @@ void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen)
       }
     }
   }
-  print_meas(&gNB->eNB_scheduler, "DL & UL scheduling timing stats", NULL, NULL);
+  print_meas_log(&gNB->eNB_scheduler, "DL & UL scheduling timing stats", NULL, NULL, output+stroff);
 }
 
 
diff --git a/openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h b/openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h
index 4aa017ad6aa07cbdbe320ae56640b599ae5228e0..ff9b8809ce919511b30738034fa795ba580abcf6 100644
--- a/openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h
+++ b/openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h
@@ -424,6 +424,13 @@ typedef struct NR_UE_harq {
 
 //! fixme : need to enhace for the multiple TB CQI report
 
+typedef struct NR_DL_bler_stats {
+  frame_t last_frame_slot; // absolute slot (frame * slots_per_frame + slot) of the last update
+  float bler;              // filtered BLER of initial transmissions (first-retransmission rate)
+  float rd2_bler;          // filtered rate of second retransmissions
+  uint8_t mcs;             // MCS currently selected from the BLER targets
+  int dlsch_rounds[8];     // snapshot of the per-round HARQ counters at the last update
+} NR_DL_bler_stats_t;
 
 //
 /*! As per spec 38.214 section 5.2.1.4.2
@@ -567,6 +574,9 @@ typedef struct {
   /// per-LC status data
   mac_rlc_status_resp_t rlc_status[MAX_NUM_LCID];
 
+  /// DL BLER statistics (derived from HARQ feedback) used to adapt the MCS
+  NR_DL_bler_stats_t dl_bler_stats;
+
   int lcid_mask;
   int lcid_to_schedule;
   uint16_t ta_frame;
@@ -765,6 +775,10 @@ typedef struct gNB_MAC_INST_s {
   NR_Type0_PDCCH_CSS_config_t type0_PDCCH_CSS_config[64];
 
   bool first_MIB;
+  double dl_bler_target_upper;
+  double dl_bler_target_lower;
+  double dl_rd2_bler_threshold;
+  uint8_t dl_max_mcs;
 } gNB_MAC_INST;
 
 #endif /*__LAYER2_NR_MAC_GNB_H__ */