Commit da8d00d6 authored by Jaroslava Fiedlerova's avatar Jaroslava Fiedlerova

Merge remote-tracking branch 'origin/increase-build-timeouts' into integration_2025_w11 (!3305)

This merge request addresses two issues:

1. Core Network Deployment/Undeployment

If the Helm release oai5gcn does not exist (e.g., because it was already
deleted or its installation failed), helm uninstall fails with a "release
not found" error. In addition, --timeout 60s makes Helm wait at most 60
seconds for all resources to be deleted; if cleanup takes longer (e.g., due
to slow-terminating resources or dependencies), the command fails with a
timeout error, potentially leaving resources in an incomplete state.
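
For reference, the deployment path previously ran the equivalent of the
following (taken verbatim from the old code in the diff below; cmd.run is
the CI framework's shell wrapper):

    # Old behavior: hard-coded release name and tight timeout.
    # This fails with "release not found" if oai5gcn was never
    # installed or was already removed, and with a timeout error
    # if deleting its resources takes longer than 60 seconds.
    cmd.run('helm uninstall oai5gcn --wait --timeout 60s')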

2. Increase the timeout value for OpenShift RHEL Builds

The timeout parameter sets the maximum time to wait for the build jobs to
complete. If the jobs do not finish within that time, the function logs an
error and exits early, even if the jobs are still progressing.
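
A minimal sketch of such a wait loop, assuming the usual polling pattern
(the actual _wait_build_end implementation may differ; the poll interval
and the use of oc get build with jsonpath are assumptions here):

    import logging
    import subprocess
    import time

    def wait_build_end(jobs, timeout_s, poll_s=5):
        # Poll each OpenShift build until all have reached a terminal
        # phase, or until timeout_s elapses. On timeout the function
        # returns False even if the builds are still progressing,
        # hence the need for a generous timeout value.
        deadline = time.monotonic() + timeout_s
        while time.monotonic() < deadline:
            phases = []
            for job in jobs:
                ret = subprocess.run(
                    f'oc get build {job} -o jsonpath={{.status.phase}}',
                    shell=True, capture_output=True, text=True)
                phases.append(ret.stdout.strip())
            if all(p in ('Complete', 'Failed', 'Cancelled', 'Error') for p in phases):
                return all(p == 'Complete' for p in phases)
            time.sleep(poll_s)
        logging.error(f'builds did not finish within {timeout_s}s')
        return False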

To address both issues:

We now list all installed Helm releases in the given namespace, returning
only their names, and pass those names to helm uninstall, which removes
every release in the namespace. With no explicit --timeout, Helm's default
of 5 minutes applies.
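
As a standalone illustration of this pattern (a sketch only;
uninstall_all_releases is a hypothetical helper, and subprocess stands in
for the CI framework's cmd.run):

    import subprocess

    def uninstall_all_releases(namespace):
        # 'helm list -aq' prints only the names (-q) of all (-a)
        # releases in the namespace; 'xargs -r' skips the uninstall
        # entirely when the list is empty, so a missing release no
        # longer produces a "release not found" error. With no
        # explicit --timeout, Helm waits up to its 5-minute default.
        cmd = (f'helm list -aq -n {namespace} '
               f'| xargs -r helm uninstall -n {namespace} --wait')
        return subprocess.run(cmd, shell=True).returncode == 0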

We also increased the timeout values for the OpenShift RHEL builds.
parents fe52ef28 b9a4ebd1
@@ -72,8 +72,8 @@ def OC_deploy_CN(cmd, ocUserName, ocPassword, ocNamespace, path):
succeeded = OC_login(cmd, ocUserName, ocPassword, ocNamespace)
if not succeeded:
return False, CONST.OC_LOGIN_FAIL
-cmd.run('helm uninstall oai5gcn --wait --timeout 60s')
-ret = cmd.run(f'helm install --wait --timeout 120s oai5gcn {path}/ci-scripts/charts/oai-5g-basic/.')
+cmd.run(f'helm list -aq -n {ocNamespace} | xargs -r helm uninstall -n {ocNamespace} --wait')
+ret = cmd.run(f'helm install --wait oai5gcn {path}/ci-scripts/charts/oai-5g-basic/.')
if ret.returncode != 0:
logging.error('OC OAI CN5G: Deployment failed')
OC_logout(cmd)
@@ -100,10 +100,10 @@ def OC_undeploy_CN(cmd, ocUserName, ocPassword, ocNamespace, path):
cmd.run(f'oc logs -f {podName} {ci} &> {path}/logs/{ii}.log &')
cmd.run(f'cd {path}/logs && zip -r -qq test_logs_CN.zip *.log')
cmd.copyin(f'{path}/logs/test_logs_CN.zip','test_logs_CN.zip')
-ret = cmd.run('helm uninstall --wait --timeout 60s oai5gcn')
+ret = cmd.run(f'helm list -aq -n {ocNamespace} | xargs -r helm uninstall -n {ocNamespace} --wait')
if ret.returncode != 0:
logging.error('OC OAI CN5G: Undeployment failed')
-cmd.run('helm uninstall --wait --timeout 60s oai5gcn')
+cmd.run(f'helm list -aq -n {ocNamespace} | xargs -r helm uninstall -n {ocNamespace} --wait')
OC_logout(cmd)
return False, CONST.OC_PROJECT_FAIL
report = cmd.run('oc get pods')
@@ -330,7 +330,7 @@ class Cluster:
self._recreate_bc('ran-base', baseTag, 'openshift/ran-base-bc.yaml')
ranbase_job = self._start_build('ran-base')
attemptedImages += ['ran-base']
-status = ranbase_job is not None and self._wait_build_end([ranbase_job], 800)
+status = ranbase_job is not None and self._wait_build_end([ranbase_job], 1000)
if not status: logging.error('failure during build of ran-base')
self.cmd.run(f'oc logs {ranbase_job} &> cmake_targets/log/ran-base.log') # cannot use cmd.run because of redirect
# recover logs by mounting image
@@ -392,7 +392,7 @@ class Cluster:
gnb_aw2s_job = self._start_build('oai-gnb-aw2s')
attemptedImages += ['oai-gnb-aw2s']
-wait = enb_job is not None and gnb_job is not None and gnb_aw2s_job is not None and self._wait_build_end([enb_job, gnb_job, gnb_aw2s_job], 600)
+wait = enb_job is not None and gnb_job is not None and gnb_aw2s_job is not None and self._wait_build_end([enb_job, gnb_job, gnb_aw2s_job], 800)
if not wait: logging.error('error during build of eNB/gNB')
status = status and wait
# recover logs
@@ -421,7 +421,7 @@ class Cluster:
nrue_job = self._start_build('oai-nr-ue')
attemptedImages += ['oai-nr-ue']
-wait = nr_cuup_job is not None and lteue_job is not None and nrue_job is not None and self._wait_build_end([nr_cuup_job, lteue_job, nrue_job], 600)
+wait = nr_cuup_job is not None and lteue_job is not None and nrue_job is not None and self._wait_build_end([nr_cuup_job, lteue_job, nrue_job], 800)
if not wait: logging.error('error during build of nr-cuup/lteUE/nrUE')
status = status and wait
# recover logs