From 236f7d56e5fccac73f39d5427df953b0d4ff2043 Mon Sep 17 00:00:00 2001
From: Laurent THOMAS <laurent.thomas@open-cells.com>
Date: Tue, 10 May 2022 13:12:29 +0200
Subject: [PATCH] Remove UEid in the MAC

After this commit, only the RNTI is used as an identifier in the MAC.
Further, it removes some module_id parameters, but many remain (removing
all of them is still a long way off).
---
 openair1/PHY/NR_TRANSPORT/nr_dlsch_coding.c | 2 +-
 openair1/PHY/NR_UE_TRANSPORT/nr_ulsch_ue.c | 2 +-
 openair1/SCHED_NR_UE/phy_procedures_nr_ue.c | 1 -
 openair1/SIMULATION/NR_PHY/dlsim.c | 17 +-
 openair2/F1AP/f1ap_du_ue_context_management.c | 9 +-
 openair2/LAYER2/NR_MAC_gNB/config.c | 43 +-
 openair2/LAYER2/NR_MAC_gNB/gNB_scheduler.c | 7 +-
 openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_RA.c | 58 +--
 .../LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c | 232 +++++-----
 .../LAYER2/NR_MAC_gNB/gNB_scheduler_phytest.c | 40 +-
 .../NR_MAC_gNB/gNB_scheduler_primitives.c | 339 +++++++-------
 .../LAYER2/NR_MAC_gNB/gNB_scheduler_srs.c | 27 +-
 .../LAYER2/NR_MAC_gNB/gNB_scheduler_uci.c | 289 ++++++------
 .../LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c | 438 +++++++++---------
 openair2/LAYER2/NR_MAC_gNB/mac_proto.h | 24 +-
 openair2/LAYER2/NR_MAC_gNB/main.c | 84 ++--
 openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h | 35 +-
 openair2/NR_PHY_INTERFACE/NR_IF_Module.c | 3 -
 openair2/RRC/NR/rrc_gNB.c | 7 +-
 openair2/RRC/NR/rrc_gNB_nsa.c | 3 +-
 openair2/UTIL/OPT/opt.h | 5 +-
 openair2/UTIL/OPT/probe.c | 8 +
 22 files changed, 823 insertions(+), 850 deletions(-)

diff --git a/openair1/PHY/NR_TRANSPORT/nr_dlsch_coding.c b/openair1/PHY/NR_TRANSPORT/nr_dlsch_coding.c
index 8e97a8cbdce..548cc6dbd42 100644
--- a/openair1/PHY/NR_TRANSPORT/nr_dlsch_coding.c
+++ b/openair1/PHY/NR_TRANSPORT/nr_dlsch_coding.c
@@ -310,7 +310,7 @@ int nr_dlsch_encoding(PHY_VARS_gNB *gNB,
   uint32_t A = rel15->TBSize[0]<<3;
   unsigned char *a=harq->pdu;
   if ( rel15->rnti != SI_RNTI)
-    trace_NRpdu(DIRECTION_DOWNLINK, a, rel15->TBSize[0], 0, WS_C_RNTI, rel15->rnti, frame, slot,0, 0);
+    trace_NRpdu(DIRECTION_DOWNLINK, a, rel15->TBSize[0], WS_C_RNTI, rel15->rnti, frame, slot,0, 0);

   NR_gNB_SCH_STATS_t *stats=NULL;
   int first_free=-1;
diff --git a/openair1/PHY/NR_UE_TRANSPORT/nr_ulsch_ue.c b/openair1/PHY/NR_UE_TRANSPORT/nr_ulsch_ue.c
index 450805a26cc..a07a19d085f 100644
--- a/openair1/PHY/NR_UE_TRANSPORT/nr_ulsch_ue.c
+++ b/openair1/PHY/NR_UE_TRANSPORT/nr_ulsch_ue.c
@@ -171,7 +171,7 @@ void nr_ue_ulsch_procedures(PHY_VARS_NR_UE *UE,
     trace_NRpdu(DIRECTION_UPLINK,
                 harq_process_ul_ue->a,
                 harq_process_ul_ue->pusch_pdu.pusch_data.tb_size,
-                0, WS_C_RNTI, rnti, frame, slot, 0, 0);
+                WS_C_RNTI, rnti, frame, slot, 0, 0);

   if (nr_ulsch_encoding(UE, ulsch_ue, frame_parms, harq_pid, G) == -1)
     return;
diff --git a/openair1/SCHED_NR_UE/phy_procedures_nr_ue.c b/openair1/SCHED_NR_UE/phy_procedures_nr_ue.c
index 99731c8cb19..6ee9a95b59e 100644
--- a/openair1/SCHED_NR_UE/phy_procedures_nr_ue.c
+++ b/openair1/SCHED_NR_UE/phy_procedures_nr_ue.c
@@ -134,7 +134,6 @@ void nr_fill_rx_indication(fapi_nr_rx_indication_t *rx_ind,
       trace_NRpdu(DIRECTION_DOWNLINK,
                   dlsch0->harq_processes[dlsch0->current_harq_pid]->b,
                   dlsch0->harq_processes[dlsch0->current_harq_pid]->TBS / 8,
-                  pdu_type,
                   WS_C_RNTI,
                   dlsch0->rnti,
                   proc->frame_rx,
diff --git a/openair1/SIMULATION/NR_PHY/dlsim.c b/openair1/SIMULATION/NR_PHY/dlsim.c
index d7503ce7382..c804fc7173f 100644
--- a/openair1/SIMULATION/NR_PHY/dlsim.c
+++ b/openair1/SIMULATION/NR_PHY/dlsim.c
@@ -270,9 +270,9 @@ int g_mcsIndex = -1, g_mcsTableIdx = 0, g_rbStart = -1, g_rbSize = -1, g_nrOfLay
 void
nr_dlsim_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t slot) { - NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info; - AssertFatal(UE_info->num_UEs == 1, "can have only a single UE\n"); - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[0]; + NR_UE_info_t *UE_info = RC.nrmac[module_id]->UE_info.list[0]; + AssertFatal(RC.nrmac[module_id]->UE_info.list[1]==NULL, "can have only a single UE\n"); + NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl; NR_ServingCellConfigCommon_t *scc = RC.nrmac[0]->common_channels[0].ServingCellConfigCommon; /* manually set free CCE to 0 */ @@ -282,14 +282,14 @@ void nr_dlsim_preprocessor(module_id_t module_id, find_aggregation_candidates(&sched_ctrl->aggregation_level, &nr_of_candidates, sched_ctrl->search_space,4); - sched_ctrl->coreset = get_coreset(module_id, scc, sched_ctrl->active_bwp->bwp_Dedicated, sched_ctrl->search_space, target_ss); + sched_ctrl->coreset = get_coreset(RC.nrmac[module_id], scc, sched_ctrl->active_bwp->bwp_Dedicated, sched_ctrl->search_space, target_ss); sched_ctrl->cce_index = 0; NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static; nr_set_pdsch_semi_static(NULL, scc, - UE_info->CellGroup[0], + UE_info->CellGroup, sched_ctrl->active_bwp, NULL, /* tda = */ 0, @@ -837,8 +837,7 @@ int main(int argc, char **argv) gNB_mac->pre_processor_dl = nr_dlsim_preprocessor; phy_init_nr_gNB(gNB,0,1); N_RB_DL = gNB->frame_parms.N_RB_DL; - NR_UE_info_t *UE_info = &RC.nrmac[0]->UE_info; - UE_info->num_UEs=1; + NR_UE_info_t *UE_info = RC.nrmac[0]->UE_info.list[0]; // stub to configure frame_parms // nr_phy_config_request_sim(gNB,N_RB_DL,N_RB_DL,mu,Nid_cell,SSB_positions); @@ -1098,10 +1097,10 @@ int main(int argc, char **argv) clear_nr_nfapi_information(RC.nrmac[0], 0, frame, slot); - UE_info->UE_sched_ctrl[0].harq_processes[harq_pid].ndi = !(trial&1); + UE_info->UE_sched_ctrl.harq_processes[harq_pid].ndi = !(trial&1); - UE_info->UE_sched_ctrl[0].harq_processes[harq_pid].round = round; + UE_info->UE_sched_ctrl.harq_processes[harq_pid].round = round; for (int i=0; i<MAX_NUM_CORESET; i++) gNB_mac->pdcch_cand[i] = 0; diff --git a/openair2/F1AP/f1ap_du_ue_context_management.c b/openair2/F1AP/f1ap_du_ue_context_management.c index 7b20bfb2aa5..e19f077592a 100644 --- a/openair2/F1AP/f1ap_du_ue_context_management.c +++ b/openair2/F1AP/f1ap_du_ue_context_management.c @@ -683,15 +683,15 @@ int DU_handle_UE_CONTEXT_RELEASE_COMMAND(instance_t instance, "RNTI obtained through DU ID (%x) is different from CU ID (%x)\n", rnti, ctxt.rnti); int UE_out_of_sync = 0; - if (RC.nrrrc && RC.nrrrc[instance]->node_type == ngran_gNB_DU) { - for (int n = 0; n < MAX_MOBILES_PER_GNB; ++n) { - if (RC.nrmac[instance]->UE_info.active[n] == TRUE - && rnti == RC.nrmac[instance]->UE_info.rnti[n]) { + UE_iterator(RC.nrmac[instance]->UE_info.list, UE) { + if (UE->rnti == rnti) { UE_out_of_sync = 0; break; } } + if (!UE) + LOG_E(F1AP,"Not found rnti: %x\n", rnti); } else { for (int n = 0; n < MAX_MOBILES_PER_ENB; ++n) { if (RC.mac[instance]->UE_info.active[n] == TRUE @@ -701,7 +701,6 @@ int DU_handle_UE_CONTEXT_RELEASE_COMMAND(instance_t instance, } } } - /* We don't need the Cause */ /* Optional RRC Container: if present, send to UE */ F1AP_FIND_PROTOCOLIE_BY_ID(F1AP_UEContextReleaseCommandIEs_t, ie, container, diff --git a/openair2/LAYER2/NR_MAC_gNB/config.c b/openair2/LAYER2/NR_MAC_gNB/config.c index b524f6ab4e9..0e55b519abb 100644 --- a/openair2/LAYER2/NR_MAC_gNB/config.c +++ b/openair2/LAYER2/NR_MAC_gNB/config.c @@ -455,18 +455,17 @@ int 
nr_mac_enable_ue_rrc_processing_timer(module_id_t Mod_idP, rnti_t rnti, NR_S if (rrc_reconfiguration_delay == 0) { return -1; } - const int UE_id = find_nr_UE_id(Mod_idP,rnti); - if (UE_id < 0) { + + NR_UE_info_t *UE_info = find_nr_UE(&RC.nrmac[Mod_idP]->UE_info,rnti); + if (!UE_info) { LOG_W(NR_MAC, "Could not find UE for RNTI 0x%04x\n", rnti); return -1; } - - NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl; const uint16_t sf_ahead = 6/(0x01<<subcarrierSpacing) + ((6%(0x01<<subcarrierSpacing))>0); const uint16_t sl_ahead = sf_ahead * (0x01<<subcarrierSpacing); sched_ctrl->rrc_processing_timer = (rrc_reconfiguration_delay<<subcarrierSpacing) + sl_ahead; - LOG_I(NR_MAC, "Activating RRC processing timer for UE %d with %d ms\n", UE_id, rrc_reconfiguration_delay); + LOG_I(NR_MAC, "Activating RRC processing timer for UE %04x with %d ms\n", UE_info->rnti, rrc_reconfiguration_delay); return 0; } @@ -593,11 +592,15 @@ int rrc_mac_config_req_gNB(module_id_t Mod_idP, if(CellGroup->spCellConfig && CellGroup->spCellConfig->spCellConfigDedicated) servingCellConfig = CellGroup->spCellConfig->spCellConfigDedicated; - NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info; if (add_ue == 1 && get_softmodem_params()->phy_test) { - const int UE_id = add_new_nr_ue(Mod_idP, rnti, CellGroup); - LOG_I(NR_MAC,"Added new UE_id %d/%x with initial CellGroup\n",UE_id,rnti); - process_CellGroup(CellGroup,&UE_info->UE_sched_ctrl[UE_id]); + NR_UE_info_t* UE = add_new_nr_ue(RC.nrmac[Mod_idP], rnti, CellGroup); + if (UE) + LOG_I(NR_MAC,"Added new UE %x with initial CellGroup\n", rnti); + else { + LOG_E(NR_MAC,"Error adding UE %04x\n", rnti); + return -1; + } + process_CellGroup(CellGroup,&UE->UE_sched_ctrl); } else if (add_ue == 1 && !get_softmodem_params()->phy_test) { const int CC_id = 0; NR_COMMON_channels_t *cc = &RC.nrmac[Mod_idP]->common_channels[CC_id]; @@ -644,12 +647,18 @@ int rrc_mac_config_req_gNB(module_id_t Mod_idP, LOG_I(NR_MAC,"Added new RA process for UE RNTI %04x with initial CellGroup\n", rnti); } else { // CellGroup has been updated NR_ServingCellConfigCommon_t *scc = RC.nrmac[Mod_idP]->common_channels[0].ServingCellConfigCommon; - const int UE_id = find_nr_UE_id(Mod_idP,rnti); + NR_UE_info_t * UE = find_nr_UE(&RC.nrmac[Mod_idP]->UE_info,rnti); + if (!UE) { + LOG_E(NR_MAC, "Can't find UE %04x\n", rnti); + return -1; + } int target_ss; - UE_info->CellGroup[UE_id] = CellGroup; - LOG_I(NR_MAC,"Modified UE_id %d/%x with CellGroup\n",UE_id,rnti); - process_CellGroup(CellGroup,&UE_info->UE_sched_ctrl[UE_id]); - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + + UE->CellGroup = CellGroup; + LOG_I(NR_MAC,"Modified rnti %04x with CellGroup\n",rnti); + process_CellGroup(CellGroup,&UE->UE_sched_ctrl); + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; + const NR_PDSCH_ServingCellConfig_t *pdsch = servingCellConfig ? servingCellConfig->pdsch_ServingCellConfig->choice.setup : NULL; if (get_softmodem_params()->sa) { // add all available DL HARQ processes for this UE in SA @@ -672,7 +681,7 @@ int rrc_mac_config_req_gNB(module_id_t Mod_idP, genericParameters = &scc->downlinkConfigCommon->initialDownlinkBWP->genericParameters; } sched_ctrl->search_space = get_searchspace(sib1 ? 
sib1->message.choice.c1->choice.systemInformationBlockType1 : NULL, scc, bwpd, target_ss); - sched_ctrl->coreset = get_coreset(Mod_idP, scc, bwpd, sched_ctrl->search_space, target_ss); + sched_ctrl->coreset = get_coreset(RC.nrmac[Mod_idP], scc, bwpd, sched_ctrl->search_space, target_ss); sched_ctrl->sched_pdcch = set_pdcch_structure(RC.nrmac[Mod_idP], sched_ctrl->search_space, sched_ctrl->coreset, @@ -685,7 +694,7 @@ int rrc_mac_config_req_gNB(module_id_t Mod_idP, CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig && CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig->choice.setup ) - compute_csi_bitlen (CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig->choice.setup, UE_info, UE_id, Mod_idP); + compute_csi_bitlen (CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig->choice.setup, UE); } } VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_RRC_MAC_CONFIG, VCD_FUNCTION_OUT); diff --git a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler.c b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler.c index 92937392357..198e08610ba 100644 --- a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler.c +++ b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler.c @@ -218,7 +218,7 @@ void schedule_nr_SRS(module_id_t module_idP, frame_t frameP, sub_frame_t subfram if ((1 << tmp) & deltaTSFC) { // This is an SRS subframe, loop over UEs - for (UE_id = 0; UE_id < MAX_MOBILES_PER_GNB; UE_id++) { + UEs_iterator() { if (!RC.nrmac[module_idP]->UE_info.active[UE_id]) continue; ul_req = &RC.nrmac[module_idP]->UL_req[CC_id].ul_config_request_body; // drop the allocation if the UE hasn't send RRCConnectionSetupComplete yet @@ -372,7 +372,6 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP, schedule_nr_prach(module_idP, f, s); } - // Schedule CSI-RS transmission nr_csirs_scheduling(module_idP, frame, slot, nr_slots_per_frame[*scc->ssbSubcarrierSpacing]); @@ -398,10 +397,10 @@ void gNB_dlsch_ulsch_scheduler(module_id_t module_idP, nr_schedule_ue_spec(module_idP, frame, slot); stop_meas(&gNB->schedule_dlsch); - nr_schedule_pucch(module_idP, frame, slot); + nr_schedule_pucch(RC.nrmac[module_idP], frame, slot); // This schedule SR after PUCCH for multiplexing - nr_sr_reporting(module_idP, frame, slot); + nr_sr_reporting(RC.nrmac[module_idP], frame, slot); stop_meas(&RC.nrmac[module_idP]->eNB_scheduler); diff --git a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_RA.c b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_RA.c index 19db162d96a..302685bfaf7 100644 --- a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_RA.c +++ b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_RA.c @@ -539,8 +539,10 @@ void nr_initiate_ra_proc(module_id_t module_idP, for (int i = 0; i < NR_NB_RA_PROC_MAX; i++) { NR_RA_t *ra = &cc->ra[i]; pr_found = 0; - const int UE_id = find_nr_UE_id(module_idP, ra->rnti); - if (UE_id != -1) { + const NR_UE_info_t * UE = find_nr_UE(&nr_mac->UE_info, ra->rnti); + if (UE) { + // the UE is already registered + LOG_W(NR_MAC, "Received RA for existing RNTI %04x\n", ra->rnti); continue; } if (ra->state == RA_IDLE) { @@ -634,7 +636,7 @@ void nr_initiate_ra_proc(module_id_t module_idP, AssertFatal(ra->ra_ss!=NULL,"SearchSpace cannot be null for RA\n"); - ra->coreset = get_coreset(module_idP, scc, bwp, ra->ra_ss, NR_SearchSpace__searchSpaceType_PR_common); + ra->coreset = get_coreset(nr_mac, scc, bwp, ra->ra_ss, NR_SearchSpace__searchSpaceType_PR_common); ra->sched_pdcch = set_pdcch_structure(nr_mac, ra->ra_ss, ra->coreset, @@ -669,7 +671,7 @@ void nr_initiate_ra_proc(module_id_t module_idP, ra->rnti = (taus() % 65518) 
+ 1; loop++; } while (loop != 100 - && !((find_nr_UE_id(module_idP, ra->rnti) == -1) && (find_nr_RA_id(module_idP, CC_id, ra->rnti) == -1) + && !((find_nr_UE(&nr_mac->UE_info, ra->rnti) == NULL) && (find_nr_RA_id(module_idP, CC_id, ra->rnti) == -1) && ra->rnti >= 1 && ra->rnti <= 65519)); if (loop == 100) { LOG_E(NR_MAC, "%s:%d:%s: [RAPROC] initialisation random access aborted\n", __FILE__, __LINE__, __FUNCTION__); @@ -682,7 +684,7 @@ void nr_initiate_ra_proc(module_id_t module_idP, ra->beam_id = beam_index; LOG_I(NR_MAC, - "[gNB %d][RAPROC] CC_id %d Frame %d Activating Msg2 generation in frame %d, slot %d using RA rnti %x SSB " + "[gNB %d][RAPROC] CC_id %d Frame %d Activating Msg2 generation in frame %d, slot %d using RA rnti %x SSB, new rnti %04x " "index %u RA index %d\n", module_idP, CC_id, @@ -690,6 +692,7 @@ void nr_initiate_ra_proc(module_id_t module_idP, ra->Msg2_frame, ra->Msg2_slot, ra->RA_rnti, + ra->rnti, cc->ssb_index[beam_index], i); @@ -1517,7 +1520,7 @@ void nr_generate_Msg2(module_id_t module_idP, int CC_id, frame_t frameP, sub_fra } ra->state = WAIT_Msg3; - LOG_D(NR_MAC,"[gNB %d][RAPROC] Frame %d, Subframe %d: RA state %d\n", module_idP, frameP, slotP, ra->state); + LOG_W(NR_MAC,"[gNB %d][RAPROC] Frame %d, Subframe %d: rnti %04x RA state %d\n", module_idP, frameP, slotP, ra->rnti, ra->state); } } @@ -1560,9 +1563,14 @@ void nr_generate_Msg4(module_id_t module_idP, int CC_id, frame_t frameP, sub_fra ra->rnti = ra->crnti; } - int UE_id = find_nr_UE_id(module_idP, ra->rnti); - NR_UE_info_t *UE_info = &nr_mac->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_info_t * UE = find_nr_UE(&nr_mac->UE_info, ra->rnti); + if (!UE) { + LOG_E(NR_MAC,"want to generate Msg4, but rnti %04x not in the table\n", ra->rnti); + return; + } + + LOG_I(NR_MAC,"Generate msg4, rnti: %04x\n", ra->rnti); + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; NR_BWP_t *genericParameters = bwp ? 
& bwp->bwp_Common->genericParameters : &scc->downlinkConfigCommon->initialDownlinkBWP->genericParameters; @@ -1595,7 +1603,7 @@ void nr_generate_Msg4(module_id_t module_idP, int CC_id, frame_t frameP, sub_fra // Remove UE associated to TC-RNTI if(harq->round==0 && ra->msg3_dcch_dtch) { - mac_remove_nr_ue(module_idP, tc_rnti); + mac_remove_nr_ue(nr_mac, tc_rnti); } // get CCEindex, needed also for PUCCH and then later for PDCCH @@ -1626,7 +1634,7 @@ void nr_generate_Msg4(module_id_t module_idP, int CC_id, frame_t frameP, sub_fra LOG_D(NR_MAC,"[RAPROC] Msg4 r_pucch %d (CCEIndex %d, nb_of_candidates %d, delta_PRI %d)\n", r_pucch, CCEIndex, nr_of_candidates, delta_PRI); - int alloc = nr_acknack_scheduling(module_idP, UE_id, frameP, slotP, r_pucch, 1); + int alloc = nr_acknack_scheduling(module_idP, UE, frameP, slotP, r_pucch, 1); AssertFatal(alloc>=0,"Couldn't find a pucch allocation for ack nack (msg4)\n"); NR_sched_pucch_t *pucch = &sched_ctrl->sched_pucch[alloc]; harq->feedback_slot = pucch->ul_slot; @@ -1942,10 +1950,9 @@ void nr_generate_Msg4(module_id_t module_idP, int CC_id, frame_t frameP, sub_fra if(ra->msg3_dcch_dtch) { // If the UE used MSG3 to transfer a DCCH or DTCH message, then contention resolution is successful upon transmission of PDCCH - LOG_I(NR_MAC, "(ue %i, rnti 0x%04x) CBRA procedure succeeded!\n", UE_id, ra->rnti); + LOG_I(NR_MAC, "(ue rnti 0x%04x) CBRA procedure succeeded!\n", ra->rnti); nr_clear_ra_proc(module_idP, CC_id, frameP, ra); - UE_info->active[UE_id] = true; - UE_info->Msg4_ACKed[UE_id] = true; + UE->Msg4_ACKed = true; remove_front_nr_list(&sched_ctrl->feedback_dl_harq); harq->feedback_slot = -1; @@ -1962,28 +1969,27 @@ void nr_generate_Msg4(module_id_t module_idP, int CC_id, frame_t frameP, sub_fra void nr_check_Msg4_Ack(module_id_t module_id, int CC_id, frame_t frame, sub_frame_t slot, NR_RA_t *ra) { - int UE_id = find_nr_UE_id(module_id, ra->rnti); + NR_UE_info_t * UE = find_nr_UE(&RC.nrmac[module_id]->UE_info, ra->rnti); const int current_harq_pid = ra->harq_pid; - NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; NR_UE_harq_t *harq = &sched_ctrl->harq_processes[current_harq_pid]; - NR_mac_stats_t *stats = &UE_info->mac_stats[UE_id]; + NR_mac_stats_t *stats = &UE->mac_stats; - LOG_D(NR_MAC, "ue %d, rnti 0x%04x, harq is waiting %d, round %d, frame %d %d, harq id %d\n", UE_id, ra->rnti, harq->is_waiting, harq->round, frame, slot, current_harq_pid); + LOG_D(NR_MAC, "ue rnti 0x%04x, harq is waiting %d, round %d, frame %d %d, harq id %d\n", ra->rnti, harq->is_waiting, harq->round, frame, slot, current_harq_pid); if (harq->is_waiting == 0) { if (harq->round == 0) { + if (stats->dl.errors == 0) { - LOG_A(NR_MAC, "(ue %i, rnti 0x%04x) Received Ack of RA-Msg4. CBRA procedure succeeded!\n", UE_id, ra->rnti); - UE_info->active[UE_id] = true; - UE_info->Msg4_ACKed[UE_id] = true; + LOG_A(NR_MAC, "(UE RNTI 0x%04x) Received Ack of RA-Msg4. CBRA procedure succeeded!\n", ra->rnti); + UE->Msg4_ACKed = true; // Pause scheduling according to: // 3GPP TS 38.331 Section 12 Table 12.1-1: UE performance requirements for RRC procedures for UEs const NR_COMMON_channels_t *common_channels = &RC.nrmac[module_id]->common_channels[0]; const NR_SIB1_t *sib1 = common_channels->sib1 ? common_channels->sib1->message.choice.c1->choice.systemInformationBlockType1 : NULL; - const NR_ServingCellConfig_t *servingCellConfig = UE_info->CellGroup[UE_id] ? 
UE_info->CellGroup[UE_id]->spCellConfig->spCellConfigDedicated : NULL; + const NR_ServingCellConfig_t *servingCellConfig = UE->CellGroup ? UE->CellGroup->spCellConfig->spCellConfigDedicated : NULL; NR_BWP_t *genericParameters = get_dl_bwp_genericParameters(sched_ctrl->active_bwp, common_channels->ServingCellConfigCommon, sib1); @@ -1991,9 +1997,9 @@ void nr_check_Msg4_Ack(module_id_t module_id, int CC_id, frame_t frame, sub_fram NR_RRC_SETUP_DELAY_MS + NR_RRC_BWP_SWITCHING_DELAY_MS : NR_RRC_SETUP_DELAY_MS; sched_ctrl->rrc_processing_timer = (delay_ms << genericParameters->subcarrierSpacing); - LOG_I(NR_MAC, "(%d.%d) Activating RRC processing timer for UE %d with %d ms\n", frame, slot, UE_id, delay_ms); + LOG_I(NR_MAC, "(%d.%d) Activating RRC processing timer for UE %04x with %d ms\n", frame, slot, UE->rnti, delay_ms); } else { - LOG_I(NR_MAC, "(ue %i, rnti 0x%04x) RA Procedure failed at Msg4!\n", UE_id, ra->rnti); + LOG_I(NR_MAC, "(ue rnti 0x%04x) RA Procedure failed at Msg4!\n", ra->rnti); } nr_clear_ra_proc(module_id, CC_id, frame, ra); @@ -2001,7 +2007,7 @@ void nr_check_Msg4_Ack(module_id_t module_id, int CC_id, frame_t frame, sub_fram remove_nr_list(&sched_ctrl->retrans_dl_harq, current_harq_pid); } } else { - LOG_D(NR_MAC, "(ue %i, rnti 0x%04x) Received Nack of RA-Msg4. Preparing retransmission!\n", UE_id, ra->rnti); + LOG_I(NR_MAC, "(UE %04x) Received Nack of RA-Msg4. Preparing retransmission!\n", ra->rnti); ra->Msg4_frame = (frame + 1) % 1024; ra->Msg4_slot = 1; ra->state = Msg4; diff --git a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c index 4fc4eae1a27..2e983e06641 100644 --- a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c +++ b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_dlsch.c @@ -317,10 +317,9 @@ int nr_write_ce_dlsch_pdu(module_id_t module_idP, void nr_store_dlsch_buffer(module_id_t module_id, frame_t frame, sub_frame_t slot) { - NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info; - for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + UE_iterator(RC.nrmac[module_id]->UE_info.list, UE) { + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; sched_ctrl->num_total_bytes = 0; sched_ctrl->dl_pdus_total = 0; @@ -328,13 +327,11 @@ void nr_store_dlsch_buffer(module_id_t module_id, // Note: DL_SCH_LCID_DCCH, DL_SCH_LCID_DCCH1, DL_SCH_LCID_DTCH for (int i = 0; i < sched_ctrl->dl_lc_num; ++i) { const int lcid = sched_ctrl->dl_lc_ids[i]; - const uint16_t rnti = UE_info->rnti[UE_id]; - LOG_D(NR_MAC, "In %s: UE %d/%x: LCID %d\n", __FUNCTION__, UE_id, rnti, lcid); - + const uint16_t rnti = UE->rnti; + LOG_D(NR_MAC, "In %s: UE %x: LCID %d\n", __FUNCTION__, rnti, lcid); if (lcid == DL_SCH_LCID_DTCH && sched_ctrl->rrc_processing_timer > 0) { continue; } - start_meas(&RC.nrmac[module_id]->rlc_status_ind); sched_ctrl->rlc_status[lcid] = mac_rlc_status_ind(module_id, rnti, @@ -360,7 +357,7 @@ void nr_store_dlsch_buffer(module_id_t module_id, slot, lcid < 4 ? 
"DCCH":"DTCH", lcid, - UE_id, + UE->rnti, sched_ctrl->rlc_status[lcid].bytes_in_buffer, sched_ctrl->num_total_bytes, sched_ctrl->dl_pdus_total, @@ -374,15 +371,14 @@ bool allocate_dl_retransmission(module_id_t module_id, sub_frame_t slot, uint16_t *rballoc_mask, int *n_rb_sched, - int UE_id, + NR_UE_info_t * UE, int current_harq_pid) { gNB_MAC_INST *nr_mac = RC.nrmac[module_id]; const NR_ServingCellConfigCommon_t *scc = nr_mac->common_channels->ServingCellConfigCommon; - NR_UE_info_t *UE_info = &nr_mac->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; NR_sched_pdsch_t *retInfo = &sched_ctrl->harq_processes[current_harq_pid].sched_pdsch; - NR_CellGroupConfig_t *cg = UE_info->CellGroup[UE_id]; + NR_CellGroupConfig_t *cg = UE->CellGroup; NR_BWP_DownlinkDedicated_t *bwpd = cg && @@ -422,7 +418,7 @@ bool allocate_dl_retransmission(module_id_t module_id, rbStart++; if (rbStart >= bwpSize) { - LOG_D(NR_MAC, "cannot allocate retransmission for UE %d/RNTI %04x: no resources\n", UE_id, UE_info->rnti[UE_id]); + LOG_D(NR_MAC, "cannot allocate retransmission for RNTI %04x: no resources\n", UE->rnti); return false; } @@ -495,7 +491,7 @@ bool allocate_dl_retransmission(module_id_t module_id, /* Find a free CCE */ const int cid = sched_ctrl->coreset->controlResourceSetId; - const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]); + const uint16_t Y = get_Y(cid%3, slot, UE->rnti); uint8_t nr_of_candidates; for (int i=0; i<5; i++) { @@ -517,8 +513,8 @@ bool allocate_dl_retransmission(module_id_t module_id, Y); if (CCEIndex<0) { - LOG_D(MAC, "%4d.%2d could not find CCE for DL DCI retransmission UE %d/RNTI %04x\n", - frame, slot, UE_id, UE_info->rnti[UE_id]); + LOG_D(MAC, "%4d.%2d could not find CCE for DL DCI retransmission RNTI %04x\n", + frame, slot, UE->rnti); return false; } @@ -526,12 +522,11 @@ bool allocate_dl_retransmission(module_id_t module_id, * allocation after CCE alloc fail would be more complex) */ int r_pucch = nr_get_pucch_resource(sched_ctrl->coreset, sched_ctrl->active_ubwp, ubwpd, CCEIndex); - const int alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot, r_pucch, 0); + const int alloc = nr_acknack_scheduling(module_id, UE, frame, slot, r_pucch, 0); if (alloc<0) { LOG_D(MAC, - "could not find PUCCH for UE %d/%04x@%d.%d\n", - UE_id, - UE_info->rnti[UE_id], + "could not find PUCCH for UE %04x@%d.%d\n", + UE->rnti, frame, slot); RC.nrmac[module_id]->pdcch_cand[cid]--; @@ -557,48 +552,54 @@ bool allocate_dl_retransmission(module_id_t module_id, return true; } -float thr_ue[MAX_MOBILES_PER_GNB]; uint32_t pf_tbs[3][29]; // pre-computed, approximate TBS values for PF coefficient +typedef struct UEsched_s { + float coef; + NR_UE_info_t * UE; +} UEsched_t; + +static int comparator(const void *p, const void *q) { + return ((UEsched_t*)p)->coef < ((UEsched_t*)q)->coef; +} void pf_dl(module_id_t module_id, frame_t frame, sub_frame_t slot, - NR_list_t *UE_list, + NR_UE_info_t **UE_list, int max_num_ue, int n_rb_sched, uint16_t *rballoc_mask) { gNB_MAC_INST *mac = RC.nrmac[module_id]; - NR_UE_info_t *UE_info = &mac->UE_info; NR_ServingCellConfigCommon_t *scc=mac->common_channels[0].ServingCellConfigCommon; - float coeff_ue[MAX_MOBILES_PER_GNB]; // UEs that could be scheduled - int ue_array[MAX_MOBILES_PER_GNB]; - int layers[MAX_MOBILES_PER_GNB]; - NR_list_t UE_sched = { .head = -1, .next = ue_array, .tail = -1, .len = MAX_MOBILES_PER_GNB }; + UEsched_t UE_sched[MAX_MOBILES_PER_GNB] = {0}; + int remainUEs = 
max_num_ue; + int curUE = 0; /* Loop UE_info->list to check retransmission */ - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - if (UE_info->Msg4_ACKed[UE_id] != true) continue; + UE_iterator(UE_list, UE) { + if (UE->Msg4_ACKed != true) + continue; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; if (sched_ctrl->ul_failure==1 && get_softmodem_params()->phy_test==0) continue; - const NR_mac_dir_stats_t *stats = &UE_info->mac_stats[UE_id].dl; + const NR_mac_dir_stats_t *stats = &UE->mac_stats.dl; NR_sched_pdsch_t *sched_pdsch = &sched_ctrl->sched_pdsch; NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static; /* get the PID of a HARQ process awaiting retrnasmission, or -1 otherwise */ sched_pdsch->dl_harq_pid = sched_ctrl->retrans_dl_harq.head; - layers[UE_id] = ps->nrOfLayers; // initialization of layers to the previous value in the strcuture + UE->layers = ps->nrOfLayers; // initialization of layers to the previous value in the strcuture /* Calculate Throughput */ const float a = 0.0005f; // corresponds to 200ms window - const uint32_t b = stats->current_bytes; - thr_ue[UE_id] = (1 - a) * thr_ue[UE_id] + a * b; + const uint32_t b = UE->mac_stats.dl.current_bytes; + UE->dl_thr_ue = (1 - a) * UE->dl_thr_ue + a * b; /* retransmission */ if (sched_pdsch->dl_harq_pid >= 0) { /* Allocate retransmission */ - bool r = allocate_dl_retransmission(module_id, frame, slot, rballoc_mask, &n_rb_sched, UE_id, sched_pdsch->dl_harq_pid); + bool r = allocate_dl_retransmission(module_id, frame, slot, rballoc_mask, &n_rb_sched, UE, sched_pdsch->dl_harq_pid); if (!r) { LOG_D(NR_MAC, "%4d.%2d retransmission can NOT be allocated\n", frame, slot); @@ -606,9 +607,12 @@ void pf_dl(module_id_t module_id, } /* reduce max_num_ue once we are sure UE can be allocated, i.e., has CCE */ - max_num_ue--; + remainUEs--; - if (max_num_ue < 0) return; + if (remainUEs == 0) + // we have filled all with mandatory retransmissions + // no need to schedule new transmissions + return; } else { /* Check DL buffer and skip this UE if no bytes and no TA necessary */ if (sched_ctrl->num_total_bytes == 0 && frame != (sched_ctrl->ta_frame + 10) % 1024) @@ -619,7 +623,7 @@ void pf_dl(module_id_t module_id, const int max_mcs_table = ps->mcsTableIdx == 1 ? 
27 : 28; const int max_mcs = min(sched_ctrl->dl_max_mcs, max_mcs_table); sched_pdsch->mcs = get_mcs_from_bler(bo, stats, &sched_ctrl->dl_bler_stats, max_mcs, frame); - layers[UE_id] = set_dl_nrOfLayers(sched_ctrl); + UE->layers = set_dl_nrOfLayers(sched_ctrl); const uint8_t Qm = nr_get_Qm_dl(sched_pdsch->mcs, ps->mcsTableIdx); const uint16_t R = nr_get_code_rate_dl(sched_pdsch->mcs, ps->mcsTableIdx); uint32_t tbs = nr_compute_tbs(Qm, @@ -629,40 +633,26 @@ void pf_dl(module_id_t module_id, 0, /* N_PRB_DMRS * N_DMRS_SLOT */ 0 /* N_PRB_oh, 0 for initialBWP */, 0 /* tb_scaling */, - layers[UE_id]) >> 3; - coeff_ue[UE_id] = (float) tbs / thr_ue[UE_id]; - LOG_D(NR_MAC,"b %d, thr_ue[%d] %f, tbs %d, coeff_ue[%d] %f\n", - b, UE_id, thr_ue[UE_id], tbs, UE_id, coeff_ue[UE_id]); + UE->layers) >> 3; + float coeff_ue = (float) tbs / UE->dl_thr_ue; + LOG_D(NR_MAC,"UE %04x b %d, thr_ue %f, tbs %d, coeff_ue %f\n", + UE->rnti, b, UE->dl_thr_ue, tbs, coeff_ue); /* Create UE_sched list for UEs eligible for new transmission*/ - add_tail_nr_list(&UE_sched, UE_id); + UE_sched[curUE].coef=coeff_ue; + UE_sched[curUE].UE=UE; + curUE++; } } + qsort(UE_sched, sizeofArray(UE_sched), sizeof(*UE_sched), comparator); + UEsched_t *iterator = UE_sched; + const int min_rbSize = 5; /* Loop UE_sched to find max coeff and allocate transmission */ - while (max_num_ue > 0 && n_rb_sched >= min_rbSize && UE_sched.head >= 0) { - /* Find max coeff from UE_sched*/ - int *max = &UE_sched.head; /* assume head is max */ - int *p = &UE_sched.next[*max]; - - while (*p >= 0) { - /* if the current one has larger coeff, save for later */ - if (coeff_ue[*p] > coeff_ue[*max]) - max = p; - - p = &UE_sched.next[*p]; - } - - /* remove the max one: do not use remove_nr_list() it goes through the - * whole list every time. Note that UE_sched.tail might not be set - * correctly anymore */ - const int UE_id = *max; - p = &UE_sched.next[*max]; - *max = UE_sched.next[*max]; - *p = -1; - NR_CellGroupConfig_t *cg = UE_info->CellGroup[UE_id]; + while (remainUEs> 0 && n_rb_sched >= min_rbSize && iterator->UE != NULL) { + NR_CellGroupConfig_t *cg = iterator->UE->CellGroup; NR_BWP_DownlinkDedicated_t *bwpd = cg && @@ -677,26 +667,33 @@ void pf_dl(module_id_t module_id, cg->spCellConfig->spCellConfigDedicated->uplinkConfig ? cg->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP : NULL; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; - const uint16_t rnti = UE_info->rnti[UE_id]; - const NR_SIB1_t *sib1 = RC.nrmac[module_id]->common_channels[0].sib1 ? RC.nrmac[module_id]->common_channels[0].sib1->message.choice.c1->choice.systemInformationBlockType1 : NULL; + NR_UE_sched_ctrl_t *sched_ctrl = &iterator->UE->UE_sched_ctrl; + const uint16_t rnti = iterator->UE->rnti; + const NR_SIB1_t *sib1 = RC.nrmac[module_id]->common_channels[0].sib1 ? + RC.nrmac[module_id]->common_channels[0].sib1->message.choice.c1->choice.systemInformationBlockType1 : + NULL; NR_BWP_t *genericParameters = get_dl_bwp_genericParameters(sched_ctrl->active_bwp, RC.nrmac[module_id]->common_channels[0].ServingCellConfigCommon, sib1); - const int coresetid = (sched_ctrl->active_bwp||bwpd) ? sched_ctrl->coreset->controlResourceSetId : RC.nrmac[module_id]->sched_ctrlCommon->coreset->controlResourceSetId; - const uint16_t bwpSize = coresetid == 0 ? RC.nrmac[module_id]->cset0_bwp_size : NRRIV2BW(genericParameters->locationAndBandwidth, MAX_BWP_SIZE); + const int coresetid = (sched_ctrl->active_bwp||bwpd) ? 
+ sched_ctrl->coreset->controlResourceSetId : + RC.nrmac[module_id]->sched_ctrlCommon->coreset->controlResourceSetId; + const uint16_t bwpSize = coresetid == 0 ? + RC.nrmac[module_id]->cset0_bwp_size : + NRRIV2BW(genericParameters->locationAndBandwidth, MAX_BWP_SIZE); int rbStart = 0; // start wrt BWPstart if (sched_ctrl->available_dl_harq.head < 0) { - LOG_D(MAC, "UE %d RNTI %04x has no free HARQ process, skipping\n", UE_id, UE_info->rnti[UE_id]); + LOG_D(MAC, "RNTI %04x has no free HARQ process, skipping\n", iterator->UE->rnti); + iterator++; continue; } /* Find a free CCE */ const int cid = sched_ctrl->coreset->controlResourceSetId; - const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]); + const uint16_t Y = get_Y(cid%3, slot, iterator->UE->rnti); uint8_t nr_of_candidates; for (int i=0; i<5; i++) { @@ -718,7 +715,8 @@ void pf_dl(module_id_t module_id, Y); if (CCEIndex<0) { - LOG_D(NR_MAC, "%4d.%2d could not find CCE for DL DCI UE %d/RNTI %04x\n", frame, slot, UE_id, rnti); + LOG_D(NR_MAC, "%4d.%2d could not find CCE for DL DCI RNTI %04x\n", frame, slot, rnti); + iterator++; continue; } @@ -726,33 +724,25 @@ void pf_dl(module_id_t module_id, * allocation after CCE alloc fail would be more complex) */ int r_pucch = nr_get_pucch_resource(sched_ctrl->coreset, sched_ctrl->active_ubwp, ubwpd, CCEIndex); - const int alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot, r_pucch, 0); + const int alloc = nr_acknack_scheduling(module_id, iterator->UE, frame, slot, r_pucch, 0); if (alloc<0) { LOG_D(NR_MAC, - "could not find PUCCH for UE %d/%04x@%d.%d\n", - UE_id, + "could not find PUCCH for %04x@%d.%d\n", rnti, frame, slot); mac->pdcch_cand[cid]--; + iterator++; continue; } - /* reduce max_num_ue once we are sure UE can be allocated, i.e., has CCE - * and PUCCH */ - max_num_ue--; - AssertFatal(max_num_ue >= 0, "Illegal max_num_ue %d\n", max_num_ue); sched_ctrl->cce_index = CCEIndex; fill_pdcch_vrb_map(mac, /* CC_id = */ 0, &sched_ctrl->sched_pdcch, CCEIndex, sched_ctrl->aggregation_level); - /* reduce max_num_ue once we are sure UE can be allocated, i.e., has CCE */ - max_num_ue--; - - if (max_num_ue < 0) return; /* MCS has been set above */ const int tda = get_dl_tda(RC.nrmac[module_id], scc, slot); @@ -760,14 +750,14 @@ void pf_dl(module_id_t module_id, NR_sched_pdsch_t *sched_pdsch = &sched_ctrl->sched_pdsch; NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static; - if (ps->nrOfLayers != layers[UE_id] || ps->time_domain_allocation != tda) { + if (ps->nrOfLayers != iterator->UE->layers || ps->time_domain_allocation != tda ) { nr_set_pdsch_semi_static(sib1, scc, - UE_info->CellGroup[UE_id], + iterator->UE->CellGroup, sched_ctrl->active_bwp, bwpd, tda, - layers[UE_id], + iterator->UE->layers, sched_ctrl, ps); } @@ -812,21 +802,24 @@ void pf_dl(module_id_t module_id, for (int rb = 0; rb < sched_pdsch->rbSize; rb++) rballoc_mask[rb + sched_pdsch->rbStart] ^= slbitmap; + + remainUEs--; + iterator++; } } void nr_fr1_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t slot) { - NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info; + NR_UEs_t *UE_info = &RC.nrmac[module_id]->UE_info; - if (UE_info->num_UEs == 0) + if (UE_info->list[0] == NULL) return; NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels[0].ServingCellConfigCommon; const int CC_id = 0; /* Get bwpSize and TDAfrom the first UE */ /* This is temporary and it assumes all UEs have the same BWP and TDA*/ - int UE_id = UE_info->list.head; - NR_UE_sched_ctrl_t *sched_ctrl = 
&UE_info->UE_sched_ctrl[UE_id]; + NR_UE_info_t *UE=UE_info->list[0]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; const int tda = get_dl_tda(RC.nrmac[module_id], scc, slot); int startSymbolIndex, nrOfSymbols; const struct NR_PDSCH_TimeDomainResourceAllocationList *tdaList = sched_ctrl->active_bwp ? @@ -842,10 +835,10 @@ void nr_fr1_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t sib1); NR_BWP_DownlinkDedicated_t *bwpd = - UE_info->CellGroup[UE_id] && - UE_info->CellGroup[UE_id]->spCellConfig && - UE_info->CellGroup[UE_id]->spCellConfig->spCellConfigDedicated ? - UE_info->CellGroup[UE_id]->spCellConfig->spCellConfigDedicated->initialDownlinkBWP : NULL; + UE->CellGroup && + UE->CellGroup->spCellConfig && + UE->CellGroup->spCellConfig->spCellConfigDedicated ? + UE->CellGroup->spCellConfig->spCellConfigDedicated->initialDownlinkBWP : NULL; const int coresetid = (sched_ctrl->active_bwp||bwpd) ? sched_ctrl->coreset->controlResourceSetId : RC.nrmac[module_id]->sched_ctrlCommon->coreset->controlResourceSetId; @@ -874,7 +867,7 @@ void nr_fr1_dlsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t pf_dl(module_id, frame, slot, - &UE_info->list, + UE_info->list, MAX_MOBILES_PER_GNB, n_rb_sched, rballoc_mask); @@ -921,18 +914,17 @@ void nr_schedule_ue_spec(module_id_t module_id, gNB_mac->pre_processor_dl(module_id, frame, slot); const int CC_id = 0; NR_ServingCellConfigCommon_t *scc = gNB_mac->common_channels[CC_id].ServingCellConfigCommon; - NR_UE_info_t *UE_info = &gNB_mac->UE_info; + NR_UEs_t *UE_info = &gNB_mac->UE_info; nfapi_nr_dl_tti_request_body_t *dl_req = &gNB_mac->DL_req[CC_id].dl_tti_request_body; - NR_list_t *UE_list = &UE_info->list; - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + UE_iterator(UE_info->list, UE) { + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; if (sched_ctrl->ul_failure==1 && get_softmodem_params()->phy_test==0) continue; NR_sched_pdsch_t *sched_pdsch = &sched_ctrl->sched_pdsch; - UE_info->mac_stats[UE_id].dl.current_bytes = 0; - NR_CellGroupConfig_t *cg = UE_info->CellGroup[UE_id]; + UE->mac_stats.dl.current_bytes = 0; + NR_CellGroupConfig_t *cg = UE->CellGroup; NR_BWP_DownlinkDedicated_t *bwpd = cg && @@ -946,13 +938,13 @@ void nr_schedule_ue_spec(module_id_t module_id, * If we add the CE, ta_apply will be reset */ if (frame == (sched_ctrl->ta_frame + 10) % 1024) { sched_ctrl->ta_apply = true; /* the timer is reset once TA CE is scheduled */ - LOG_D(NR_MAC, "[UE %d][%d.%d] UL timing alignment procedures: setting flag for Timing Advance command\n", UE_id, frame, slot); + LOG_D(NR_MAC, "[UE %04x][%d.%d] UL timing alignment procedures: setting flag for Timing Advance command\n", UE->rnti, frame, slot); } if (sched_pdsch->rbSize <= 0) continue; - const rnti_t rnti = UE_info->rnti[UE_id]; + const rnti_t rnti = UE->rnti; /* pre-computed PDSCH values that only change if time domain * allocation/DMRS parameters change. 
Updated in the preprocessor through * nr_set_pdsch_semi_static() */ @@ -968,8 +960,8 @@ void nr_schedule_ue_spec(module_id_t module_id, /* PP has not selected a specific HARQ Process, get a new one */ current_harq_pid = sched_ctrl->available_dl_harq.head; AssertFatal(current_harq_pid >= 0, - "no free HARQ process available for UE %d\n", - UE_id); + "no free HARQ process available for UE %04x\n", + UE->rnti); remove_front_nr_list(&sched_ctrl->available_dl_harq); sched_pdsch->dl_harq_pid = current_harq_pid; } else { @@ -989,12 +981,11 @@ void nr_schedule_ue_spec(module_id_t module_id, harq->feedback_frame = pucch->frame; harq->feedback_slot = pucch->ul_slot; harq->is_waiting = true; - UE_info->mac_stats[UE_id].dl.rounds[harq->round]++; + UE->mac_stats.dl.rounds[harq->round]++; LOG_D(NR_MAC, - "%4d.%2d [DLSCH/PDSCH/PUCCH] UE %d RNTI %04x DCI L %d start %3d RBs %3d startSymbol %2d nb_symbol %2d dmrspos %x MCS %2d nrOfLayers %d TBS %4d HARQ PID %2d round %d RV %d NDI %d dl_data_to_ULACK %d (%d.%d) PUCCH allocation %d TPC %d\n", + "%4d.%2d [DLSCH/PDSCH/PUCCH] RNTI %04x DCI L %d start %3d RBs %3d startSymbol %2d nb_symbol %2d dmrspos %x MCS %2d nrOfLayers %d TBS %4d HARQ PID %2d round %d RV %d NDI %d dl_data_to_ULACK %d (%d.%d) PUCCH allocation %d TPC %d\n", frame, slot, - UE_id, rnti, sched_ctrl->aggregation_level, sched_pdsch->rbStart, @@ -1037,7 +1028,7 @@ void nr_schedule_ue_spec(module_id_t module_id, dl_tti_pdcch_pdu->PDUSize = (uint8_t)(2+sizeof(nfapi_nr_dl_tti_pdcch_pdu)); dl_req->nPDUs += 1; pdcch_pdu = &dl_tti_pdcch_pdu->pdcch_pdu.pdcch_pdu_rel15; - LOG_D(NR_MAC,"Trying to configure DL pdcch for UE %d, bwp %d, cs %d\n",UE_id,bwp_id,coresetid); + LOG_D(NR_MAC,"Trying to configure DL pdcch for UE %04x, bwp %d, cs %d\n", UE->rnti, bwp_id, coresetid); NR_ControlResourceSet_t *coreset = (bwp||bwpd)? sched_ctrl->coreset:gNB_mac->sched_ctrlCommon->coreset; nr_configure_pdcch(pdcch_pdu, coreset, genericParameters, &sched_ctrl->sched_pdcch); gNB_mac->pdcch_pdu_idx[CC_id][coresetid] = pdcch_pdu; @@ -1099,8 +1090,8 @@ void nr_schedule_ue_spec(module_id_t module_id, pdsch_pdu->NrOfSymbols = ps->nrOfSymbols; // Precoding if (sched_ctrl->set_pmi) { - int report_id = sched_ctrl->CSI_report.cri_ri_li_pmi_cqi_report.csi_report_id; - nr_csi_report_t *csi_report = &UE_info->csi_report_template[UE_id][report_id]; + const int report_id = sched_ctrl->CSI_report.cri_ri_li_pmi_cqi_report.csi_report_id; + nr_csi_report_t *csi_report = &UE->csi_report_template[report_id]; pdsch_pdu->precodingAndBeamforming.prg_size = pdsch_pdu->rbSize; pdsch_pdu->precodingAndBeamforming.prgs_list[0].pm_idx = set_pm_index(sched_ctrl, nrOfLayers, @@ -1241,17 +1232,16 @@ void nr_schedule_ue_spec(module_id_t module_id, * from RLC or encode MAC CEs. 
The TX_req structure is filled below * or copy data to FAPI structures */ LOG_D(NR_MAC, - "%d.%2d DL retransmission UE %d/RNTI %04x HARQ PID %d round %d NDI %d\n", + "%d.%2d DL retransmission RNTI %04x HARQ PID %d round %d NDI %d\n", frame, slot, - UE_id, rnti, current_harq_pid, harq->round, harq->ndi); AssertFatal(harq->sched_pdsch.tb_size == TBS, - "UE %d mismatch between scheduled TBS and buffered TB for HARQ PID %d\n", - UE_id, + "UE %04x mismatch between scheduled TBS and buffered TB for HARQ PID %d\n", + UE->rnti, current_harq_pid); T(T_GNB_MAC_RETRANSMISSION_DL_PDU_WITH_DATA, T_INT(module_id), T_INT(CC_id), T_INT(rnti), T_INT(frame), T_INT(slot), T_INT(current_harq_pid), T_INT(harq->round), T_BUFFER(harq->transportBlock, TBS)); @@ -1323,7 +1313,7 @@ void nr_schedule_ue_spec(module_id_t module_id, lcid_bytes += len; } - UE_info->mac_stats[UE_id].dl.lc_bytes[lcid] += lcid_bytes; + UE->mac_stats.dl.lc_bytes[lcid] += lcid_bytes; } } else if (get_softmodem_params()->phy_test || get_softmodem_params()->do_ra) { /* we will need the large header, phy-test typically allocates all @@ -1361,10 +1351,8 @@ void nr_schedule_ue_spec(module_id_t module_id, buf=bufEnd; } - NR_mac_stats_t *mac_stats = &UE_info->mac_stats[UE_id]; - mac_stats->dl.total_bytes += TBS; - mac_stats->dl.current_bytes = TBS; - + UE->mac_stats.dl.total_bytes += TBS; + UE->mac_stats.dl.current_bytes = TBS; /* save retransmission information */ harq->sched_pdsch = *sched_pdsch; /* save which time allocation has been used, to be used on @@ -1376,10 +1364,10 @@ void nr_schedule_ue_spec(module_id_t module_id, sched_ctrl->ta_apply = false; sched_ctrl->ta_frame = frame; LOG_D(NR_MAC, - "%d.%2d UE %d TA scheduled, resetting TA frame\n", + "%d.%2d UE %04x TA scheduled, resetting TA frame\n", frame, slot, - UE_id); + UE->rnti); } T(T_GNB_MAC_DL_PDU_WITH_DATA, T_INT(module_id), T_INT(CC_id), T_INT(rnti), diff --git a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_phytest.c b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_phytest.c index 8cca07a51a9..8a0e966a8ad 100644 --- a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_phytest.c +++ b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_phytest.c @@ -69,7 +69,6 @@ void nr_schedule_css_dlsch_phytest(module_id_t module_idP, nfapi_nr_pdu_t *TX_req; uint16_t rnti = 0x1234; - // int time_domain_assignment,k0; NR_ServingCellConfigCommon_t *scc=cc->ServingCellConfigCommon; @@ -266,21 +265,16 @@ void nr_preprocessor_phytest(module_id_t module_id, { if (!is_xlsch_in_slot(dlsch_slot_bitmap, slot)) return; - NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info; + NR_UE_info_t *UE = RC.nrmac[module_id]->UE_info.list[0]; NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels[0].ServingCellConfigCommon; - const int UE_id = 0; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; const int CC_id = 0; - AssertFatal(UE_info->active[UE_id], - "%s(): expected UE %d to be active\n", - __func__, - UE_id); - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; const int tda = get_dl_tda(RC.nrmac[module_id], scc, slot); NR_pdsch_semi_static_t *ps = &sched_ctrl->pdsch_semi_static; ps->nrOfLayers = target_dl_Nl; if (ps->time_domain_allocation != tda || ps->nrOfLayers != target_dl_Nl) - nr_set_pdsch_semi_static(NULL, scc, UE_info->CellGroup[UE_id], sched_ctrl->active_bwp, NULL, tda, target_dl_Nl, sched_ctrl, ps); + nr_set_pdsch_semi_static(NULL, scc, UE->CellGroup, sched_ctrl->active_bwp, NULL, tda, target_dl_Nl,sched_ctrl , ps); /* find largest unallocated chunk */ const int bwpSize = 
NRRIV2BW(sched_ctrl->active_bwp->bwp_Common->genericParameters.locationAndBandwidth, MAX_BWP_SIZE); @@ -316,7 +310,7 @@ void nr_preprocessor_phytest(module_id_t module_id, sched_ctrl->dl_lc_num = 1; const int lcid = DL_SCH_LCID_DTCH; sched_ctrl->dl_lc_ids[sched_ctrl->dl_lc_num - 1] = lcid; - const uint16_t rnti = UE_info->rnti[UE_id]; + const uint16_t rnti = UE->rnti; /* update sched_ctrl->num_total_bytes so that postprocessor schedules data, * if available */ sched_ctrl->rlc_status[lcid] = mac_rlc_status_ind(module_id, @@ -343,7 +337,7 @@ void nr_preprocessor_phytest(module_id_t module_id, AssertFatal(nr_of_candidates>0,"nr_of_candidates is 0\n"); const int cid = sched_ctrl->coreset->controlResourceSetId; - const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]); + const uint16_t Y = get_Y(cid%3, slot, UE->rnti); int CCEIndex = find_pdcch_candidate(RC.nrmac[module_id], CC_id, @@ -354,17 +348,16 @@ void nr_preprocessor_phytest(module_id_t module_id, Y); AssertFatal(CCEIndex >= 0, - "%s(): could not find CCE for UE %d\n", + "%s(): could not find CCE for UE %04x\n", __func__, - UE_id); + UE->rnti); int r_pucch = nr_get_pucch_resource(sched_ctrl->coreset, sched_ctrl->active_ubwp, NULL, CCEIndex); - const int alloc = nr_acknack_scheduling(module_id, UE_id, frame, slot, r_pucch, 0); + const int alloc = nr_acknack_scheduling(module_id, UE, frame, slot, r_pucch, 0); if (alloc < 0) { LOG_D(MAC, - "%s(): could not find PUCCH for UE %d/%04x@%d.%d\n", + "%s(): could not find PUCCH for UE %04x@%d.%d\n", __func__, - UE_id, rnti, frame, slot); @@ -422,19 +415,16 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_ NR_COMMON_channels_t *cc = nr_mac->common_channels; NR_ServingCellConfigCommon_t *scc = cc->ServingCellConfigCommon; const int mu = scc->uplinkConfigCommon->initialUplinkBWP->genericParameters.subcarrierSpacing; - NR_UE_info_t *UE_info = &nr_mac->UE_info; + NR_UE_info_t *UE = nr_mac->UE_info.list[0]; - AssertFatal(UE_info->num_UEs <= 1, - "%s() cannot handle more than one UE, but found %d\n", - __func__, - UE_info->num_UEs); - if (UE_info->num_UEs == 0) + AssertFatal(nr_mac->UE_info.list[1] == NULL, + "cannot handle more than one UE\n"); + if (UE == NULL) return false; - const int UE_id = 0; const int CC_id = 0; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; const struct NR_PUSCH_TimeDomainResourceAllocationList *tdaList = sched_ctrl->active_ubwp->bwp_Common->pusch_ConfigCommon->choice.setup->pusch_TimeDomainAllocationList; @@ -523,7 +513,7 @@ bool nr_ul_preprocessor_phytest(module_id_t module_id, frame_t frame, sub_frame_ AssertFatal(nr_of_candidates>0,"nr_of_candidates is 0\n"); const int cid = sched_ctrl->coreset->controlResourceSetId; - const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]); + const uint16_t Y = get_Y(cid%3, slot, UE->rnti); int CCEIndex = find_pdcch_candidate(nr_mac, CC_id, diff --git a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_primitives.c b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_primitives.c index 1e749a95923..574cfa26155 100644 --- a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_primitives.c +++ b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_primitives.c @@ -288,7 +288,7 @@ NR_PDSCH_TimeDomainResourceAllocationList_t *get_pdsch_TimeDomainAllocationList( } -NR_ControlResourceSet_t *get_coreset(module_id_t module_idP, +NR_ControlResourceSet_t *get_coreset(gNB_MAC_INST *nrmac, NR_ServingCellConfigCommon_t *scc, void *bwp, NR_SearchSpace_t *ss, @@ -298,7 +298,7 @@ 
NR_ControlResourceSet_t *get_coreset(module_id_t module_idP, if (ss_type == NR_SearchSpace__searchSpaceType_PR_common) { // common search space NR_ControlResourceSet_t *coreset; if(coreset_id == 0) { - coreset = RC.nrmac[module_idP]->sched_ctrlCommon->coreset; // this is coreset 0 + coreset = nrmac->sched_ctrlCommon->coreset; // this is coreset 0 } else if (bwp) { coreset = ((NR_BWP_Downlink_t*)bwp)->bwp_Common->pdcch_ConfigCommon->choice.setup->commonControlResourceSet; } else if (scc->downlinkConfigCommon->initialDownlinkBWP->pdcch_ConfigCommon->choice.setup->commonControlResourceSet) { @@ -607,11 +607,10 @@ void nr_set_pdsch_semi_static(const NR_SIB1_t *sib1, bwpd->pdsch_Config && bwpd->pdsch_Config->choice.setup && bwpd->pdsch_Config->choice.setup->mcs_Table) { - if (*bwpd->pdsch_Config->choice.setup->mcs_Table == 0) { + if (*bwpd->pdsch_Config->choice.setup->mcs_Table == 0) ps->mcsTableIdx = 1; - } else { + else ps->mcsTableIdx = 2; - } } else { ps->mcsTableIdx = 0; } @@ -628,9 +627,9 @@ void nr_set_pdsch_semi_static(const NR_SIB1_t *sib1, ps->mapping_type = tdaList->list.array[tda]->mappingType; if (pdsch_Config) { if (ps->mapping_type == NR_PDSCH_TimeDomainResourceAllocation__mappingType_typeB) - ps->dmrsConfigType = pdsch_Config->dmrs_DownlinkForPDSCH_MappingTypeB->choice.setup->dmrs_Type == NULL ? 0 : 1; + ps->dmrsConfigType = pdsch_Config->dmrs_DownlinkForPDSCH_MappingTypeB->choice.setup->dmrs_Type != NULL; else - ps->dmrsConfigType = pdsch_Config->dmrs_DownlinkForPDSCH_MappingTypeA->choice.setup->dmrs_Type == NULL ? 0 : 1; + ps->dmrsConfigType = pdsch_Config->dmrs_DownlinkForPDSCH_MappingTypeA->choice.setup->dmrs_Type != NULL; } else ps->dmrsConfigType = NFAPI_NR_DMRS_TYPE1; @@ -2135,10 +2134,11 @@ int extract_length(int startSymbolAndLength) { /* * Dump the UL or DL UE_info into LOG_T(MAC) */ -void dump_nr_list(NR_list_t *listP) +void dump_nr_list(NR_UE_info_t **list) { - for (int j = listP->head; j >= 0; j = listP->next[j]) - LOG_T(NR_MAC, "NR list node %d => %d\n", j, listP->next[j]); + UE_iterator(list, UE) { + LOG_T(NR_MAC, "NR list UEs rntis %04x\n", (*list)->rnti); + } } /* @@ -2193,7 +2193,7 @@ void destroy_nr_list(NR_list_t *list) { free(list->next); } - +//------------------------------------------------------------------------------ /* * Add an ID to an NR_list at the end, traversing the whole list. 
Note: * add_tail_nr_list() is a faster alternative, but this implementation ensures @@ -2222,16 +2222,7 @@ void remove_nr_list(NR_list_t *listP, int id) prev = cur; cur = &listP->next[*cur]; } - if (*cur == -1) { - cur = &listP->head; - prev=&listP->head; - while (*cur != -1 && *cur != id) { - LOG_I(NR_MAC,"remove_nr_list : id %d, *cur %d\n",id,*cur); - prev = cur; - cur = &listP->next[*cur]; - } - AssertFatal(1==0, "ID %d not found in UE_list\n", id); - } + AssertFatal(*cur != -1, "ID %d not found in UE_list\n", id); int *next = &listP->next[*cur]; *cur = listP->next[*cur]; *next = -1; @@ -2276,21 +2267,17 @@ void remove_front_nr_list(NR_list_t *listP) listP->tail = -1; } -int find_nr_UE_id(module_id_t mod_idP, rnti_t rntiP) +NR_UE_info_t * find_nr_UE(NR_UEs_t* UEs, rnti_t rntiP) //------------------------------------------------------------------------------ { - int UE_id; - NR_UE_info_t *UE_info = &RC.nrmac[mod_idP]->UE_info; - - for (UE_id = 0; UE_id < MAX_MOBILES_PER_GNB; UE_id++) { - if (UE_info->active[UE_id]) { - if (UE_info->rnti[UE_id] == rntiP) { - return UE_id; - } + UE_iterator(UEs->list, UE) { + if (UE->rnti == rntiP) { + LOG_D(NR_MAC,"Search and found rnti: %04x\n", rntiP); + return UE; } } - - return -1; + LOG_W(NR_MAC,"Search for not existing rnti: %04x\n", rntiP); + return NULL; } uint16_t get_Y(int cid, int slot, rnti_t rnti) { @@ -2363,39 +2350,80 @@ int get_ul_bwp_id(const NR_ServingCellConfig_t *servingCellConfig) return 1; } +/* hack data to remove UE in the phy */ +int rnti_to_remove[10]; +volatile int rnti_to_remove_count; +pthread_mutex_t rnti_to_remove_mutex = PTHREAD_MUTEX_INITIALIZER; + +void delete_nr_ue_data(NR_UE_info_t *UE, NR_COMMON_channels_t *ccPtr) +{ + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; + destroy_nr_list(&sched_ctrl->available_dl_harq); + destroy_nr_list(&sched_ctrl->feedback_dl_harq); + destroy_nr_list(&sched_ctrl->retrans_dl_harq); + destroy_nr_list(&sched_ctrl->available_ul_harq); + destroy_nr_list(&sched_ctrl->feedback_ul_harq); + destroy_nr_list(&sched_ctrl->retrans_ul_harq); + LOG_I(NR_MAC, "Remove NR rnti 0x%04x\n", UE->rnti); + const rnti_t rnti = UE->rnti; + free(UE); + /* hack to remove UE in the phy */ + if (pthread_mutex_lock(&rnti_to_remove_mutex)) + exit(1); + if (rnti_to_remove_count == 10) + exit(1); + rnti_to_remove[rnti_to_remove_count] = rnti; + LOG_W(NR_MAC, "to remove in mac rnti_to_remove[%d] = 0x%04x\n", rnti_to_remove_count, rnti); + rnti_to_remove_count++; + if (pthread_mutex_unlock(&rnti_to_remove_mutex)) + exit(1); + + /* clear RA process(es?) associated to the UE */ + for (int cc_id = 0; cc_id < NFAPI_CC_MAX; cc_id++) { + for (int i = 0; i < NR_NB_RA_PROC_MAX; i++) { + NR_COMMON_channels_t *cc = &ccPtr[cc_id]; + if (cc->ra[i].rnti == rnti) { + LOG_D(NR_MAC, "free RA process %d for rnti %04x\n", i, rnti); + /* is it enough? 
*/ + cc->ra[i].cfra = false; + cc->ra[i].rnti = 0; + cc->ra[i].crnti = 0; + } + } + } +} + //------------------------------------------------------------------------------ -int add_new_nr_ue(module_id_t mod_idP, rnti_t rntiP, NR_CellGroupConfig_t *CellGroup) +NR_UE_info_t *add_new_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rntiP, NR_CellGroupConfig_t *CellGroup) { - gNB_MAC_INST *nr_mac = RC.nrmac[mod_idP]; NR_ServingCellConfigCommon_t *scc = nr_mac->common_channels[0].ServingCellConfigCommon; - NR_UE_info_t *UE_info = &nr_mac->UE_info; - LOG_I(NR_MAC, "[gNB %d] Adding UE with rnti 0x%04x (num_UEs %d)\n", - mod_idP, - rntiP, - UE_info->num_UEs); - dump_nr_list(&UE_info->list); - - for (int i = 0; i < MAX_MOBILES_PER_GNB; i++) { - if (UE_info->active[i]) { - LOG_D(NR_MAC,"UE %x is active, skipping\n",rntiP); - continue; - } - int UE_id = i; - UE_info->num_UEs++; - UE_info->active[UE_id] = true; - if (CellGroup) UE_info->Msg4_ACKed[UE_id] = true; - else UE_info->Msg4_ACKed[UE_id] = false; - UE_info->rnti[UE_id] = rntiP; - UE_info->CellGroup[UE_id] = CellGroup; - add_nr_list(&UE_info->list, UE_id); - memset(&UE_info->mac_stats[UE_id], 0, sizeof(NR_mac_stats_t)); - if (CellGroup && - CellGroup->spCellConfig && - CellGroup->spCellConfig->spCellConfigDedicated && - CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig && - CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig->choice.setup) - compute_csi_bitlen (CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig->choice.setup, UE_info, UE_id, mod_idP); - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UEs_t *UE_info = &nr_mac->UE_info; + LOG_I(NR_MAC, "Adding UE with rnti 0x%04x\n", + rntiP); + dump_nr_list(UE_info->list); + + // We will attach at the end, to mitigate race conditions + // This is not good, but we will fix it progressively + NR_UE_info_t *UE=calloc(1,sizeof(NR_UE_info_t)); + if(!UE) { + LOG_E(NR_MAC,"want to add UE %04x but the fixed allocated size is full\n",rntiP); + return NULL; + } + + UE->rnti = rntiP; + UE->CellGroup = CellGroup; + + if (CellGroup) + UE->Msg4_ACKed = true; + else + UE->Msg4_ACKed = false; + if (CellGroup && + CellGroup->spCellConfig && + CellGroup->spCellConfig->spCellConfigDedicated && + CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig && + CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig->choice.setup) + compute_csi_bitlen (CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig->choice.setup, UE); + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; memset(sched_ctrl, 0, sizeof(*sched_ctrl)); sched_ctrl->dl_max_mcs = 28; /* do not limit MCS for individual UEs */ sched_ctrl->set_pmi = false; @@ -2406,7 +2434,6 @@ int add_new_nr_ue(module_id_t mod_idP, rnti_t rntiP, NR_CellGroupConfig_t *CellG sched_ctrl->pucch_consecutive_dtx_cnt = 0; sched_ctrl->pusch_consecutive_dtx_cnt = 0; sched_ctrl->ul_failure = 0; - sched_ctrl->sched_srs.frame = -1; sched_ctrl->sched_srs.slot = -1; sched_ctrl->sched_srs.srs_scheduled = false; @@ -2416,11 +2443,8 @@ int add_new_nr_ue(module_id_t mod_idP, rnti_t rntiP, NR_CellGroupConfig_t *CellG sched_ctrl->pusch_semi_static.time_domain_allocation = -1; const NR_ServingCellConfig_t *servingCellConfig = CellGroup && CellGroup->spCellConfig ? CellGroup->spCellConfig->spCellConfigDedicated : NULL; - /* Set default BWPs */ - const struct NR_ServingCellConfig__downlinkBWP_ToAddModList *bwpList = servingCellConfig ? 
servingCellConfig->downlinkBWP_ToAddModList : NULL; - if (bwpList) AssertFatal(bwpList->list.count <= NR_MAX_NUM_BWP, - "downlinkBWP_ToAddModList has %d BWP!\n", - bwpList->list.count); + /* Set default BWPs */ + const struct NR_ServingCellConfig__downlinkBWP_ToAddModList *bwpList = servingCellConfig ? servingCellConfig->downlinkBWP_ToAddModList : NULL; const int bwp_id = servingCellConfig ? *servingCellConfig->firstActiveDownlinkBWP_Id : 0; sched_ctrl->active_bwp = bwpList && bwp_id > 0 ? bwpList->list.array[bwp_id - 1] : NULL; @@ -2433,10 +2457,12 @@ int add_new_nr_ue(module_id_t mod_idP, rnti_t rntiP, NR_CellGroupConfig_t *CellG scc, sched_ctrl->active_bwp ? sched_ctrl->active_bwp->bwp_Dedicated : NULL, target_ss); - sched_ctrl->coreset = get_coreset(mod_idP, scc, - sched_ctrl->active_bwp ? (void*)sched_ctrl->active_bwp->bwp_Dedicated : NULL, - sched_ctrl->search_space, target_ss); - sched_ctrl->sched_pdcch = set_pdcch_structure(RC.nrmac[mod_idP], + sched_ctrl->coreset = get_coreset(nr_mac, + scc, + sched_ctrl->active_bwp ? sched_ctrl->active_bwp->bwp_Dedicated : NULL, + sched_ctrl->search_space, + target_ss); + sched_ctrl->sched_pdcch = set_pdcch_structure(nr_mac, sched_ctrl->search_space, sched_ctrl->coreset, scc, @@ -2445,16 +2471,18 @@ int add_new_nr_ue(module_id_t mod_idP, rnti_t rntiP, NR_CellGroupConfig_t *CellG sched_ctrl->next_dl_bwp_id = -1; sched_ctrl->next_ul_bwp_id = -1; const struct NR_UplinkConfig__uplinkBWP_ToAddModList *ubwpList = servingCellConfig ? servingCellConfig->uplinkConfig->uplinkBWP_ToAddModList : NULL; - if (ubwpList) AssertFatal(ubwpList->list.count <= NR_MAX_NUM_BWP, + if (ubwpList) + AssertFatal(ubwpList->list.count <= NR_MAX_NUM_BWP, "uplinkBWP_ToAddModList has %d BWP!\n", ubwpList->list.count); const int ul_bwp_id = servingCellConfig ? *servingCellConfig->uplinkConfig->firstActiveUplinkBWP_Id : 0; sched_ctrl->active_ubwp = ubwpList && ul_bwp_id > 0 ? ubwpList->list.array[ul_bwp_id - 1] : NULL; /* get Number of HARQ processes for this UE */ - if (servingCellConfig) AssertFatal(servingCellConfig->pdsch_ServingCellConfig->present == NR_SetupRelease_PDSCH_ServingCellConfig_PR_setup, - "no pdsch-ServingCellConfig found for UE %d\n", - UE_id); + if (servingCellConfig) + AssertFatal(servingCellConfig->pdsch_ServingCellConfig->present == NR_SetupRelease_PDSCH_ServingCellConfig_PR_setup, + "no pdsch-ServingCellConfig found for UE %04x\n", + UE->rnti); const NR_PDSCH_ServingCellConfig_t *pdsch = servingCellConfig ? 
servingCellConfig->pdsch_ServingCellConfig->choice.setup : NULL; // pdsch == NULL in SA -> will create default (8) number of HARQ processes create_dl_harq_list(sched_ctrl, pdsch); @@ -2465,21 +2493,27 @@ int add_new_nr_ue(module_id_t mod_idP, rnti_t rntiP, NR_CellGroupConfig_t *CellG add_tail_nr_list(&sched_ctrl->available_ul_harq, harq); create_nr_list(&sched_ctrl->feedback_ul_harq, 16); create_nr_list(&sched_ctrl->retrans_ul_harq, 16); - LOG_D(NR_MAC, "[gNB %d] Add NR UE_id %d : rnti %x\n", - mod_idP, - UE_id, - rntiP); - dump_nr_list(&UE_info->list); - return (UE_id); + + pthread_mutex_lock(&UE_info->mutex); + int i; + for(i=0; i<MAX_MOBILES_PER_GNB; i++) + if (UE_info->list[i] == NULL) { + UE_info->list[i] = UE; + break; + } + if (i == MAX_MOBILES_PER_GNB) { + LOG_E(NR_MAC,"Try to add UE %04x but the list is full\n", rntiP); + delete_nr_ue_data(UE, nr_mac->common_channels); + pthread_mutex_unlock(&UE_info->mutex); + return NULL; } + pthread_mutex_unlock(&UE_info->mutex); - // printf("MAC: cannot add new UE for rnti %x\n", rntiP); - LOG_E(NR_MAC, "error in add_new_ue(), could not find space in UE_info, Dumping UE list\n"); - dump_nr_list(&UE_info->list); - return -1; + LOG_D(NR_MAC, "Add NR rnti %x\n", rntiP); + dump_nr_list(UE_info->list); + return (UE); } - void create_dl_harq_list(NR_UE_sched_ctrl_t *sched_ctrl, const NR_PDSCH_ServingCellConfig_t *pdsch) { const int nrofHARQ = pdsch && pdsch->nrofHARQ_ProcessesForPDSCH ? @@ -2512,66 +2546,30 @@ void create_dl_harq_list(NR_UE_sched_ctrl_t *sched_ctrl, } } -/* hack data to remove UE in the phy */ -int rnti_to_remove[10]; -volatile int rnti_to_remove_count; -pthread_mutex_t rnti_to_remove_mutex = PTHREAD_MUTEX_INITIALIZER; - -void mac_remove_nr_ue(module_id_t mod_id, rnti_t rnti) +void mac_remove_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rnti) { - int UE_id; - int i; - int cc_id; - NR_UE_info_t *UE_info = &RC.nrmac[mod_id]->UE_info; - - for (i = 0; i < MAX_MOBILES_PER_GNB; i++) { - - if (UE_info->active[i] != TRUE) - continue; - if (UE_info->rnti[i] != rnti) - continue; - - /* UE found, remove it */ - UE_id = i; - - UE_info->num_UEs--; - UE_info->active[UE_id] = FALSE; - UE_info->rnti[UE_id] = 0; - remove_nr_list(&UE_info->list, UE_id); - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; - destroy_nr_list(&sched_ctrl->available_dl_harq); - destroy_nr_list(&sched_ctrl->feedback_dl_harq); - destroy_nr_list(&sched_ctrl->retrans_dl_harq); - destroy_nr_list(&sched_ctrl->available_ul_harq); - destroy_nr_list(&sched_ctrl->feedback_ul_harq); - destroy_nr_list(&sched_ctrl->retrans_ul_harq); - LOG_I(NR_MAC, "[gNB %d] Remove NR UE_id %d: rnti 0x%04x\n", - mod_id, - UE_id, - rnti); - - /* hack to remove UE in the phy */ - if (pthread_mutex_lock(&rnti_to_remove_mutex)) exit(1); - if (rnti_to_remove_count == 10) exit(1); - rnti_to_remove[rnti_to_remove_count] = rnti; - LOG_W(NR_MAC, "to remove in mac rnti_to_remove[%d] = 0x%04x\n", rnti_to_remove_count, rnti); - rnti_to_remove_count++; - if (pthread_mutex_unlock(&rnti_to_remove_mutex)) exit(1); - } - - /* clear RA process(es?) associated to the UE */ - for (cc_id = 0; cc_id < NFAPI_CC_MAX; cc_id++) { - NR_COMMON_channels_t *cc = &RC.nrmac[mod_id]->common_channels[cc_id]; - for (i = 0; i < NR_NB_RA_PROC_MAX; i++) { - if (cc->ra[i].rnti == rnti) { - LOG_D(NR_MAC, "free RA process %d for rnti %d\n", i, rnti); - /* is it enough? 
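The locked append in add_new_nr_ue() above and the list rebuild that mac_remove_nr_ue() performs just below protect the same invariant: the array stays hole-free and NULL-terminated, so iterating readers never see a gap. A rough sketch of that discipline under a mutex, with simplified placeholder types (ue_table_t, add_ue, remove_ue are illustrative, not the actual gNB_MAC_INST fields):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_UES 8

typedef uint16_t rnti_t;
typedef struct { rnti_t rnti; } ue_t;

typedef struct {
  ue_t *list[MAX_UES + 1];      /* +1 guarantees a trailing NULL terminator */
  pthread_mutex_t mutex;
} ue_table_t;

/* Append into the first free slot; fails when the fixed array is full. */
static bool add_ue(ue_table_t *t, ue_t *ue)
{
  bool ok = false;
  pthread_mutex_lock(&t->mutex);
  for (int i = 0; i < MAX_UES; i++) {
    if (t->list[i] == NULL) {
      t->list[i] = ue;
      ok = true;
      break;
    }
  }
  pthread_mutex_unlock(&t->mutex);
  return ok;
}

/* Remove by rebuilding a compacted copy, so the array stays NULL-terminated
 * with no holes; the removed pointer is handed back to the caller. */
static ue_t *remove_ue(ue_table_t *t, rnti_t rnti)
{
  ue_t *removed = NULL;
  ue_t *copy[MAX_UES + 1] = { 0 };
  int n = 0;
  pthread_mutex_lock(&t->mutex);
  for (int i = 0; i < MAX_UES; i++) {
    if (t->list[i] == NULL)
      continue;
    if (t->list[i]->rnti == rnti)
      removed = t->list[i];
    else
      copy[n++] = t->list[i];
  }
  memcpy(t->list, copy, sizeof(copy));
  pthread_mutex_unlock(&t->mutex);
  return removed;
}

int main(void)
{
  ue_table_t table = { 0 };
  pthread_mutex_init(&table.mutex, NULL);
  ue_t a = { .rnti = 0x1234 };
  if (!add_ue(&table, &a))
    return 1;
  ue_t *gone = remove_ue(&table, 0x1234);
  return gone == &a ? 0 : 1;
}

As in the patch, freeing the removed UE context happens outside the lock, which keeps the critical section short.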
*/ - cc->ra[i].cfra = false; - cc->ra[i].rnti = 0; - cc->ra[i].crnti = 0; - } - } - } + NR_UEs_t *UE_info = &nr_mac->UE_info; + pthread_mutex_lock(&UE_info->mutex); + UE_iterator(UE_info->list, UE) { + if (UE->rnti==rnti) + break; + } + + if (!UE) { + LOG_W(NR_MAC,"Call to del rnti %04x, but not existing\n", rnti); + pthread_mutex_unlock(&UE_info->mutex); + return; + } + + NR_UE_info_t * newUEs[MAX_MOBILES_PER_GNB+1]={0}; + int newListIdx=0; + for (int i=0; i<MAX_MOBILES_PER_GNB; i++) + if(UE_info->list[i] && UE_info->list[i]->rnti != rnti) + newUEs[newListIdx++]=UE_info->list[i]; + memcpy(UE_info->list, newUEs, sizeof(UE_info->list)); + pthread_mutex_unlock(&UE_info->mutex); + + delete_nr_ue_data(UE, nr_mac->common_channels); } void nr_mac_remove_ra_rnti(module_id_t mod_id, rnti_t rnti) { @@ -2595,20 +2593,18 @@ uint8_t nr_get_tpc(int target, uint8_t cqi, int incr) { } -void get_pdsch_to_harq_feedback(int Mod_idP, - int UE_id, +void get_pdsch_to_harq_feedback( NR_UE_info_t * UE, int bwp_id, NR_SearchSpace__searchSpaceType_PR ss_type, int *max_fb_time, uint8_t *pdsch_to_harq_feedback) { - NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info; - NR_CellGroupConfig_t *CellGroup = UE_info->CellGroup[UE_id]; + NR_CellGroupConfig_t *CellGroup = UE->CellGroup; NR_BWP_DownlinkDedicated_t *bwpd=NULL; NR_BWP_UplinkDedicated_t *ubwpd=NULL; if (ss_type == NR_SearchSpace__searchSpaceType_PR_ue_Specific) { - AssertFatal(CellGroup!=NULL,"Cellgroup is not defined for UE_id %d\n",UE_id); + AssertFatal(CellGroup!=NULL,"Cellgroup is not defined for UE %04x\n",UE->rnti); AssertFatal(CellGroup->spCellConfig!=NULL,"Cellgroup->spCellConfig is null\n"); AssertFatal(CellGroup->spCellConfig->spCellConfigDedicated!=NULL,"CellGroup->spCellConfig->spCellConfigDedicated is null\n"); } @@ -2692,22 +2688,18 @@ void nr_csirs_scheduling(int Mod_idP, int n_slots_frame){ int CC_id = 0; - NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info; - NR_list_t *UE_list = &UE_info->list; + NR_UEs_t *UE_info = &RC.nrmac[Mod_idP]->UE_info; gNB_MAC_INST *gNB_mac = RC.nrmac[Mod_idP]; uint16_t *vrb_map = gNB_mac->common_channels[CC_id].vrb_map; - UE_info->sched_csirs = false; - - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + UE_iterator(UE_info->list, UE) { + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; if (sched_ctrl->rrc_processing_timer > 0) { continue; } - - NR_CellGroupConfig_t *CellGroup = UE_info->CellGroup[UE_id]; + NR_CellGroupConfig_t *CellGroup = UE->CellGroup; if (!CellGroup || !CellGroup->spCellConfig || !CellGroup->spCellConfig->spCellConfigDedicated || !CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig) continue; @@ -2723,6 +2715,7 @@ void nr_csirs_scheduling(int Mod_idP, NR_BWP_t *genericParameters = sched_ctrl->active_bwp ? 
&sched_ctrl->active_bwp->bwp_Common->genericParameters : &gNB_mac->common_channels[0].ServingCellConfigCommon->downlinkConfigCommon->initialDownlinkBWP->genericParameters; + for (int id = 0; id < csi_measconfig->nzp_CSI_RS_ResourceToAddModList->list.count; id++){ nzpcsi = csi_measconfig->nzp_CSI_RS_ResourceToAddModList->list.array[id]; NR_CSI_RS_ResourceMapping_t resourceMapping = nzpcsi->resourceMapping; @@ -2731,7 +2724,7 @@ void nr_csirs_scheduling(int Mod_idP, if((frame*n_slots_frame+slot-offset)%period == 0) { LOG_D(NR_MAC,"Scheduling CSI-RS in frame %d slot %d\n",frame,slot); - UE_info->sched_csirs = true; + UE->sched_csirs = true; nfapi_nr_dl_tti_request_pdu_t *dl_tti_csirs_pdu = &dl_req->dl_tti_pdu_list[dl_req->nPDUs]; memset((void*)dl_tti_csirs_pdu,0,sizeof(nfapi_nr_dl_tti_request_pdu_t)); @@ -2899,18 +2892,17 @@ void nr_csirs_scheduling(int Mod_idP, void nr_mac_update_timers(module_id_t module_id, frame_t frame, sub_frame_t slot) { + NR_UEs_t *UE_info = &RC.nrmac[module_id]->UE_info; - NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info; - const NR_list_t *UE_list = &UE_info->list; - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + UE_iterator(UE_info->list, UE) { + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; if (sched_ctrl->rrc_processing_timer > 0) { sched_ctrl->rrc_processing_timer--; if (sched_ctrl->rrc_processing_timer == 0) { - LOG_I(NR_MAC, "(%d.%d) De-activating RRC processing timer for UE %d\n", frame, slot, UE_id); + LOG_I(NR_MAC, "(%d.%d) De-activating RRC processing timer for UE %04x\n", frame, slot, UE->rnti); const NR_SIB1_t *sib1 = RC.nrmac[module_id]->common_channels[0].sib1 ? RC.nrmac[module_id]->common_channels[0].sib1->message.choice.c1->choice.systemInformationBlockType1 : NULL; - NR_CellGroupConfig_t *cg = UE_info->CellGroup[UE_id]; + NR_CellGroupConfig_t *cg = UE->CellGroup; NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels[0].ServingCellConfigCommon; const NR_ServingCellConfig_t *spCellConfigDedicated = cg && cg->spCellConfig ? 
cg->spCellConfig->spCellConfigDedicated : NULL; @@ -2984,23 +2976,22 @@ void schedule_nr_bwp_switch(module_id_t module_id, frame_t frame, sub_frame_t slot) { - NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info; - const NR_list_t *UE_list = &UE_info->list; + NR_UEs_t *UE_info = &RC.nrmac[module_id]->UE_info; // TODO: Implementation of a algorithm to perform: // - DL BWP selection: sched_ctrl->next_dl_bwp_id = dl_bwp_id // - UL BWP selection: sched_ctrl->next_ul_bwp_id = ul_bwp_id - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + UE_iterator(UE_info->list, UE) { + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; if (sched_ctrl->rrc_processing_timer == 0 && - UE_info->Msg4_ACKed[UE_id] == true && + UE->Msg4_ACKed && ((sched_ctrl->next_dl_bwp_id >= 0 && sched_ctrl->active_bwp && sched_ctrl->active_bwp->bwp_Id != sched_ctrl->next_dl_bwp_id) || (sched_ctrl->next_ul_bwp_id >= 0 && sched_ctrl->active_ubwp && sched_ctrl->active_ubwp->bwp_Id != sched_ctrl->next_ul_bwp_id))) { - LOG_W(NR_MAC,"(%d.%d) [UE_id %d] Schedule BWP switch from dl_bwp_id %ld to %ld and from ul_bwp_id %ld to %ld\n", - frame, slot, UE_id, sched_ctrl->active_bwp->bwp_Id, sched_ctrl->next_dl_bwp_id, sched_ctrl->active_ubwp->bwp_Id, sched_ctrl->next_ul_bwp_id); - nr_mac_rrc_bwp_switch_req(module_id, frame, slot, UE_info->rnti[UE_id], sched_ctrl->next_dl_bwp_id, sched_ctrl->next_ul_bwp_id); + LOG_W(NR_MAC,"%4d.%2d UE %04x Schedule BWP switch from dl_bwp_id %ld to %ld and from ul_bwp_id %ld to %ld\n", + frame, slot, UE->rnti, sched_ctrl->active_bwp->bwp_Id, sched_ctrl->next_dl_bwp_id, sched_ctrl->active_ubwp->bwp_Id, sched_ctrl->next_ul_bwp_id); + nr_mac_rrc_bwp_switch_req(module_id, frame, slot, UE->rnti, sched_ctrl->next_dl_bwp_id, sched_ctrl->next_ul_bwp_id); } } } diff --git a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_srs.c b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_srs.c index 8680cc25d13..c156ebf36db 100644 --- a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_srs.c +++ b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_srs.c @@ -34,18 +34,17 @@ extern RAN_CONTEXT_t RC; -void nr_configure_srs(nfapi_nr_srs_pdu_t *srs_pdu, int module_id, int CC_id, int UE_id, NR_SRS_Resource_t *srs_resource) { +void nr_configure_srs(nfapi_nr_srs_pdu_t *srs_pdu, int module_id, int CC_id,NR_UE_info_t* UE, NR_SRS_Resource_t *srs_resource) { gNB_MAC_INST *nrmac = RC.nrmac[module_id]; NR_ServingCellConfigCommon_t *scc = nrmac->common_channels[CC_id].ServingCellConfigCommon; - NR_UE_info_t *UE_info = &nrmac->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; NR_BWP_t ubwp = sched_ctrl->active_ubwp ? 
sched_ctrl->active_ubwp->bwp_Common->genericParameters : scc->uplinkConfigCommon->initialUplinkBWP->genericParameters; - srs_pdu->rnti = UE_info->rnti[UE_id]; + srs_pdu->rnti = UE->rnti; srs_pdu->handle = 0; srs_pdu->bwp_size = NRRIV2BW(ubwp.locationAndBandwidth, MAX_BWP_SIZE);; srs_pdu->bwp_start = NRRIV2PRBOFFSET(ubwp.locationAndBandwidth, MAX_BWP_SIZE);; @@ -82,7 +81,7 @@ void nr_configure_srs(nfapi_nr_srs_pdu_t *srs_pdu, int module_id, int CC_id, int srs_pdu->t_offset = get_nr_srs_offset(srs_resource->resourceType.choice.periodic->periodicityAndOffset_p); } -void nr_fill_nfapi_srs(int module_id, int CC_id, int UE_id, sub_frame_t slot, NR_SRS_Resource_t *srs_resource) { +void nr_fill_nfapi_srs(int module_id, int CC_id, NR_UE_info_t* UE, sub_frame_t slot, NR_SRS_Resource_t *srs_resource) { nfapi_nr_ul_tti_request_t *future_ul_tti_req = &RC.nrmac[module_id]->UL_tti_req_ahead[0][slot]; AssertFatal(future_ul_tti_req->n_pdus < @@ -94,7 +93,7 @@ void nr_fill_nfapi_srs(int module_id, int CC_id, int UE_id, sub_frame_t slot, NR memset(srs_pdu, 0, sizeof(nfapi_nr_srs_pdu_t)); future_ul_tti_req->n_pdus += 1; - nr_configure_srs(srs_pdu, module_id, CC_id, UE_id, srs_resource); + nr_configure_srs(srs_pdu, module_id, CC_id, UE, srs_resource); } /******************************************************************* @@ -111,21 +110,19 @@ void nr_fill_nfapi_srs(int module_id, int CC_id, int UE_id, sub_frame_t slot, NR void nr_schedule_srs(int module_id, frame_t frame) { gNB_MAC_INST *nrmac = RC.nrmac[module_id]; - NR_UE_info_t *UE_info = &nrmac->UE_info; - const NR_list_t *UE_list = &UE_info->list; - - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + NR_UEs_t *UE_info = &nrmac->UE_info; + UE_iterator(UE_info->list, UE) { const int CC_id = 0; NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels[CC_id].ServingCellConfigCommon; - NR_CellGroupConfig_t *cg = UE_info->CellGroup[UE_id]; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_CellGroupConfig_t *cg = UE->CellGroup; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; sched_ctrl->sched_srs.frame = -1; sched_ctrl->sched_srs.slot = -1; sched_ctrl->sched_srs.srs_scheduled = false; - if(!UE_info->Msg4_ACKed[UE_id] || sched_ctrl->rrc_processing_timer > 0) { + if(!UE->Msg4_ACKed || sched_ctrl->rrc_processing_timer > 0) { continue; } @@ -178,11 +175,11 @@ void nr_schedule_srs(int module_id, frame_t frame) { // Check if UE will transmit the SRS in this frame if ( ((frame - offset/n_slots_frame)*n_slots_frame)%period == 0) { LOG_D(NR_MAC,"Scheduling SRS reception for %d.%d\n", frame, offset%n_slots_frame); - nr_fill_nfapi_srs(module_id, CC_id, UE_id, offset%n_slots_frame, srs_resource); + nr_fill_nfapi_srs(module_id, CC_id, UE, offset%n_slots_frame, srs_resource); sched_ctrl->sched_srs.frame = frame; sched_ctrl->sched_srs.slot = offset%n_slots_frame; sched_ctrl->sched_srs.srs_scheduled = true; } } } -} \ No newline at end of file +} diff --git a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_uci.c b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_uci.c index b9292876e35..9dd147d21fe 100644 --- a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_uci.c +++ b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_uci.c @@ -38,17 +38,15 @@ extern RAN_CONTEXT_t RC; -void nr_fill_nfapi_pucch(module_id_t mod_id, - frame_t frame, - sub_frame_t slot, - const NR_sched_pucch_t *pucch, - int UE_id) -{ - gNB_MAC_INST *nr_mac = RC.nrmac[mod_id]; - NR_UE_info_t *UE_info = &nr_mac->UE_info; +static void nr_fill_nfapi_pucch(gNB_MAC_INST *nrmac, 
+ frame_t frame, + sub_frame_t slot, + const NR_sched_pucch_t *pucch, + NR_UE_info_t* UE) +{ nfapi_nr_ul_tti_request_t *future_ul_tti_req = - &RC.nrmac[mod_id]->UL_tti_req_ahead[0][pucch->ul_slot]; + &nrmac->UL_tti_req_ahead[0][pucch->ul_slot]; AssertFatal(future_ul_tti_req->SFN == pucch->frame && future_ul_tti_req->Slot == pucch->ul_slot, "Current %4d.%2d : future UL_tti_req's frame.slot %4d.%2d does not match PUCCH %4d.%2d\n", @@ -57,8 +55,7 @@ void nr_fill_nfapi_pucch(module_id_t mod_id, future_ul_tti_req->Slot, pucch->frame, pucch->ul_slot); - AssertFatal(future_ul_tti_req->n_pdus < - sizeof(future_ul_tti_req->pdus_list) / sizeof(future_ul_tti_req->pdus_list[0]), + AssertFatal(future_ul_tti_req->n_pdus < sizeofArray(future_ul_tti_req->pdus_list), "Invalid future_ul_tti_req->n_pdus %d\n", future_ul_tti_req->n_pdus); future_ul_tti_req->pdus_list[future_ul_tti_req->n_pdus].pdu_type = NFAPI_NR_UL_CONFIG_PUCCH_PDU_TYPE; future_ul_tti_req->pdus_list[future_ul_tti_req->n_pdus].pdu_size = sizeof(nfapi_nr_pucch_pdu_t); @@ -77,9 +74,9 @@ void nr_fill_nfapi_pucch(module_id_t mod_id, pucch->dai_c, pucch->csi_bits, pucch->resource_indicator); - - NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels->ServingCellConfigCommon; - NR_CellGroupConfig_t *cg=UE_info->CellGroup[UE_id]; + NR_COMMON_channels_t * common_ch=nrmac->common_channels; + NR_ServingCellConfigCommon_t *scc = common_ch->ServingCellConfigCommon; + NR_CellGroupConfig_t *cg=UE->CellGroup; NR_BWP_UplinkDedicated_t *ubwpd = cg && cg->spCellConfig && cg->spCellConfig->spCellConfigDedicated && cg->spCellConfig->spCellConfigDedicated->uplinkConfig ? @@ -88,14 +85,14 @@ void nr_fill_nfapi_pucch(module_id_t mod_id, LOG_D(NR_MAC,"%4d.%2d Calling nr_configure_pucch (ubwpd %p,r_pucch %d) pucch to be scheduled in %4d.%2d\n", frame,slot,ubwpd,pucch->r_pucch,pucch->frame,pucch->ul_slot); - const NR_SIB1_t *sib1 = nr_mac->common_channels[0].sib1 ? nr_mac->common_channels[0].sib1->message.choice.c1->choice.systemInformationBlockType1 : NULL; + const NR_SIB1_t *sib1 = common_ch->sib1 ? 
common_ch->sib1->message.choice.c1->choice.systemInformationBlockType1 : NULL; nr_configure_pucch(sib1, pucch_pdu, scc, - UE_info->CellGroup[UE_id], - UE_info->UE_sched_ctrl[UE_id].active_ubwp, + UE->CellGroup, + UE->UE_sched_ctrl.active_ubwp, ubwpd, - UE_info->rnti[UE_id], + UE->rnti, pucch->resource_indicator, pucch->csi_bits, pucch->dai_c, @@ -139,22 +136,18 @@ int diff_rsrp_ssb_csi_meas_10_1_6_1_2[16] = { }; -void nr_schedule_pucch(int Mod_idP, +void nr_schedule_pucch(gNB_MAC_INST *nrmac, frame_t frameP, sub_frame_t slotP) { - gNB_MAC_INST *nrmac = RC.nrmac[Mod_idP]; if (!is_xlsch_in_slot(nrmac->ulsch_slot_bitmap[slotP / 64], slotP)) return; - NR_UE_info_t *UE_info = &nrmac->UE_info; - const NR_list_t *UE_list = &UE_info->list; - - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + UE_iterator(nrmac->UE_info.list, UE) { + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; const int n = sizeof(sched_ctrl->sched_pucch) / sizeof(*sched_ctrl->sched_pucch); for (int i = 0; i < n; i++) { - NR_sched_pucch_t *curr_pucch = &UE_info->UE_sched_ctrl[UE_id].sched_pucch[i]; + NR_sched_pucch_t *curr_pucch = &UE->UE_sched_ctrl.sched_pucch[i]; const uint16_t O_ack = curr_pucch->dai_c; const uint16_t O_csi = curr_pucch->csi_bits; const uint8_t O_sr = curr_pucch->sr_flag; @@ -162,9 +155,11 @@ void nr_schedule_pucch(int Mod_idP, || frameP != curr_pucch->frame || slotP != curr_pucch->ul_slot) continue; - if (O_csi > 0) LOG_D(NR_MAC,"Scheduling PUCCH[%d] RX for UE %d in %4d.%2d O_ack %d, O_sr %d, O_csi %d\n", - i,UE_id,curr_pucch->frame,curr_pucch->ul_slot,O_ack,O_sr,O_csi); - nr_fill_nfapi_pucch(Mod_idP, frameP, slotP, curr_pucch, UE_id); + + if (O_csi > 0) + LOG_D(NR_MAC,"Scheduling PUCCH[%d] RX for UE %04x in %4d.%2d O_ack %d, O_sr %d, O_csi %d\n", + i,UE->rnti,curr_pucch->frame,curr_pucch->ul_slot,O_ack,O_sr,O_csi); + nr_fill_nfapi_pucch(nrmac, frameP, slotP, curr_pucch, UE); memset(curr_pucch, 0, sizeof(*curr_pucch)); } } @@ -565,7 +560,7 @@ void compute_cqi_bitlen(struct NR_CSI_ReportConfig *csi_reportconfig, //!TODO : same function can be written to handle csi_resources -void compute_csi_bitlen(NR_CSI_MeasConfig_t *csi_MeasConfig, NR_UE_info_t *UE_info, int UE_id, module_id_t Mod_idP){ +void compute_csi_bitlen(NR_CSI_MeasConfig_t *csi_MeasConfig, NR_UE_info_t *UE){ uint8_t csi_report_id = 0; uint8_t nb_resources = 0; NR_CSI_ReportConfig__reportQuantity_PR reportQuantity_type; @@ -577,7 +572,7 @@ void compute_csi_bitlen(NR_CSI_MeasConfig_t *csi_MeasConfig, NR_UE_info_t *UE_in for (csi_report_id=0; csi_report_id < csi_MeasConfig->csi_ReportConfigToAddModList->list.count; csi_report_id++){ struct NR_CSI_ReportConfig *csi_reportconfig = csi_MeasConfig->csi_ReportConfigToAddModList->list.array[csi_report_id]; // MAC structure for CSI measurement reports (per UE and per report) - nr_csi_report_t *csi_report = &UE_info->csi_report_template[UE_id][csi_report_id]; + nr_csi_report_t *csi_report = &UE->csi_report_template[csi_report_id]; // csi-ResourceConfigId of a CSI-ResourceConfig included in the configuration // (either CSI-RS or SSB) csi_ResourceConfigId = csi_reportconfig->resourcesForChannelMeasurement; @@ -662,24 +657,22 @@ void compute_csi_bitlen(NR_CSI_MeasConfig_t *csi_MeasConfig, NR_UE_info_t *UE_in } -uint16_t nr_get_csi_bitlen(int Mod_idP, - int UE_id, +uint16_t nr_get_csi_bitlen(NR_UE_info_t *UE, uint8_t csi_report_id) { uint16_t csi_bitlen = 0; uint16_t max_bitlen = 0; - NR_UE_info_t *UE_info = 
&RC.nrmac[Mod_idP]->UE_info; L1_RSRP_bitlen_t * CSI_report_bitlen = NULL; CSI_Meas_bitlen_t * csi_meas_bitlen = NULL; - if (NR_CSI_ReportConfig__reportQuantity_PR_ssb_Index_RSRP==UE_info->csi_report_template[UE_id][csi_report_id].reportQuantity_type|| - NR_CSI_ReportConfig__reportQuantity_PR_cri_RSRP==UE_info->csi_report_template[UE_id][csi_report_id].reportQuantity_type){ - CSI_report_bitlen = &(UE_info->csi_report_template[UE_id][csi_report_id].CSI_report_bitlen); //This might need to be moodif for Aperiodic CSI-RS measurements + if (NR_CSI_ReportConfig__reportQuantity_PR_ssb_Index_RSRP==UE->csi_report_template[csi_report_id].reportQuantity_type|| + NR_CSI_ReportConfig__reportQuantity_PR_cri_RSRP==UE->csi_report_template[csi_report_id].reportQuantity_type){ + CSI_report_bitlen = &(UE->csi_report_template[csi_report_id].CSI_report_bitlen); //This might need to be moodif for Aperiodic CSI-RS measurements csi_bitlen+= ((CSI_report_bitlen->cri_ssbri_bitlen * CSI_report_bitlen->nb_ssbri_cri) + CSI_report_bitlen->rsrp_bitlen +(CSI_report_bitlen->diff_rsrp_bitlen * (CSI_report_bitlen->nb_ssbri_cri -1 ))); } else{ - csi_meas_bitlen = &(UE_info->csi_report_template[UE_id][csi_report_id].csi_meas_bitlen); //This might need to be moodif for Aperiodic CSI-RS measurements + csi_meas_bitlen = &(UE->csi_report_template[csi_report_id].csi_meas_bitlen); //This might need to be moodif for Aperiodic CSI-RS measurements uint16_t temp_bitlen; for (int i=0; i<8; i++) { temp_bitlen = (csi_meas_bitlen->cri_bitlen+ @@ -705,11 +698,10 @@ void nr_csi_meas_reporting(int Mod_idP, NR_ServingCellConfigCommon_t *scc = RC.nrmac[Mod_idP]->common_channels->ServingCellConfigCommon; const int n_slots_frame = nr_slots_per_frame[*scc->ssbSubcarrierSpacing]; - NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info; - NR_list_t *UE_list = &UE_info->list; - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - const NR_CellGroupConfig_t *CellGroup = UE_info->CellGroup[UE_id]; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + + UE_iterator(RC.nrmac[Mod_idP]->UE_info.list, UE ) { + const NR_CellGroupConfig_t *CellGroup = UE->CellGroup; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; if ((sched_ctrl->rrc_processing_timer > 0) || (sched_ctrl->ul_failure==1 && get_softmodem_params()->phy_test==0)) { continue; } @@ -761,13 +753,13 @@ void nr_csi_meas_reporting(int Mod_idP, && !curr_pucch->sr_flag && curr_pucch->dai_c == 0, "PUCCH not free at index 1 for UE %04x\n", - UE_info->rnti[UE_id]); + UE->rnti); curr_pucch->r_pucch = -1; curr_pucch->frame = frame; curr_pucch->ul_slot = sched_slot; curr_pucch->resource_indicator = res_index; curr_pucch->csi_bits += - nr_get_csi_bitlen(Mod_idP,UE_id,csi_report_id); + nr_get_csi_bitlen(UE,csi_report_id); const NR_SIB1_t *sib1 = RC.nrmac[Mod_idP]->common_channels[0].sib1 ? 
RC.nrmac[Mod_idP]->common_channels[0].sib1->message.choice.c1->choice.systemInformationBlockType1 : NULL; NR_BWP_t *genericParameters = get_ul_bwp_genericParameters(sched_ctrl->active_ubwp, @@ -819,37 +811,37 @@ void nr_csi_meas_reporting(int Mod_idP, } } -__attribute__((unused)) -static void handle_dl_harq(module_id_t mod_id, - int UE_id, +static void handle_dl_harq(NR_UE_info_t * UE, int8_t harq_pid, - bool success) + bool success, + int harq_round_max) { - NR_UE_info_t *UE_info = &RC.nrmac[mod_id]->UE_info; - NR_UE_harq_t *harq = &UE_info->UE_sched_ctrl[UE_id].harq_processes[harq_pid]; + NR_UE_harq_t *harq = &UE->UE_sched_ctrl.harq_processes[harq_pid]; harq->feedback_slot = -1; harq->is_waiting = false; if (success) { - add_tail_nr_list(&UE_info->UE_sched_ctrl[UE_id].available_dl_harq, harq_pid); + add_tail_nr_list(&UE->UE_sched_ctrl.available_dl_harq, harq_pid); harq->round = 0; harq->ndi ^= 1; - } else if (harq->round >= RC.nrmac[mod_id]->harq_round_max - 1) { - add_tail_nr_list(&UE_info->UE_sched_ctrl[UE_id].available_dl_harq, harq_pid); + + } else if (harq->round >= harq_round_max - 1) { + add_tail_nr_list(&UE->UE_sched_ctrl.available_dl_harq, harq_pid); harq->round = 0; harq->ndi ^= 1; - NR_mac_stats_t *stats = &UE_info->mac_stats[UE_id]; + + NR_mac_stats_t *stats = &UE->mac_stats; stats->dl.errors++; - LOG_D(NR_MAC, "retransmission error for UE %d (total %"PRIu64")\n", UE_id, stats->dl.errors); + LOG_D(NR_MAC, "retransmission error for UE %04x (total %"PRIu64")\n", UE->rnti, stats->dl.errors); + } else { - LOG_D(PHY,"NACK for: pid %d, ue %x\n",harq_pid, UE_id); - add_tail_nr_list(&UE_info->UE_sched_ctrl[UE_id].retrans_dl_harq, harq_pid); + LOG_D(PHY,"NACK for: pid %d, ue %04x\n",harq_pid, UE->rnti); + add_tail_nr_list(&UE->UE_sched_ctrl.retrans_dl_harq, harq_pid); harq->round++; } } -int checkTargetSSBInFirst64TCIStates_pdschConfig(int ssb_index_t, int Mod_idP, int UE_id) { - NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info; - NR_CellGroupConfig_t *CellGroup = UE_info->CellGroup[UE_id] ; +int checkTargetSSBInFirst64TCIStates_pdschConfig(int ssb_index_t, NR_UE_info_t * UE) { + NR_CellGroupConfig_t *CellGroup = UE->CellGroup; int nb_tci_states = CellGroup->spCellConfig->spCellConfigDedicated->initialDownlinkBWP->pdsch_Config->choice.setup->tci_StatesToAddModList->list.count; NR_TCI_State_t *tci =NULL; int i; @@ -874,13 +866,12 @@ int checkTargetSSBInFirst64TCIStates_pdschConfig(int ssb_index_t, int Mod_idP, i return -1; } -int checkTargetSSBInTCIStates_pdcchConfig(int ssb_index_t, int Mod_idP, int UE_id) { - NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info; - NR_CellGroupConfig_t *CellGroup = UE_info->CellGroup[UE_id] ; +int checkTargetSSBInTCIStates_pdcchConfig(int ssb_index_t, NR_UE_info_t *UE) { + NR_CellGroupConfig_t *CellGroup = UE->CellGroup ; int nb_tci_states = CellGroup->spCellConfig->spCellConfigDedicated->initialDownlinkBWP->pdsch_Config->choice.setup->tci_StatesToAddModList->list.count; NR_TCI_State_t *tci =NULL; NR_TCI_StateId_t *tci_id = NULL; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; NR_ControlResourceSet_t *coreset = sched_ctrl->coreset; int i; int flag = 0; @@ -935,7 +926,7 @@ int get_diff_rsrp(uint8_t index, int strongest_rsrp) { //identifies the target SSB Beam index //keeps the required date for PDCCH and PDSCH TCI state activation/deactivation CE consutruction globally //handles triggering of PDCCH and PDSCH MAC CEs -void tci_handling(module_id_t Mod_idP, int UE_id, frame_t 
frame, slot_t slot) { +void tci_handling(NR_UE_info_t *UE, frame_t frame, slot_t slot) { int strongest_ssb_rsrp = 0; int cqi_idx = 0; @@ -947,10 +938,10 @@ void tci_handling(module_id_t Mod_idP, int UE_id, frame_t frame, slot_t slot) { int ssb_index[MAX_NUM_SSB] = {0}; int ssb_rsrp[MAX_NUM_SSB] = {0}; uint8_t idx = 0; - NR_UE_info_t *UE_info = &RC.nrmac[Mod_idP]->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; - const int bwp_id = sched_ctrl->active_bwp ? sched_ctrl->active_bwp->bwp_Id : 0; - NR_CellGroupConfig_t *CellGroup = UE_info->CellGroup[UE_id]; + + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; + const int bwp_id = sched_ctrl->active_bwp ? 1 : 0; + NR_CellGroupConfig_t *CellGroup = UE->CellGroup; //bwp indicator int n_dl_bwp=0; @@ -960,7 +951,7 @@ void tci_handling(module_id_t Mod_idP, int UE_id, frame_t frame, slot_t slot) { n_dl_bwp = CellGroup->spCellConfig->spCellConfigDedicated->downlinkBWP_ToAddModList->list.count; uint8_t nr_ssbri_cri = 0; - uint8_t nb_of_csi_ssb_report = UE_info->csi_report_template[UE_id][cqi_idx].nb_of_csi_ssb_report; + uint8_t nb_of_csi_ssb_report = UE->csi_report_template[cqi_idx].nb_of_csi_ssb_report; int better_rsrp_reported = -140-(-0); /*minimum_measured_RSRP_value - minimum_differntail_RSRP_value*///considering the minimum RSRP value as better RSRP initially uint8_t diff_rsrp_idx = 0; uint8_t i, j; @@ -1031,7 +1022,7 @@ void tci_handling(module_id_t Mod_idP, int UE_id, frame_t frame, slot_t slot) { The length of the field is 7 bits */ if(sched_ctrl->UE_mac_ce_ctrl.pdcch_state_ind.coresetId == 0) { - int tci_state_id = checkTargetSSBInFirst64TCIStates_pdschConfig(ssb_index[target_ssb_beam_index], Mod_idP, UE_id); + int tci_state_id = checkTargetSSBInFirst64TCIStates_pdschConfig(ssb_index[target_ssb_beam_index], UE); if( tci_state_id != -1) sched_ctrl->UE_mac_ce_ctrl.pdcch_state_ind.tciStateId = tci_state_id; @@ -1041,7 +1032,7 @@ void tci_handling(module_id_t Mod_idP, int UE_id, frame_t frame, slot_t slot) { int flag = 0; for(i =0; ssb_index_sorted[i]!=0; i++) { - tci_state_id = checkTargetSSBInFirst64TCIStates_pdschConfig(ssb_index_sorted[i], Mod_idP, UE_id) ; + tci_state_id = checkTargetSSBInFirst64TCIStates_pdschConfig(ssb_index_sorted[i],UE) ; if(tci_state_id != -1 && ssb_rsrp_sorted[i] > ssb_rsrp[curr_ssb_beam_index] && ssb_rsrp_sorted[i] - ssb_rsrp[curr_ssb_beam_index] > L1_RSRP_HYSTERIS) { sched_ctrl->UE_mac_ce_ctrl.pdcch_state_ind.tciStateId = tci_state_id; @@ -1055,7 +1046,7 @@ void tci_handling(module_id_t Mod_idP, int UE_id, frame_t frame, slot_t slot) { } } } else { - int tci_state_id = checkTargetSSBInTCIStates_pdcchConfig(ssb_index[target_ssb_beam_index], Mod_idP, UE_id); + int tci_state_id = checkTargetSSBInTCIStates_pdcchConfig(ssb_index[target_ssb_beam_index], UE); if (tci_state_id !=-1) sched_ctrl->UE_mac_ce_ctrl.pdcch_state_ind.tciStateId = tci_state_id; @@ -1065,7 +1056,7 @@ void tci_handling(module_id_t Mod_idP, int UE_id, frame_t frame, slot_t slot) { int flag = 0; for(i =0; ssb_index_sorted[i]!=0; i++) { - tci_state_id = checkTargetSSBInTCIStates_pdcchConfig(ssb_index_sorted[i], Mod_idP, UE_id); + tci_state_id = checkTargetSSBInTCIStates_pdcchConfig(ssb_index_sorted[i], UE); if( tci_state_id != -1 && ssb_rsrp_sorted[i] > ssb_rsrp[curr_ssb_beam_index] && ssb_rsrp_sorted[i] - ssb_rsrp[curr_ssb_beam_index] > L1_RSRP_HYSTERIS) { sched_ctrl->UE_mac_ce_ctrl.pdcch_state_ind.tciStateId = tci_state_id; @@ -1126,15 +1117,14 @@ uint8_t pickandreverse_bits(uint8_t *payload, uint16_t bitlen, uint8_t 
start_bit } -void evaluate_rsrp_report(NR_UE_info_t *UE_info, +void evaluate_rsrp_report(NR_UE_info_t *UE, NR_UE_sched_ctrl_t *sched_ctrl, - int UE_id, uint8_t csi_report_id, uint8_t *payload, int *cumul_bits, NR_CSI_ReportConfig__reportQuantity_PR reportQuantity_type){ - nr_csi_report_t *csi_report = &UE_info->csi_report_template[UE_id][csi_report_id]; + nr_csi_report_t *csi_report = &UE->csi_report_template[csi_report_id]; uint8_t cri_ssbri_bitlen = csi_report->CSI_report_bitlen.cri_ssbri_bitlen; uint16_t curr_payload; @@ -1186,7 +1176,7 @@ void evaluate_rsrp_report(NR_UE_info_t *UE_info, } csi_report->nb_of_csi_ssb_report++; int strongest_ssb_rsrp = get_measured_rsrp(sched_ctrl->CSI_report.ssb_cri_report.RSRP); - NR_mac_stats_t *stats = &UE_info->mac_stats[UE_id]; + NR_mac_stats_t *stats = &UE->mac_stats; // including ssb rsrp in mac stats stats->cumul_rsrp += strongest_ssb_rsrp; stats->num_rsrp_meas++; @@ -1324,22 +1314,19 @@ void extract_pucch_csi_report(NR_CSI_MeasConfig_t *csi_MeasConfig, const nfapi_nr_uci_pucch_pdu_format_2_3_4_t *uci_pdu, frame_t frame, slot_t slot, - int UE_id, - module_id_t Mod_idP) { - + NR_UE_info_t *UE, + NR_ServingCellConfigCommon_t *scc) +{ /** From Table 6.3.1.1.2-3: RI, LI, CQI, and CRI of codebookType=typeI-SinglePanel */ - NR_ServingCellConfigCommon_t *scc = - RC.nrmac[Mod_idP]->common_channels->ServingCellConfigCommon; const int n_slots_frame = nr_slots_per_frame[*scc->ssbSubcarrierSpacing]; uint8_t *payload = uci_pdu->csi_part1.csi_part1_payload; uint16_t bitlen = uci_pdu->csi_part1.csi_part1_bit_len; NR_CSI_ReportConfig__reportQuantity_PR reportQuantity_type = NR_CSI_ReportConfig__reportQuantity_PR_NOTHING; - NR_UE_info_t *UE_info = &(RC.nrmac[Mod_idP]->UE_info); - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; int cumul_bits = 0; int r_index = -1; for (int csi_report_id = 0; csi_report_id < csi_MeasConfig->csi_ReportConfigToAddModList->list.count; csi_report_id++ ) { - nr_csi_report_t *csi_report = &UE_info->csi_report_template[UE_id][csi_report_id]; + nr_csi_report_t *csi_report = &UE->csi_report_template[csi_report_id]; csi_report->nb_of_csi_ssb_report = 0; uint8_t cri_bitlen = 0; uint8_t ri_bitlen = 0; @@ -1354,10 +1341,10 @@ void extract_pucch_csi_report(NR_CSI_MeasConfig_t *csi_MeasConfig, LOG_D(MAC,"SFN/SF:%d/%d reportQuantity type = %d\n",frame,slot,reportQuantity_type); switch(reportQuantity_type){ case NR_CSI_ReportConfig__reportQuantity_PR_cri_RSRP: - evaluate_rsrp_report(UE_info,sched_ctrl,UE_id,csi_report_id,payload,&cumul_bits,reportQuantity_type); + evaluate_rsrp_report(UE,sched_ctrl,csi_report_id,payload,&cumul_bits,reportQuantity_type); break; case NR_CSI_ReportConfig__reportQuantity_PR_ssb_Index_RSRP: - evaluate_rsrp_report(UE_info,sched_ctrl,UE_id,csi_report_id,payload,&cumul_bits,reportQuantity_type); + evaluate_rsrp_report(UE,sched_ctrl,csi_report_id,payload,&cumul_bits,reportQuantity_type); break; case NR_CSI_ReportConfig__reportQuantity_PR_cri_RI_CQI: cri_bitlen = csi_report->csi_meas_bitlen.cri_bitlen; @@ -1413,7 +1400,7 @@ void extract_pucch_csi_report(NR_CSI_MeasConfig_t *csi_MeasConfig, } } -static NR_UE_harq_t *find_harq(module_id_t mod_id, frame_t frame, sub_frame_t slot, int UE_id) +static NR_UE_harq_t *find_harq(frame_t frame, sub_frame_t slot, NR_UE_info_t * UE, int harq_round_max) { /* In case of realtime problems: we can only identify a HARQ process by * timing. 
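A hedged sketch of the timing-based matching this comment describes: pending processes are kept in FIFO order, and any head entry whose expected feedback does not correspond to the slot being processed is assumed lost and dropped (the real code counts it as a NACK) before the search continues. Types and helpers (harq_t, pid_fifo_t, match_feedback) are simplified placeholders, not the scheduler's real lists.

#include <stdbool.h>
#include <stdio.h>

#define NUM_HARQ 16

typedef struct {
  int feedback_frame;   /* when the ACK/NACK for this process is expected */
  int feedback_slot;
  bool is_waiting;
} harq_t;

/* Placeholder FIFO of HARQ pids awaiting feedback (head at index 0). */
typedef struct {
  int pid[NUM_HARQ];
  int len;
} pid_fifo_t;

static void fifo_pop(pid_fifo_t *f)
{
  for (int i = 1; i < f->len; i++)
    f->pid[i - 1] = f->pid[i];
  if (f->len > 0)
    f->len--;
}

/* Return the pid whose expected feedback matches (frame, slot); drop any head
 * entry that does not match, since its feedback was evidently never decoded. */
static int match_feedback(harq_t *harq, pid_fifo_t *pending, int frame, int slot)
{
  while (pending->len > 0) {
    int pid = pending->pid[0];
    harq_t *h = &harq[pid];
    if (h->feedback_frame == frame && h->feedback_slot == slot)
      return pid;             /* this is the feedback we just received */
    fifo_pop(pending);        /* stale entry: give up on this process */
    h->is_waiting = false;    /* the real handler also triggers a retransmission */
  }
  return -1;
}

int main(void)
{
  harq_t harq[NUM_HARQ] = { 0 };
  pid_fifo_t pending = { .pid = { 3, 5 }, .len = 2 };
  harq[3] = (harq_t){ .feedback_frame = 100, .feedback_slot = 7, .is_waiting = true };
  harq[5] = (harq_t){ .feedback_frame = 100, .feedback_slot = 9, .is_waiting = true };
  printf("matched pid %d\n", match_feedback(harq, &pending, 100, 9)); /* prints 5 */
  return 0;
}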
If the HARQ process's feedback_frame/feedback_slot is not the one we @@ -1421,7 +1408,7 @@ static NR_UE_harq_t *find_harq(module_id_t mod_id, frame_t frame, sub_frame_t sl * skip this HARQ process, which is what happens in the loop below. * Similarly, we might be "in advance", in which case we need to skip * this result. */ - NR_UE_sched_ctrl_t *sched_ctrl = &RC.nrmac[mod_id]->UE_info.UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; int8_t pid = sched_ctrl->feedback_dl_harq.head; if (pid < 0) return NULL; @@ -1437,7 +1424,7 @@ static NR_UE_harq_t *find_harq(module_id_t mod_id, frame_t frame, sub_frame_t sl frame, slot); remove_front_nr_list(&sched_ctrl->feedback_dl_harq); - handle_dl_harq(mod_id, UE_id, pid, 0); + handle_dl_harq(UE, pid, 0, harq_round_max); pid = sched_ctrl->feedback_dl_harq.head; if (pid < 0) return NULL; @@ -1462,20 +1449,19 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id, sub_frame_t slot, const nfapi_nr_uci_pucch_pdu_format_0_1_t *uci_01) { - int UE_id = find_nr_UE_id(mod_id, uci_01->rnti); - if (UE_id < 0) { + NR_UE_info_t * UE = find_nr_UE(&RC.nrmac[mod_id]->UE_info, uci_01->rnti); + if (!UE) { LOG_E(NR_MAC, "%s(): unknown RNTI %04x in PUCCH UCI\n", __func__, uci_01->rnti); return; } - NR_UE_info_t *UE_info = &RC.nrmac[mod_id]->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; if (((uci_01->pduBitmap >> 1) & 0x01)) { // iterate over received harq bits for (int harq_bit = 0; harq_bit < uci_01->harq->num_harq; harq_bit++) { const uint8_t harq_value = uci_01->harq->harq_list[harq_bit].harq_value; const uint8_t harq_confidence = uci_01->harq->harq_confidence_level; - NR_UE_harq_t *harq = find_harq(mod_id, frame, slot, UE_id); + NR_UE_harq_t *harq = find_harq(frame, slot, UE, RC.nrmac[mod_id]->harq_round_max); if (!harq) { LOG_E(NR_MAC, "Oh no! 
Could not find a harq in %s!\n", __FUNCTION__); break; @@ -1484,8 +1470,8 @@ void handle_nr_uci_pucch_0_1(module_id_t mod_id, const int8_t pid = sched_ctrl->feedback_dl_harq.head; remove_front_nr_list(&sched_ctrl->feedback_dl_harq); LOG_D(NR_MAC,"%4d.%2d bit %d pid %d ack/nack %d\n",frame, slot, harq_bit,pid,harq_value); - handle_dl_harq(mod_id, UE_id, pid, harq_value == 0 && harq_confidence == 0); - if (harq_confidence == 1) UE_info->mac_stats[UE_id].pucch0_DTX++; + handle_dl_harq(UE, pid, harq_value == 0 && harq_confidence == 0, RC.nrmac[mod_id]->harq_round_max); + if (harq_confidence == 1) UE->mac_stats.pucch0_DTX++; } } @@ -1510,19 +1496,21 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id, sub_frame_t slot, const nfapi_nr_uci_pucch_pdu_format_2_3_4_t *uci_234) { - int UE_id = find_nr_UE_id(mod_id, uci_234->rnti); - if (UE_id < 0) { + NR_UE_info_t * UE = find_nr_UE(&RC.nrmac[mod_id]->UE_info, uci_234->rnti); + if (!UE) { LOG_E(NR_MAC, "%s(): unknown RNTI %04x in PUCCH UCI\n", __func__, uci_234->rnti); return; } - AssertFatal(RC.nrmac[mod_id]->UE_info.CellGroup[UE_id],"Cellgroup is null for UE %d/%x\n",UE_id,uci_234->rnti); - AssertFatal(RC.nrmac[mod_id]->UE_info.CellGroup[UE_id]->spCellConfig, "Cellgroup->spCellConfig is null for UE %d/%x\n",UE_id,uci_234->rnti); - AssertFatal(RC.nrmac[mod_id]->UE_info.CellGroup[UE_id]->spCellConfig->spCellConfigDedicated, "Cellgroup->spCellConfig->spCellConfigDedicated is null for UE %d/%x\n",UE_id,uci_234->rnti); - if ( RC.nrmac[mod_id]->UE_info.CellGroup[UE_id]->spCellConfig->spCellConfigDedicated->csi_MeasConfig==NULL) return; + AssertFatal(UE->CellGroup,"Cellgroup is null for UE %04x\n", uci_234->rnti); + AssertFatal(UE->CellGroup->spCellConfig, + "Cellgroup->spCellConfig is null for UE %04x\n", uci_234->rnti); + AssertFatal(UE->CellGroup->spCellConfig->spCellConfigDedicated, + "Cellgroup->spCellConfig->spCellConfigDedicated is null for UE %04x\n", uci_234->rnti); + if ( UE->CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig==NULL) + return; - NR_CSI_MeasConfig_t *csi_MeasConfig = RC.nrmac[mod_id]->UE_info.CellGroup[UE_id]->spCellConfig->spCellConfigDedicated->csi_MeasConfig->choice.setup; - NR_UE_info_t *UE_info = &RC.nrmac[mod_id]->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_CSI_MeasConfig_t *csi_MeasConfig = UE->CellGroup->spCellConfig->spCellConfigDedicated->csi_MeasConfig->choice.setup; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; // tpc (power control) // TODO PUCCH2 SNR computation is not correct -> ignore the following @@ -1535,20 +1523,20 @@ void handle_nr_uci_pucch_2_3_4(module_id_t mod_id, // iterate over received harq bits for (int harq_bit = 0; harq_bit < uci_234->harq.harq_bit_len; harq_bit++) { const int acknack = ((uci_234->harq.harq_payload[harq_bit >> 3]) >> harq_bit) & 0x01; - NR_UE_harq_t *harq = find_harq(mod_id, frame, slot, UE_id); + NR_UE_harq_t *harq = find_harq(frame, slot, UE, RC.nrmac[mod_id]->harq_round_max); if (!harq) break; DevAssert(harq->is_waiting); const int8_t pid = sched_ctrl->feedback_dl_harq.head; remove_front_nr_list(&sched_ctrl->feedback_dl_harq); - handle_dl_harq(mod_id, UE_id, pid, uci_234->harq.harq_crc != 1 && acknack); + handle_dl_harq(UE, pid, uci_234->harq.harq_crc != 1 && acknack, RC.nrmac[mod_id]->harq_round_max); } } if ((uci_234->pduBitmap >> 2) & 0x01) { //API to parse the csi report and store it into sched_ctrl - extract_pucch_csi_report(csi_MeasConfig, uci_234, frame, slot, UE_id, mod_id); + extract_pucch_csi_report(csi_MeasConfig, 
uci_234, frame, slot, UE, RC.nrmac[mod_id]->common_channels->ServingCellConfigCommon); //TCI handling function - tci_handling(mod_id, UE_id,frame, slot); + tci_handling(UE,frame, slot); } if ((uci_234->pduBitmap >> 3) & 0x01) { //@TODO:Handle CSI Report 2 @@ -1616,12 +1604,11 @@ bool test_acknack_vrb_occupation(NR_UE_sched_ctrl_t *sched_ctrl, // if the function returns -1 it was not possible to schedule acknack // when current pucch is ready to be scheduled nr_fill_nfapi_pucch is called int nr_acknack_scheduling(int mod_id, - int UE_id, + NR_UE_info_t * UE, frame_t frame, sub_frame_t slot, int r_pucch, int is_common) { - const int CC_id = 0; const int minfbtime = RC.nrmac[mod_id]->minRXTXTIMEpdsch; const NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels[CC_id].ServingCellConfigCommon; @@ -1639,8 +1626,8 @@ int nr_acknack_scheduling(int mod_id, * * SR uses format 0 and is allocated in the first UL (mixed) slot (and not * later) * * each UE has dedicated PUCCH Format 0 resources, and we use index 0! */ - NR_UE_sched_ctrl_t *sched_ctrl = &RC.nrmac[mod_id]->UE_info.UE_sched_ctrl[UE_id]; - NR_CellGroupConfig_t *cg = RC.nrmac[mod_id]->UE_info.CellGroup[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; + NR_CellGroupConfig_t *cg = UE->CellGroup; NR_PUCCH_Config_t *pucch_Config = NULL; if (sched_ctrl->active_ubwp) { @@ -1675,11 +1662,13 @@ int nr_acknack_scheduling(int mod_id, const int f = pucch->frame; const int s = pucch->ul_slot; LOG_D(NR_MAC, "In %s: %4d.%2d DAI = 2 pucch currently in %4d.%2d, advancing by 1 slot\n", __FUNCTION__, frame, slot, f, s); - if (!(csi_pucch - && csi_pucch->csi_bits > 0 - && csi_pucch->frame == f - && csi_pucch->ul_slot == s)) - nr_fill_nfapi_pucch(mod_id, frame, slot, pucch, UE_id); + + if (!(csi_pucch + && csi_pucch->csi_bits > 0 + && csi_pucch->frame == f + && csi_pucch->ul_slot == s)) + nr_fill_nfapi_pucch(RC.nrmac[mod_id], frame, slot, pucch, UE); + memset(pucch, 0, sizeof(*pucch)); pucch->frame = s == n_slots_frame - 1 ? (f + 1) % 1024 : f; if(((s + 1)%nr_slots_period) == 0) @@ -1696,7 +1685,8 @@ int nr_acknack_scheduling(int mod_id, && csi_pucch->ul_slot == pucch->ul_slot && !csi_pucch->simultaneous_harqcsi) { LOG_D(NR_MAC,"Cannot multiplex csi_pucch for %d.%d\n",csi_pucch->frame,csi_pucch->ul_slot); - nr_fill_nfapi_pucch(mod_id, frame, slot, csi_pucch, UE_id); + nr_fill_nfapi_pucch(RC.nrmac[mod_id], frame, slot, csi_pucch, UE); + memset(csi_pucch, 0, sizeof(*csi_pucch)); pucch->frame = pucch->ul_slot == n_slots_frame - 1 ? (pucch->frame + 1) % 1024 : pucch->frame; if(((pucch->ul_slot + 1)%nr_slots_period) == 0) @@ -1723,7 +1713,7 @@ int nr_acknack_scheduling(int mod_id, const int bwp_id = sched_ctrl->active_bwp ? sched_ctrl->active_bwp->bwp_Id : 0; int max_fb_time = 0; - get_pdsch_to_harq_feedback(mod_id, UE_id, bwp_id, ss_type, &max_fb_time, pdsch_to_harq_feedback); + get_pdsch_to_harq_feedback(UE, bwp_id, ss_type, &max_fb_time, pdsch_to_harq_feedback); LOG_D(NR_MAC, "In %s: 1b. DL %4d.%2d, UL_ACK %4d.%2d, DAI_C %d\n", __FUNCTION__, frame,slot,pucch->frame,pucch->ul_slot,pucch->dai_c); /* there is a HARQ. Check whether we can use it for this ACKNACK */ @@ -1750,14 +1740,14 @@ int nr_acknack_scheduling(int mod_id, csi_pucch->csi_bits > 0 && csi_pucch->frame == f && csi_pucch->ul_slot == s)) - nr_fill_nfapi_pucch(mod_id, frame, slot, pucch, UE_id); + nr_fill_nfapi_pucch(RC.nrmac[mod_id], frame, slot, pucch, UE); memset(pucch, 0, sizeof(*pucch)); pucch->frame = s == n_slots_frame - 1 ? 
(f + 1) % 1024 : f; if(((s + 1)%nr_slots_period) == 0) pucch->ul_slot = (s + 1 + first_ul_slot_period) % n_slots_frame; else pucch->ul_slot = (s + 1) % n_slots_frame; - return nr_acknack_scheduling(mod_id, UE_id, frame, slot, r_pucch,is_common); + return nr_acknack_scheduling(mod_id, UE, frame, slot, r_pucch,is_common); } pucch->timing_indicator = i; @@ -1783,10 +1773,11 @@ int nr_acknack_scheduling(int mod_id, ((pucch->frame*n_slots_frame + pucch->ul_slot) < (frame*n_slots_frame + slot))) { AssertFatal(pucch->sr_flag + pucch->dai_c == 0, - "expected no SR/AckNack for UE %d in %4d.%2d, but has %d/%d for %4d.%2d\n", - UE_id, frame, slot, pucch->sr_flag, pucch->dai_c, pucch->frame, pucch->ul_slot); + "expected no SR/AckNack for UE %04x in %4d.%2d, but has %d/%d for %4d.%2d\n", + UE->rnti, frame, slot, pucch->sr_flag, pucch->dai_c, pucch->frame, pucch->ul_slot); const int s = next_ul_slot; pucch->frame = s < n_slots_frame ? frame : (frame + 1) % 1024; + pucch->ul_slot = s % n_slots_frame; } @@ -1834,11 +1825,11 @@ int nr_acknack_scheduling(int mod_id, } if (ind_found==-1) { LOG_D(NR_MAC, - "%4d.%2d could not find pdsch_to_harq_feedback for UE %d: earliest " + "%4d.%2d could not find pdsch_to_harq_feedback for UE %04x: earliest " "ack slot %d\n", frame, slot, - UE_id, + UE->rnti, pucch->ul_slot); return -1; } @@ -1852,8 +1843,9 @@ int nr_acknack_scheduling(int mod_id, // FIXME currently we support at most 11 bits in pucch2 so skip also in that case if(!csi_pucch->simultaneous_harqcsi || ((csi_pucch->csi_bits + csi_pucch->dai_c) >= 11)) { + LOG_D(NR_MAC,"Cannot multiplex csi_pucch %d +csi_pucch->dai_c %d for %d.%d\n",csi_pucch->csi_bits,csi_pucch->dai_c,csi_pucch->frame,csi_pucch->ul_slot); - nr_fill_nfapi_pucch(mod_id, frame, slot, csi_pucch, UE_id); + nr_fill_nfapi_pucch(RC.nrmac[mod_id], frame, slot, csi_pucch, UE); memset(csi_pucch, 0, sizeof(*csi_pucch)); /* advance the UL slot information in PUCCH by one so we won't schedule in * the same slot again */ @@ -1865,7 +1857,7 @@ int nr_acknack_scheduling(int mod_id, pucch->ul_slot = (s + 1 + first_ul_slot_period) % n_slots_frame; else pucch->ul_slot = (s + 1) % n_slots_frame; - return nr_acknack_scheduling(mod_id, UE_id, frame, slot, r_pucch,is_common); + return nr_acknack_scheduling(mod_id, UE, frame, slot, r_pucch,is_common); } // multiplexing harq and csi in a pucch else { @@ -1902,29 +1894,26 @@ int nr_acknack_scheduling(int mod_id, } -void nr_sr_reporting(int Mod_idP, frame_t SFN, sub_frame_t slot) +void nr_sr_reporting(gNB_MAC_INST *nrmac, frame_t SFN, sub_frame_t slot) { - gNB_MAC_INST *nrmac = RC.nrmac[Mod_idP]; if (!is_xlsch_in_slot(nrmac->ulsch_slot_bitmap[slot / 64], slot)) return; NR_ServingCellConfigCommon_t *scc = nrmac->common_channels->ServingCellConfigCommon; const int n_slots_frame = nr_slots_per_frame[*scc->ssbSubcarrierSpacing]; - NR_UE_info_t *UE_info = &nrmac->UE_info; - NR_list_t *UE_list = &UE_info->list; - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + UE_iterator(nrmac->UE_info.list, UE) { + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; if (sched_ctrl->ul_failure==1) continue; NR_PUCCH_Config_t *pucch_Config = NULL; if (sched_ctrl->active_ubwp) { pucch_Config = sched_ctrl->active_ubwp->bwp_Dedicated->pucch_Config->choice.setup; - } else if (RC.nrmac[Mod_idP]->UE_info.CellGroup[UE_id] && - RC.nrmac[Mod_idP]->UE_info.CellGroup[UE_id]->spCellConfig && - 
RC.nrmac[Mod_idP]->UE_info.CellGroup[UE_id]->spCellConfig->spCellConfigDedicated && - RC.nrmac[Mod_idP]->UE_info.CellGroup[UE_id]->spCellConfig->spCellConfigDedicated->uplinkConfig && - RC.nrmac[Mod_idP]->UE_info.CellGroup[UE_id]->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP && - RC.nrmac[Mod_idP]->UE_info.CellGroup[UE_id]->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP->pucch_Config->choice.setup) { - pucch_Config = RC.nrmac[Mod_idP]->UE_info.CellGroup[UE_id]->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP->pucch_Config->choice.setup; + } else if (UE->CellGroup && + UE->CellGroup->spCellConfig && + UE->CellGroup->spCellConfig->spCellConfigDedicated && + UE->CellGroup->spCellConfig->spCellConfigDedicated->uplinkConfig && + UE->CellGroup->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP && + UE->CellGroup->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP->pucch_Config->choice.setup) { + pucch_Config = UE->CellGroup->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP->pucch_Config->choice.setup; } else continue; @@ -1971,7 +1960,7 @@ void nr_sr_reporting(int Mod_idP, frame_t SFN, sub_frame_t slot) continue; nfapi_nr_pucch_pdu_t *pdu = &ul_tti_req->pdus_list[i].pucch_pdu; /* check that it is our PUCCH F0. Assuming there can be only one */ - if (pdu->rnti == UE_info->rnti[UE_id] + if (pdu->rnti == UE->rnti && pdu->format_type == 0 // does not use NR_PUCCH_Resource__format_PR_format0 && pdu->initial_cyclic_shift == pucch_res->format.choice.format0->initialCyclicShift && pdu->nr_of_symbols == pucch_res->format.choice.format0->nrofSymbols @@ -1981,7 +1970,7 @@ void nr_sr_reporting(int Mod_idP, frame_t SFN, sub_frame_t slot) nfapi_allocated = true; break; } - else if (pdu->rnti == UE_info->rnti[UE_id] + else if (pdu->rnti == UE->rnti && pdu->format_type == 2 // does not use NR_PUCCH_Resource__format_PR_format0 && pdu->nr_of_symbols == pucch_res->format.choice.format2->nrofSymbols && pdu->start_symbol_index == pucch_res->format.choice.format2->startingSymbolIndex) { @@ -1991,7 +1980,7 @@ void nr_sr_reporting(int Mod_idP, frame_t SFN, sub_frame_t slot) break; } - else if (pdu->rnti == UE_info->rnti[UE_id] + else if (pdu->rnti == UE->rnti && pdu->format_type == 1 // does not use NR_PUCCH_Resource__format_PR_format0 && pdu->nr_of_symbols == pucch_res->format.choice.format1->nrofSymbols && pdu->start_symbol_index == pucch_res->format.choice.format1->startingSymbolIndex) { @@ -2001,7 +1990,7 @@ void nr_sr_reporting(int Mod_idP, frame_t SFN, sub_frame_t slot) break; } - else if (pdu->rnti == UE_info->rnti[UE_id] + else if (pdu->rnti == UE->rnti && pdu->format_type == 3 // does not use NR_PUCCH_Resource__format_PR_format0 && pdu->nr_of_symbols == pucch_res->format.choice.format3->nrofSymbols && pdu->start_symbol_index == pucch_res->format.choice.format3->startingSymbolIndex) { @@ -2011,7 +2000,7 @@ void nr_sr_reporting(int Mod_idP, frame_t SFN, sub_frame_t slot) break; } - else if (pdu->rnti == UE_info->rnti[UE_id] + else if (pdu->rnti == UE->rnti && pdu->format_type == 4 // does not use NR_PUCCH_Resource__format_PR_format0 && pdu->nr_of_symbols == pucch_res->format.choice.format4->nrofSymbols && pdu->start_symbol_index == pucch_res->format.choice.format4->startingSymbolIndex) { @@ -2043,7 +2032,7 @@ void nr_sr_reporting(int Mod_idP, frame_t SFN, sub_frame_t slot) .resource_indicator = found, .r_pucch = -1 }; - nr_fill_nfapi_pucch(Mod_idP, SFN, slot, &sched_sr, UE_id); + 
nr_fill_nfapi_pucch(nrmac, SFN, slot, &sched_sr, UE); } } } diff --git a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c index 591a549c0a2..f2f50d86c69 100644 --- a/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c +++ b/openair2/LAYER2/NR_MAC_gNB/gNB_scheduler_ulsch.c @@ -84,8 +84,8 @@ const int get_ul_tda(const gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon // F: length of L is 0:8 or 1:16 bits wide // R: Reserved bit, set to zero. -int nr_process_mac_pdu(module_id_t module_idP, - int UE_id, +int nr_process_mac_pdu( instance_t module_idP, + NR_UE_info_t* UE, uint8_t CC_id, frame_t frameP, sub_frame_t slot, @@ -96,11 +96,10 @@ int nr_process_mac_pdu(module_id_t module_idP, uint8_t done = 0; - NR_UE_info_t *UE_info = &RC.nrmac[module_idP]->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; if ( pduP[0] != UL_SCH_LCID_PADDING ) - trace_NRpdu(DIRECTION_UPLINK, pduP, pdu_len, UE_id, WS_C_RNTI, UE_info->rnti[UE_id], frameP, 0, 0, 0); + trace_NRpdu(DIRECTION_UPLINK, pduP, pdu_len, WS_C_RNTI, UE->rnti, frameP, 0, 0, 0); #ifdef ENABLE_MAC_PAYLOAD_DEBUG LOG_I(NR_MAC, "In %s: dumping MAC PDU in %d.%d:\n", __func__, frameP, slot); @@ -197,7 +196,7 @@ int nr_process_mac_pdu(module_id_t module_idP, for (int i = 0; i < NR_NB_RA_PROC_MAX; i++) { NR_RA_t *ra = &RC.nrmac[module_idP]->common_channels[CC_id].ra[i]; - if (ra->state >= WAIT_Msg3 && ra->rnti == UE_info->rnti[UE_id]) { + if (ra->state >= WAIT_Msg3 && ra->rnti == UE->rnti) { ra->crnti = ((pduP[1]&0xFF)<<8)|(pduP[2]&0xFF); ra->msg3_dcch_dtch = true; LOG_I(NR_MAC, "Received UL_SCH_LCID_C_RNTI with C-RNTI 0x%04x\n", ra->crnti); @@ -258,22 +257,23 @@ int nr_process_mac_pdu(module_id_t module_idP, if (!get_mac_len(pduP, pdu_len, &mac_len, &mac_subheader_len)) return 0; - rnti_t crnti = UE_info->rnti[UE_id]; - int UE_idx = UE_id; + rnti_t crnti = UE->rnti; + NR_UE_info_t* UE_idx = UE; for (int i = 0; i < NR_NB_RA_PROC_MAX; i++) { NR_RA_t *ra = &RC.nrmac[module_idP]->common_channels[CC_id].ra[i]; - if (ra->state >= WAIT_Msg3 && ra->rnti == UE_info->rnti[UE_id]) { + if (ra->state >= WAIT_Msg3 && ra->rnti == UE->rnti) { uint8_t *next_subpduP = pduP + mac_subheader_len + mac_len; if ((pduP[mac_subheader_len+mac_len] & 0x3F) == UL_SCH_LCID_C_RNTI) { crnti = ((next_subpduP[1]&0xFF)<<8)|(next_subpduP[2]&0xFF); - UE_idx = find_nr_UE_id(module_idP, crnti); + LOG_W(NR_MAC, " UL_SCH_LCID_SRB for rnti %04x\n", crnti); + UE_idx = find_nr_UE(&RC.nrmac[module_idP]->UE_info, crnti); break; } } } - if (UE_info->CellGroup[UE_idx]) { - LOG_D(NR_MAC, "[UE %d] Frame %d : ULSCH -> UL-DCCH %d (gNB %d, %d bytes), rnti: 0x%04x \n", module_idP, frameP, rx_lcid, module_idP, mac_len, crnti); + if (UE_idx->CellGroup) { + LOG_D(NR_MAC, "Frame %d : ULSCH -> UL-DCCH %d (gNB %ld, %d bytes), rnti: 0x%04x \n", frameP, rx_lcid, module_idP, mac_len, crnti); mac_rlc_data_ind(module_idP, crnti, module_idP, @@ -286,7 +286,7 @@ int nr_process_mac_pdu(module_id_t module_idP, 1, NULL); } else { - AssertFatal(1==0,"[UE %d] Frame/Slot %d.%d : Received LCID %d which is not configured, dropping packet\n",UE_id,frameP,slot,rx_lcid); + AssertFatal(1==0,"[UE %04x] Frame/Slot %d.%d : Received LCID %d which is not configured, dropping packet\n",UE->rnti,frameP,slot,rx_lcid); } break; case UL_SCH_LCID_SRB3: @@ -324,7 +324,7 @@ int nr_process_mac_pdu(module_id_t module_idP, frameP, 0, 0, - UE_info->rnti[UE_id], + UE->rnti, CCCH, pduP + mac_subheader_len, mac_len, @@ 
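The R/F/LCID subheader layout referred to in the comments above can be made concrete with a small parser sketch: one octet carrying R, F and a 6-bit LCID, followed by an 8-bit L when F is 0 or a 16-bit L when F is 1 (fixed-size subheaders such as padding carry no L field and are not covered here). parse_subheader is an illustrative stand-in, not the patch's get_mac_len() helper.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Parse one R|F|LCID (+ L) MAC subheader and report where the SDU starts.
 * Returns false if the buffer cannot hold the advertised subPDU. */
static bool parse_subheader(const uint8_t *pdu, size_t pdu_len,
                            uint8_t *lcid, uint16_t *sdu_len, size_t *hdr_len)
{
  if (pdu_len < 2)
    return false;
  *lcid = pdu[0] & 0x3F;             /* low 6 bits: LCID */
  const bool f = (pdu[0] >> 6) & 1;  /* F set: 16-bit length field */
  if (f) {
    if (pdu_len < 3)
      return false;
    *sdu_len = (uint16_t)(pdu[1] << 8) | pdu[2];
    *hdr_len = 3;
  } else {
    *sdu_len = pdu[1];
    *hdr_len = 2;
  }
  return *hdr_len + *sdu_len <= pdu_len;
}

int main(void)
{
  /* LCID 4 (a DCCH), F = 0, L = 3, followed by 3 payload bytes */
  const uint8_t pdu[] = { 0x04, 0x03, 0xde, 0xad, 0xbe };
  uint8_t lcid; uint16_t len; size_t hdr;
  if (parse_subheader(pdu, sizeof(pdu), &lcid, &len, &hdr))
    printf("lcid %u, %u byte(s) of SDU after %zu header byte(s)\n", lcid, len, hdr);
  return 0;
}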
-336,18 +336,19 @@ int nr_process_mac_pdu(module_id_t module_idP, if (!get_mac_len(pduP, pdu_len, &mac_len, &mac_subheader_len)) return 0; - LOG_D(NR_MAC, "[UE %x] %d.%d: ULSCH -> UL-%s %d (gNB %d, %d bytes)\n", - UE_info->rnti[UE_id], + + LOG_D(NR_MAC, "[UE %04x] %d.%d : ULSCH -> UL-%s %d (gNB %ld, %d bytes)\n", + UE->rnti, frameP, slot, rx_lcid<4?"DCCH":"DTCH", rx_lcid, module_idP, mac_len); - UE_info->mac_stats[UE_id].ul.lc_bytes[rx_lcid] += mac_len; + UE->mac_stats.ul.lc_bytes[rx_lcid] += mac_len; mac_rlc_data_ind(module_idP, - UE_info->rnti[UE_id], + UE->rnti, module_idP, frameP, ENB_FLAG_YES, @@ -398,15 +399,14 @@ int nr_process_mac_pdu(module_id_t module_idP, return 0; } -void abort_nr_ul_harq(module_id_t mod_id, int UE_id, int8_t harq_pid) +void abort_nr_ul_harq( NR_UE_info_t* UE, int8_t harq_pid) { - NR_UE_info_t *UE_info = &RC.nrmac[mod_id]->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; NR_UE_ul_harq_t *harq = &sched_ctrl->ul_harq_processes[harq_pid]; harq->ndi ^= 1; harq->round = 0; - UE_info->mac_stats[UE_id].ul.errors++; + UE->mac_stats.ul.errors++; add_tail_nr_list(&sched_ctrl->available_ul_harq, harq_pid); /* the transmission failed: the UE won't send the data we expected initially, @@ -422,11 +422,11 @@ void handle_nr_ul_harq(const int CC_idP, sub_frame_t slot, const nfapi_nr_crc_t *crc_pdu) { - gNB_MAC_INST *gNB_mac = RC.nrmac[mod_id]; - int UE_id = find_nr_UE_id(mod_id, crc_pdu->rnti); - if (UE_id < 0) { + NR_UE_info_t* UE = find_nr_UE(&RC.nrmac[mod_id]->UE_info, crc_pdu->rnti); + if (!UE) { + LOG_W(NR_MAC, "handle harq for rnti %04x, in RA process\n", crc_pdu->rnti); for (int i = 0; i < NR_NB_RA_PROC_MAX; ++i) { - NR_RA_t *ra = &gNB_mac->common_channels[CC_idP].ra[i]; + NR_RA_t *ra = &RC.nrmac[mod_id]->common_channels[CC_idP].ra[i]; if (ra->state >= WAIT_Msg3 && ra->rnti == crc_pdu->rnti) return; @@ -434,8 +434,7 @@ void handle_nr_ul_harq(const int CC_idP, LOG_E(NR_MAC, "%s(): unknown RNTI 0x%04x in PUSCH\n", __func__, crc_pdu->rnti); return; } - NR_UE_info_t *UE_info = &RC.nrmac[mod_id]->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; int8_t harq_pid = sched_ctrl->feedback_ul_harq.head; LOG_D(NR_MAC, "Comparing crc_pdu->harq_id vs feedback harq_pid = %d %d\n",crc_pdu->harq_id, harq_pid); while (crc_pdu->harq_id != harq_pid || harq_pid < 0) { @@ -449,8 +448,9 @@ void handle_nr_ul_harq(const int CC_idP, remove_front_nr_list(&sched_ctrl->feedback_ul_harq); sched_ctrl->ul_harq_processes[harq_pid].is_waiting = false; - if(sched_ctrl->ul_harq_processes[harq_pid].round >= gNB_mac->harq_round_max - 1) { - abort_nr_ul_harq(mod_id, UE_id, harq_pid); + + if(sched_ctrl->ul_harq_processes[harq_pid].round >= RC.nrmac[mod_id]->harq_round_max - 1) { + abort_nr_ul_harq(UE, harq_pid); } else { sched_ctrl->ul_harq_processes[harq_pid].round++; add_tail_nr_list(&sched_ctrl->retrans_ul_harq, harq_pid); @@ -470,8 +470,8 @@ void handle_nr_ul_harq(const int CC_idP, harq_pid, crc_pdu->rnti); add_tail_nr_list(&sched_ctrl->available_ul_harq, harq_pid); - } else if (harq->round >= gNB_mac->harq_round_max - 1) { - abort_nr_ul_harq(mod_id, UE_id, harq_pid); + } else if (harq->round >= RC.nrmac[mod_id]->harq_round_max - 1) { + abort_nr_ul_harq(UE, harq_pid); LOG_D(NR_MAC, "RNTI %04x: Ulharq id %d crc failed in all rounds\n", crc_pdu->rnti, @@ -501,15 +501,15 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, const uint16_t rssi){ 
gNB_MAC_INST *gNB_mac = RC.nrmac[gnb_mod_idP]; - NR_UE_info_t *UE_info = &gNB_mac->UE_info; const int current_rnti = rntiP; - const int UE_id = find_nr_UE_id(gnb_mod_idP, current_rnti); + LOG_D(NR_MAC, "rx_sdu for rnti %04x\n", current_rnti); const int target_snrx10 = gNB_mac->pusch_target_snrx10; const int pusch_failure_thres = gNB_mac->pusch_failure_thres; - - if (UE_id != -1) { - NR_UE_sched_ctrl_t *UE_scheduling_control = &UE_info->UE_sched_ctrl[UE_id]; + + NR_UE_info_t* UE = find_nr_UE(&gNB_mac->UE_info, current_rnti); + if (UE) { + NR_UE_sched_ctrl_t *UE_scheduling_control = &UE->UE_sched_ctrl; const int8_t harq_pid = UE_scheduling_control->feedback_ul_harq.head; if (sduP) @@ -517,15 +517,14 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, T_INT(rntiP), T_INT(frameP), T_INT(slotP), T_INT(harq_pid), T_BUFFER(sduP, sdu_lenP)); - UE_info->mac_stats[UE_id].ul.total_bytes += sdu_lenP; - LOG_D(NR_MAC, "[gNB %d][PUSCH %d] CC_id %d %d.%d Received ULSCH sdu from PHY (rnti %x, UE_id %d) ul_cqi %d TA %d sduP %p, rssi %d\n", + UE->mac_stats.ul.total_bytes += sdu_lenP; + LOG_D(NR_MAC, "[gNB %d][PUSCH %d] CC_id %d %d.%d Received ULSCH sdu from PHY (rnti %04x) ul_cqi %d TA %d sduP %p, rssi %d\n", gnb_mod_idP, harq_pid, CC_idP, frameP, slotP, current_rnti, - UE_id, ul_cqi, timing_advance, sduP, @@ -538,11 +537,13 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, UE_scheduling_control->ta_update = timing_advance; UE_scheduling_control->raw_rssi = rssi; UE_scheduling_control->pusch_snrx10 = ul_cqi * 5 - 640; - LOG_D(NR_MAC, "[UE %d] PUSCH TPC %d(SNRx10 %d) and TA %d\n",UE_id,UE_scheduling_control->tpc0,UE_scheduling_control->pusch_snrx10,UE_scheduling_control->ta_update); + + LOG_D(NR_MAC, "[UE %04x] PUSCH TPC %d and TA %d\n",UE->rnti,UE_scheduling_control->tpc0,UE_scheduling_control->ta_update); } else{ - LOG_D(NR_MAC,"[UE %d] Detected DTX : increasing UE TX power\n",UE_id); + LOG_D(NR_MAC,"[UE %04x] Detected DTX : increasing UE TX power\n",UE->rnti); UE_scheduling_control->tpc0 = 1; + } #if defined(ENABLE_MAC_PAYLOAD_DEBUG) @@ -560,13 +561,13 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, if (sduP != NULL){ LOG_D(NR_MAC, "Received PDU at MAC gNB \n"); - UE_info->UE_sched_ctrl[UE_id].pusch_consecutive_dtx_cnt = 0; + UE->UE_sched_ctrl.pusch_consecutive_dtx_cnt = 0; const uint32_t tb_size = UE_scheduling_control->ul_harq_processes[harq_pid].sched_pusch.tb_size; UE_scheduling_control->sched_ul_bytes -= tb_size; if (UE_scheduling_control->sched_ul_bytes < 0) UE_scheduling_control->sched_ul_bytes = 0; - nr_process_mac_pdu(gnb_mod_idP, UE_id, CC_idP, frameP, slotP, sduP, sdu_lenP); + nr_process_mac_pdu(gnb_mod_idP, UE, CC_idP, frameP, slotP, sduP, sdu_lenP); } else { NR_UE_ul_harq_t *cur_harq = &UE_scheduling_control->ul_harq_processes[harq_pid]; @@ -578,13 +579,15 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, UE_scheduling_control->sched_ul_bytes = 0; } if (ul_cqi <= 128) { - UE_info->UE_sched_ctrl[UE_id].pusch_consecutive_dtx_cnt++; - UE_info->mac_stats[UE_id].ulsch_DTX++; + UE->UE_sched_ctrl.pusch_consecutive_dtx_cnt++; + UE->mac_stats.ulsch_DTX++; } - if (!get_softmodem_params()->phy_test && UE_info->UE_sched_ctrl[UE_id].pusch_consecutive_dtx_cnt >= pusch_failure_thres) { - LOG_W(NR_MAC,"%d.%d Detected UL Failure on PUSCH after %d PUSCH DTX, stopping scheduling\n", - frameP,slotP,UE_info->UE_sched_ctrl[UE_id].pusch_consecutive_dtx_cnt); - UE_info->UE_sched_ctrl[UE_id].ul_failure = 1; + + if (!get_softmodem_params()->phy_test && UE->UE_sched_ctrl.pusch_consecutive_dtx_cnt >= pusch_failure_thres) { + 
LOG_W(NR_MAC,"Detected UL Failure on PUSCH after %d PUSCH DTX, stopping scheduling\n", + UE->UE_sched_ctrl.pusch_consecutive_dtx_cnt); + UE->UE_sched_ctrl.ul_failure = 1; + nr_mac_gNB_rrc_ul_failure(gnb_mod_idP,CC_idP,frameP,slotP,rntiP); } } @@ -605,7 +608,7 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, T(T_GNB_MAC_UL_PDU_WITH_DATA, T_INT(gnb_mod_idP), T_INT(CC_idP), T_INT(rntiP), T_INT(frameP), T_INT(slotP), T_INT(-1) /* harq_pid */, T_BUFFER(sduP, sdu_lenP)); - + /* we don't know this UE (yet). Check whether there is a ongoing RA (Msg 3) * and check the corresponding UE's RNTI match, in which case we activate * it. */ @@ -613,7 +616,7 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, NR_RA_t *ra = &gNB_mac->common_channels[CC_idP].ra[i]; if (ra->state != WAIT_Msg3) continue; - + if(no_sig) { LOG_D(NR_MAC, "Random Access %i failed at state %i (no signal)\n", i, ra->state); nr_mac_remove_ra_rnti(gnb_mod_idP, ra->rnti); @@ -636,20 +639,20 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, continue; } - int UE_id=-1; - UE_id = add_new_nr_ue(gnb_mod_idP, ra->rnti, ra->CellGroup); - if (UE_id<0) { - LOG_D(NR_MAC, "Random Access %i discarded at state %i (TC_RNTI %04x RNTI %04x): max number of users achieved!\n", i, ra->state,ra->rnti,current_rnti); + NR_UE_info_t* UE = add_new_nr_ue(gNB_mac, ra->rnti, ra->CellGroup); + if (!UE) { + LOG_W(NR_MAC, "Random Access %i discarded at state %i (TC_RNTI %04x RNTI %04x): max number of users achieved!\n", i, ra->state,ra->rnti,current_rnti); + nr_mac_remove_ra_rnti(gnb_mod_idP, ra->rnti); nr_clear_ra_proc(gnb_mod_idP, CC_idP, frameP, ra); return; } - UE_info->UE_beam_index[UE_id] = ra->beam_id; + UE->UE_beam_index = ra->beam_id; // re-initialize ta update variables after RA procedure completion - UE_info->UE_sched_ctrl[UE_id].ta_frame = frameP; + UE->UE_sched_ctrl.ta_frame = frameP; LOG_D(NR_MAC, "reset RA state information for RA-RNTI 0x%04x/index %d\n", @@ -658,27 +661,24 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, LOG_I(NR_MAC, "[gNB %d][RAPROC] PUSCH with TC_RNTI 0x%04x received correctly, " - "adding UE MAC Context UE_id %d/RNTI 0x%04x\n", + "adding UE MAC Context RNTI 0x%04x\n", gnb_mod_idP, current_rnti, - UE_id, ra->rnti); - NR_UE_sched_ctrl_t *UE_scheduling_control = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *UE_scheduling_control = &UE->UE_sched_ctrl; UE_scheduling_control->tpc0 = nr_get_tpc(target_snrx10,ul_cqi,30); if (timing_advance != 0xffff) UE_scheduling_control->ta_update = timing_advance; UE_scheduling_control->raw_rssi = rssi; UE_scheduling_control->pusch_snrx10 = ul_cqi * 5 - 640; - LOG_D(NR_MAC, "[UE %d] PUSCH TPC %d and TA %d\n",UE_id,UE_scheduling_control->tpc0,UE_scheduling_control->ta_update); + LOG_D(NR_MAC, "[UE %04x] PUSCH TPC %d and TA %d\n",UE->rnti,UE_scheduling_control->tpc0,UE_scheduling_control->ta_update); if(ra->cfra) { - LOG_A(NR_MAC, "(ue %i, rnti 0x%04x) CFRA procedure succeeded!\n", UE_id, ra->rnti); + LOG_A(NR_MAC, "(rnti 0x%04x) CFRA procedure succeeded!\n", ra->rnti); nr_mac_remove_ra_rnti(gnb_mod_idP, ra->rnti); nr_clear_ra_proc(gnb_mod_idP, CC_idP, frameP, ra); - UE_info->active[UE_id] = true; - process_CellGroup(ra->CellGroup, UE_scheduling_control); } else { @@ -694,25 +694,25 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, // First byte corresponds to R/LCID MAC sub-header memcpy(ra->cont_res_id, &sduP[1], sizeof(uint8_t) * 6); - if (nr_process_mac_pdu(gnb_mod_idP, UE_id, CC_idP, frameP, slotP, sduP, sdu_lenP) == 0) { + if (nr_process_mac_pdu(gnb_mod_idP, UE, CC_idP, frameP, slotP, sduP, 
sdu_lenP) == 0) { ra->state = Msg4; ra->Msg4_frame = (frameP + 2) % 1024; ra->Msg4_slot = 1; if (ra->msg3_dcch_dtch) { // Check if the UE identified by C-RNTI still exists at the gNB - int UE_id_C = find_nr_UE_id(gnb_mod_idP, ra->crnti); - if (UE_id_C < 0) { + NR_UE_info_t * UE_C = find_nr_UE(&gNB_mac->UE_info, ra->crnti); + if (!UE_C) { // The UE identified by C-RNTI no longer exists at the gNB // Let's abort the current RA, so the UE will trigger a new RA later but using RRCSetupRequest instead. A better solution may be implemented - mac_remove_nr_ue(gnb_mod_idP, ra->rnti); + mac_remove_nr_ue(gNB_mac, ra->rnti); nr_clear_ra_proc(gnb_mod_idP, CC_idP, frameP, ra); return; } else { // The UE identified by C-RNTI still exists at the gNB // Reset uplink failure flags/counters/timers at MAC and at RRC so gNB will resume again scheduling resources for this UE - UE_info->UE_sched_ctrl[UE_id_C].pusch_consecutive_dtx_cnt = 0; - UE_info->UE_sched_ctrl[UE_id_C].ul_failure = 0; + UE_C->UE_sched_ctrl.pusch_consecutive_dtx_cnt = 0; + UE_C->UE_sched_ctrl.ul_failure = 0; nr_mac_gNB_rrc_ul_failure_reset(gnb_mod_idP, frameP, slotP, ra->crnti); } } @@ -751,7 +751,9 @@ void nr_rx_sdu(const module_id_t gnb_mod_idP, return; } + LOG_D(NR_MAC, "Random Access %i Msg3 CRC did not pass)\n", i); + ra->msg3_round++; ra->state = Msg3_retransmission; } @@ -783,19 +785,20 @@ long get_K2(NR_ServingCellConfigCommon_t *scc, return 3; } -bool nr_UE_is_to_be_scheduled(module_id_t mod_id, int CC_id, int UE_id, frame_t frame, sub_frame_t slot) +static bool nr_UE_is_to_be_scheduled(const NR_ServingCellConfigCommon_t *scc, + int CC_id, NR_UE_info_t* UE, frame_t frame, sub_frame_t slot, uint32_t ulsch_max_frame_inactivity) { - const NR_ServingCellConfigCommon_t *scc = RC.nrmac[mod_id]->common_channels->ServingCellConfigCommon; const int n = nr_slots_per_frame[*scc->ssbSubcarrierSpacing]; const int now = frame * n + slot; - const struct gNB_MAC_INST_s *nrmac = RC.nrmac[mod_id]; - const NR_UE_sched_ctrl_t *sched_ctrl = &nrmac->UE_info.UE_sched_ctrl[UE_id]; + + const NR_UE_sched_ctrl_t *sched_ctrl =&UE->UE_sched_ctrl; + const NR_TDD_UL_DL_Pattern_t *tdd = scc->tdd_UL_DL_ConfigurationCommon ? &scc->tdd_UL_DL_ConfigurationCommon->pattern1 : NULL; int num_slots_per_period; - int last_ul_slot,last_ul_sched; + int last_ul_slot; int tdd_period_len[8] = {500,625,1000,1250,2000,2500,5000,10000}; if (tdd) { // Force the default transmission in a full slot as early as possible in the UL portion of TDD period (last_ul_slot) num_slots_per_period = n*tdd_period_len[tdd->dl_UL_TransmissionPeriodicity]/10000; @@ -806,14 +809,14 @@ bool nr_UE_is_to_be_scheduled(module_id_t mod_id, int CC_id, int UE_id, frame_t last_ul_slot = sched_ctrl->last_ul_slot; } - last_ul_sched = sched_ctrl->last_ul_frame * n + last_ul_slot; + const int last_ul_sched = sched_ctrl->last_ul_frame * n + last_ul_slot; const int diff = (now - last_ul_sched + 1024 * n) % (1024 * n); /* UE is to be scheduled if * (1) we think the UE has more bytes awaiting than what we scheduled * (2) there is a scheduling request * (3) or we did not schedule it in more than 10 frames */ const bool has_data = sched_ctrl->estimated_ul_buffer > sched_ctrl->sched_ul_bytes; - const bool high_inactivity = diff >= (nrmac->ulsch_max_frame_inactivity>0 ? (nrmac->ulsch_max_frame_inactivity * n) : num_slots_per_period); + const bool high_inactivity = diff >= (ulsch_max_frame_inactivity > 0 ? 
ulsch_max_frame_inactivity * n : num_slots_per_period); LOG_D(NR_MAC, "%4d.%2d UL inactivity %d slots has_data %d SR %d\n", frame, @@ -824,34 +827,26 @@ bool nr_UE_is_to_be_scheduled(module_id_t mod_id, int CC_id, int UE_id, frame_t return has_data || sched_ctrl->SR || high_inactivity; } -int next_list_entry_looped(NR_list_t *list, int UE_id) -{ - if (UE_id < 0) - return list->head; - return list->next[UE_id] < 0 ? list->head : list->next[UE_id]; -} - -bool allocate_ul_retransmission(module_id_t module_id, - frame_t frame, - sub_frame_t slot, - uint16_t *rballoc_mask, - int *n_rb_sched, - int UE_id, - int harq_pid) +static bool allocate_ul_retransmission(gNB_MAC_INST *nrmac, + frame_t frame, + sub_frame_t slot, + uint16_t *rballoc_mask, + int *n_rb_sched, + NR_UE_info_t* UE, + int harq_pid, + const NR_SIB1_t *sib1, + const NR_ServingCellConfigCommon_t *scc, + const int tda) { const int CC_id = 0; - gNB_MAC_INST *nr_mac = RC.nrmac[module_id]; - const NR_ServingCellConfigCommon_t *scc = nr_mac->common_channels[CC_id].ServingCellConfigCommon; - NR_UE_info_t *UE_info = &nr_mac->UE_info; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; NR_sched_pusch_t *retInfo = &sched_ctrl->ul_harq_processes[harq_pid].sched_pusch; - NR_CellGroupConfig_t *cg = UE_info->CellGroup[UE_id]; + NR_CellGroupConfig_t *cg = UE->CellGroup; NR_BWP_UplinkDedicated_t *ubwpd = cg && cg->spCellConfig && cg->spCellConfig->spCellConfigDedicated && cg->spCellConfig->spCellConfigDedicated->uplinkConfig ? cg->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP : NULL; - const NR_SIB1_t *sib1 = RC.nrmac[module_id]->common_channels[0].sib1 ? RC.nrmac[module_id]->common_channels[0].sib1->message.choice.c1->choice.systemInformationBlockType1 : NULL; NR_BWP_t *genericParameters = get_ul_bwp_genericParameters(sched_ctrl->active_ubwp, (NR_ServingCellConfigCommon_t *)scc, sib1); @@ -860,7 +855,6 @@ bool allocate_ul_retransmission(module_id_t module_id, const uint16_t bwpSize = NRRIV2BW(genericParameters->locationAndBandwidth, MAX_BWP_SIZE); const uint8_t nrOfLayers = 1; const uint8_t num_dmrs_cdm_grps_no_data = (sched_ctrl->active_bwp || ubwpd) ? 
1 : 2; - const int tda = get_ul_tda(nr_mac, scc, retInfo->slot); LOG_D(NR_MAC,"retInfo->time_domain_allocation = %d, tda = %d\n", retInfo->time_domain_allocation, tda); LOG_D(NR_MAC,"num_dmrs_cdm_grps_no_data %d, tbs %d\n",num_dmrs_cdm_grps_no_data, retInfo->tb_size); if (tda == retInfo->time_domain_allocation) { @@ -890,7 +884,7 @@ bool allocate_ul_retransmission(module_id_t module_id, while (rbStart < bwpSize && (rballoc_mask[rbStart] & slbitmap) != slbitmap) rbStart++; if (rbStart + retInfo->rbSize > bwpSize) { - LOG_W(NR_MAC, "cannot allocate retransmission of UE %d/RNTI %04x: no resources (rbStart %d, retInfo->rbSize %d, bwpSize %d\n", UE_id, UE_info->rnti[UE_id], rbStart, retInfo->rbSize, bwpSize); + LOG_W(NR_MAC, "cannot allocate retransmission of RNTI %04x: no resources (rbStart %d, retInfo->rbSize %d, bwpSize %d\n", UE->rnti, rbStart, retInfo->rbSize, bwpSize); return false; } LOG_D(NR_MAC, "%s(): retransmission keeping TDA %d and TBS %d\n", __func__, tda, retInfo->tb_size); @@ -941,7 +935,7 @@ bool allocate_ul_retransmission(module_id_t module_id, /* Find a free CCE */ const int cid = sched_ctrl->coreset->controlResourceSetId; - const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]); + const uint16_t Y = get_Y(cid%3, slot, UE->rnti); uint8_t nr_of_candidates; for (int i=0; i<5; i++) { // for now taking the lowest value among the available aggregation levels @@ -951,7 +945,7 @@ bool allocate_ul_retransmission(module_id_t module_id, 1<<i); if(nr_of_candidates>0) break; } - int CCEIndex = find_pdcch_candidate(RC.nrmac[module_id], + int CCEIndex = find_pdcch_candidate(nrmac, CC_id, sched_ctrl->aggregation_level, nr_of_candidates, @@ -960,12 +954,12 @@ bool allocate_ul_retransmission(module_id_t module_id, Y); if (CCEIndex<0) { - LOG_D(NR_MAC, "%4d.%2d no free CCE for retransmission UL DCI UE %04x\n", frame, slot, UE_info->rnti[UE_id]); + LOG_D(NR_MAC, "%4d.%2d no free CCE for retransmission UL DCI UE %04x\n", frame, slot, UE->rnti); return false; } sched_ctrl->cce_index = CCEIndex; - fill_pdcch_vrb_map(RC.nrmac[module_id], + fill_pdcch_vrb_map(nrmac, CC_id, &sched_ctrl->sched_pdcch, CCEIndex, @@ -981,11 +975,10 @@ bool allocate_ul_retransmission(module_id_t module_id, NR_sched_pusch_t *sched_pusch = &sched_ctrl->sched_pusch; LOG_D(NR_MAC, - "%4d.%2d Allocate UL retransmission UE %d/RNTI %04x sched %4d.%2d (%d RBs)\n", + "%4d.%2d Allocate UL retransmission RNTI %04x sched %4d.%2d (%d RBs)\n", frame, slot, - UE_id, - UE_info->rnti[UE_id], + UE->rnti, sched_pusch->frame, sched_pusch->slot, sched_pusch->rbSize); @@ -1012,12 +1005,20 @@ void update_ul_ue_R_Qm(NR_sched_pusch_t *sched_pusch, const NR_pusch_semi_static } } -float ul_thr_ue[MAX_MOBILES_PER_GNB]; uint32_t ul_pf_tbs[3][29]; // pre-computed, approximate TBS values for PF coefficient +typedef struct UEsched_s { + float coef; + NR_UE_info_t * UE; +} UEsched_t; + +static int comparator(const void *p, const void *q) { + return ((UEsched_t*)p)->coef < ((UEsched_t*)q)->coef; +} + void pf_ul(module_id_t module_id, frame_t frame, sub_frame_t slot, - NR_list_t *UE_list, + NR_UE_info_t *UE_list[], int max_num_ue, int n_rb_sched, uint16_t *rballoc_mask) { @@ -1025,28 +1026,29 @@ void pf_ul(module_id_t module_id, const int CC_id = 0; gNB_MAC_INST *nrmac = RC.nrmac[module_id]; NR_ServingCellConfigCommon_t *scc = nrmac->common_channels[CC_id].ServingCellConfigCommon; - NR_UE_info_t *UE_info = &nrmac->UE_info; const NR_SIB1_t *sib1 = RC.nrmac[module_id]->common_channels[0].sib1 ? 
RC.nrmac[module_id]->common_channels[0].sib1->message.choice.c1->choice.systemInformationBlockType1 : NULL; - const int min_rb = nrmac->min_grant_prb; - float coeff_ue[MAX_MOBILES_PER_GNB]; + + const int min_rb = 5; // UEs that could be scheduled - int ue_array[MAX_MOBILES_PER_GNB]; - NR_list_t UE_sched = { .head = -1, .next = ue_array, .tail = -1, .len = MAX_MOBILES_PER_GNB }; + UEsched_t UE_sched[MAX_MOBILES_PER_GNB] = {0}; + int remainUEs=max_num_ue; + int curUE=0; /* Loop UE_list to calculate throughput and coeff */ - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { + UE_iterator(UE_list, UE) { - if (UE_info->Msg4_ACKed[UE_id] != true) continue; + if (UE->Msg4_ACKed != true) + continue; - LOG_D(NR_MAC,"pf_ul: preparing UL scheduling for UE %d\n",UE_id); - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + LOG_D(NR_MAC,"pf_ul: preparing UL scheduling for UE %04x\n",UE->rnti); + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; NR_BWP_t *genericParameters = get_ul_bwp_genericParameters(sched_ctrl->active_ubwp, scc, sib1); int rbStart = 0; // wrt BWP start - NR_CellGroupConfig_t *cg = UE_info->CellGroup[UE_id]; + NR_CellGroupConfig_t *cg = UE->CellGroup; NR_BWP_UplinkDedicated_t *ubwpd = cg && cg->spCellConfig && cg->spCellConfig->spCellConfigDedicated && cg->spCellConfig->spCellConfigDedicated->uplinkConfig ? cg->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP : NULL; @@ -1054,45 +1056,46 @@ void pf_ul(module_id_t module_id, const uint16_t bwpSize = NRRIV2BW(genericParameters->locationAndBandwidth, MAX_BWP_SIZE); NR_sched_pusch_t *sched_pusch = &sched_ctrl->sched_pusch; NR_pusch_semi_static_t *ps = &sched_ctrl->pusch_semi_static; - const NR_mac_dir_stats_t *stats = &UE_info->mac_stats[UE_id].ul; + const NR_mac_dir_stats_t *stats = &UE->mac_stats.ul; /* Calculate throughput */ const float a = 0.0005f; // corresponds to 200ms window const uint32_t b = stats->current_bytes; - ul_thr_ue[UE_id] = (1 - a) * ul_thr_ue[UE_id] + a * b; + UE->ul_thr_ue = (1 - a) * UE->ul_thr_ue + a * b; /* Check if retransmission is necessary */ sched_pusch->ul_harq_pid = sched_ctrl->retrans_ul_harq.head; - LOG_D(NR_MAC,"pf_ul: UE %d harq_pid %d\n",UE_id,sched_pusch->ul_harq_pid); + LOG_D(NR_MAC,"pf_ul: UE %04x harq_pid %d\n",UE->rnti,sched_pusch->ul_harq_pid); if (sched_pusch->ul_harq_pid >= 0) { /* Allocate retransmission*/ - bool r = allocate_ul_retransmission( - module_id, frame, slot, rballoc_mask, &n_rb_sched, UE_id, sched_pusch->ul_harq_pid); + const int tda = get_ul_tda(nrmac, scc, slot); + bool r = allocate_ul_retransmission(nrmac, frame, slot, rballoc_mask, &n_rb_sched, UE, sched_pusch->ul_harq_pid, sib1, scc, tda); if (!r) { - LOG_D(NR_MAC, "%4d.%2d UL retransmission UE RNTI %04x can NOT be allocated\n", frame, slot, UE_info->rnti[UE_id]); + LOG_D(NR_MAC, "%4d.%2d UL retransmission UE RNTI %04x can NOT be allocated\n", frame, slot, UE->rnti); continue; } - else LOG_D(NR_MAC,"%4d.%2d UL Retransmission UE RNTI %04x to be allocated, max_num_ue %d\n",frame,slot,UE_info->rnti[UE_id],max_num_ue); + else LOG_D(NR_MAC,"%4d.%2d UL Retransmission UE RNTI %04x to be allocated, max_num_ue %d\n",frame,slot,UE->rnti,max_num_ue); /* reduce max_num_ue once we are sure UE can be allocated, i.e., has CCE */ - max_num_ue--; - if (max_num_ue < 0) - return; - continue; - } + remainUEs--; + if (remainUEs == 0) + // we have filled all with mandatory retransmissions + // no need to schedule new transmissions + return; + continue; + } const int B = max(0, 
sched_ctrl->estimated_ul_buffer - sched_ctrl->sched_ul_bytes); /* preprocessor computed sched_frame/sched_slot */ - const bool do_sched = nr_UE_is_to_be_scheduled(module_id, 0, UE_id, sched_pusch->frame, sched_pusch->slot); + const bool do_sched = nr_UE_is_to_be_scheduled(scc, 0, UE, sched_pusch->frame, sched_pusch->slot, nrmac->ulsch_max_frame_inactivity); - LOG_D(NR_MAC,"pf_ul: do_sched UE %d => %s\n",UE_id,do_sched ? "yes" : "no"); - if ((B == 0 && !do_sched) || (sched_ctrl->rrc_processing_timer > 0)) { + LOG_D(NR_MAC,"pf_ul: do_sched UE %04x => %s\n",UE->rnti,do_sched ? "yes" : "no"); + if ((B == 0 && !do_sched) || (sched_ctrl->rrc_processing_timer > 0)) continue; - } - + const NR_bler_options_t *bo = &nrmac->ul_bler; const int max_mcs = bo->max_mcs; /* no per-user maximum MCS yet */ - sched_pusch->mcs = get_mcs_from_bler(bo, stats, &sched_ctrl->ul_bler_stats, max_mcs, frame); + sched_pusch->mcs = get_mcs_from_bler(bo, stats, &UE->UE_sched_ctrl.ul_bler_stats, max_mcs, frame); /* Schedule UE on SR or UL inactivity and no data (otherwise, will be scheduled * based on data to transmit) */ @@ -1100,32 +1103,35 @@ void pf_ul(module_id_t module_id, /* if no data, pre-allocate 5RB */ /* Find a free CCE */ const int cid = sched_ctrl->coreset->controlResourceSetId; - const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]); + const uint16_t Y = get_Y(cid%3, slot, UE->rnti); uint8_t nr_of_candidates; for (int i=0; i<5; i++) { - // for now taking the lowest value among the available aggregation levels - find_aggregation_candidates(&sched_ctrl->aggregation_level, - &nr_of_candidates, - sched_ctrl->search_space, - 1<<i); - if(nr_of_candidates>0) break; + // for now taking the lowest value among the available aggregation levels + find_aggregation_candidates(&sched_ctrl->aggregation_level, + &nr_of_candidates, + sched_ctrl->search_space, + 1<<i); + if(nr_of_candidates>0) break; } int CCEIndex = find_pdcch_candidate(RC.nrmac[module_id], - CC_id, - sched_ctrl->aggregation_level, - nr_of_candidates, - &sched_ctrl->sched_pdcch, - sched_ctrl->coreset, - Y); - + CC_id, + sched_ctrl->aggregation_level, + nr_of_candidates, + &sched_ctrl->sched_pdcch, + sched_ctrl->coreset, + Y); + if (CCEIndex<0) { - LOG_D(NR_MAC, "%4d.%2d no free CCE for UL DCI UE %04x (BSR 0)\n", frame, slot, UE_info->rnti[UE_id]); - continue; + LOG_D(NR_MAC, "%4d.%2d no free CCE for UL DCI UE %04x (BSR 0)\n", frame, slot, UE->rnti); + continue; } /* reduce max_num_ue once we are sure UE can be allocated, i.e., has CCE */ - max_num_ue--; - if (max_num_ue < 0) - return; + remainUEs--; + + if (remainUEs == 0) + // we have filled all with mandatory retransmissions + // no need to schedule new transmissions + return; /* Save PUSCH field */ /* we want to avoid a lengthy deduction of DMRS and other parameters in @@ -1156,8 +1162,8 @@ void pf_ul(module_id_t module_id, while (rbStart < bwpSize && (rballoc_mask[rbStart] & slbitmap) != slbitmap) rbStart++; if (rbStart + min_rb >= bwpSize) { - LOG_W(NR_MAC, "cannot allocate continuous UL data for UE %d/RNTI %04x: no resources (rbStart %d, min_rb %d, bwpSize %d\n", - UE_id, UE_info->rnti[UE_id],rbStart,min_rb,bwpSize); + LOG_W(NR_MAC, "cannot allocate continuous UL data for RNTI %04x: no resources (rbStart %d, min_rb %d, bwpSize %d\n", + UE->rnti,rbStart,min_rb,bwpSize); return; } @@ -1192,40 +1198,27 @@ void pf_ul(module_id_t module_id, } /* Create UE_sched for UEs eligibale for new data transmission*/ - add_tail_nr_list(&UE_sched, UE_id); - /* Calculate coefficient*/ const uint32_t tbs = 
ul_pf_tbs[ps->mcs_table][sched_pusch->mcs]; - coeff_ue[UE_id] = (float) tbs / ul_thr_ue[UE_id]; - LOG_D(NR_MAC,"b %d, ul_thr_ue[%d] %f, tbs %d, coeff_ue[%d] %f\n", - b, UE_id, ul_thr_ue[UE_id], tbs, UE_id, coeff_ue[UE_id]); + float coeff_ue = (float) tbs / UE->ul_thr_ue; + LOG_D(NR_MAC,"rnti %04x b %d, ul_thr_ue %f, tbs %d, coeff_ue %f\n", + UE->rnti, b, UE->ul_thr_ue, tbs, coeff_ue); + UE_sched[curUE].coef=coeff_ue; + UE_sched[curUE].UE=UE; + curUE++; } - + qsort(UE_sched, sizeof(*UE_sched), sizeofArray(UE_sched), comparator); + UEsched_t *iterator=UE_sched; + const int min_rbSize = 5; /* Loop UE_sched to find max coeff and allocate transmission */ - while (UE_sched.head >= 0 && max_num_ue> 0 && n_rb_sched >= min_rbSize) { - /* Find max coeff */ - int *max = &UE_sched.head; /* Find max coeff: assume head is max */ - int *p = &UE_sched.next[*max]; - while (*p >= 0) { - /* Find max coeff: if the current one has larger coeff, save for later */ - if (coeff_ue[*p] > coeff_ue[*max]) - max = p; - p = &UE_sched.next[*p]; - } - /* Find max coeff: remove the max one: do not use remove_nr_list() since it - * goes through the whole list every time. Note that UE_sched.tail might - * not be set correctly anymore */ - const int UE_id = *max; - p = &UE_sched.next[*max]; - *max = UE_sched.next[*max]; - *p = -1; + while (remainUEs> 0 && n_rb_sched >= min_rbSize && iterator->UE != NULL) { - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &iterator->UE->UE_sched_ctrl; const int cid = sched_ctrl->coreset->controlResourceSetId; - const uint16_t Y = get_Y(cid%3, slot, UE_info->rnti[UE_id]); + const uint16_t Y = get_Y(cid%3, slot, iterator->UE->rnti); uint8_t nr_of_candidates; for (int i=0; i<5; i++) { // for now taking the lowest value among the available aggregation levels @@ -1233,7 +1226,8 @@ void pf_ul(module_id_t module_id, &nr_of_candidates, sched_ctrl->search_space, 1<<i); - if(nr_of_candidates>0) break; + if(nr_of_candidates>0) + break; } int CCEIndex = find_pdcch_candidate(RC.nrmac[module_id], CC_id, @@ -1243,16 +1237,13 @@ void pf_ul(module_id_t module_id, sched_ctrl->coreset, Y); if (CCEIndex<0) { - LOG_D(NR_MAC, "%4d.%2d no free CCE for UL DCI UE %04x\n", frame, slot, UE_info->rnti[UE_id]); + LOG_D(NR_MAC, "%4d.%2d no free CCE for UL DCI UE %04x\n", frame, slot, iterator->UE->rnti); + iterator++; continue; } - else LOG_D(NR_MAC, "%4d.%2d free CCE for UL DCI UE %04x\n",frame,slot, UE_info->rnti[UE_id]); + else LOG_D(NR_MAC, "%4d.%2d free CCE for UL DCI UE %04x\n",frame,slot, iterator->UE->rnti); - /* reduce max_num_ue once we are sure UE can be allocated, i.e., has CCE */ - max_num_ue--; - AssertFatal(max_num_ue >= 0, "Illegal max_num_ue %d\n", max_num_ue); - - NR_CellGroupConfig_t *cg = UE_info->CellGroup[UE_id]; + NR_CellGroupConfig_t *cg = iterator->UE->CellGroup; NR_BWP_UplinkDedicated_t *ubwpd = cg && cg->spCellConfig && cg->spCellConfig->spCellConfigDedicated && cg->spCellConfig->spCellConfigDedicated->uplinkConfig ? 
cg->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP : NULL; @@ -1299,11 +1290,12 @@ void pf_ul(module_id_t module_id, max_rbSize++; if (rbStart + min_rb >= bwpSize) { - LOG_W(NR_MAC, "cannot allocate UL data for UE %d/RNTI %04x: no resources (rbStart %d, min_rb %d, bwpSize %d\n", - UE_id, UE_info->rnti[UE_id],rbStart,min_rb,bwpSize); + LOG_W(NR_MAC, "cannot allocate UL data for RNTI %04x: no resources (rbStart %d, min_rb %d, bwpSize %d)\n", + iterator->UE->rnti,rbStart,min_rb,bwpSize); return; } - else LOG_D(NR_MAC,"allocating UL data for UE %d/RNTI %04x (rbStsart %d, min_rb %d, bwpSize %d\n",UE_id, UE_info->rnti[UE_id],rbStart,min_rb,bwpSize); + else + LOG_D(NR_MAC,"allocating UL data for RNTI %04x (rbStsart %d, min_rb %d, bwpSize %d)\n", iterator->UE->rnti,rbStart,min_rb,bwpSize); /* Calculate the current scheduling bytes and the necessary RBs */ const int B = cmax(sched_ctrl->estimated_ul_buffer - sched_ctrl->sched_ul_bytes, 0); @@ -1336,7 +1328,11 @@ void pf_ul(module_id_t module_id, n_rb_sched -= sched_pusch->rbSize; for (int rb = 0; rb < sched_ctrl->sched_pusch.rbSize; rb++) + rballoc_mask[rb + sched_ctrl->sched_pusch.rbStart] ^= slbitmap; + /* reduce max_num_ue once we are sure UE can be allocated, i.e., has CCE */ + remainUEs--; + iterator++; } } @@ -1353,9 +1349,8 @@ bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t const int mu = scc ? scc->uplinkConfigCommon->initialUplinkBWP->genericParameters.subcarrierSpacing : scc_sib1->uplinkConfigCommon->initialUplinkBWP.genericParameters.subcarrierSpacing; - NR_UE_info_t *UE_info = &nr_mac->UE_info; - - if (UE_info->num_UEs == 0) + if (nr_mac->UE_info.list[0] == NULL) + // no UEs return false; const int CC_id = 0; @@ -1364,8 +1359,7 @@ bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t * have the same K2 (we don't support multiple/different K2s via different * TDAs yet). 
If the TDA is negative, it means that there is no UL slot to * schedule now (slot + k2 is not UL slot) */ - int UE_id = UE_info->list.head; - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + NR_UE_sched_ctrl_t *sched_ctrl = &nr_mac->UE_info.list[0]->UE_sched_ctrl; const int temp_tda = get_ul_tda(nr_mac, scc, slot); int K2 = get_K2(scc, scc_sib1, sched_ctrl->active_ubwp, temp_tda, mu); const int sched_frame = (frame + (slot + K2 >= nr_slots_per_frame[mu])) & 1023; @@ -1391,9 +1385,8 @@ bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t return false; // Avoid slots with the SRS - const NR_list_t *UE_list = &UE_info->list; - for (int UE_idx = UE_list->head; UE_idx >= 0; UE_idx = UE_list->next[UE_idx]) { - NR_sched_srs_t sched_srs = UE_info->UE_sched_ctrl[UE_idx].sched_srs; + UE_iterator(nr_mac->UE_info.list, UE) { + NR_sched_srs_t sched_srs = UE->UE_sched_ctrl.sched_srs; if(sched_srs.srs_scheduled && sched_srs.frame==sched_frame && sched_srs.slot==sched_slot) { return false; } @@ -1401,10 +1394,11 @@ bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t sched_ctrl->sched_pusch.slot = sched_slot; sched_ctrl->sched_pusch.frame = sched_frame; - for (UE_id = UE_info->list.next[UE_id]; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + UE_iterator(nr_mac->UE_info.list, UE2) { + NR_UE_sched_ctrl_t *sched_ctrl = &UE2->UE_sched_ctrl; AssertFatal(K2 == get_K2(scc,scc_sib1,sched_ctrl->active_ubwp, tda, mu), - "Different K2, %d(UE%d) != %ld(UE%d)\n", K2, 0, get_K2(scc,scc_sib1,sched_ctrl->active_ubwp, tda, mu), UE_id); + "Different K2, %d(UE%d) != %ld(UE%04x)\n", + K2, 0, get_K2(scc,scc_sib1,sched_ctrl->active_ubwp, tda, mu), UE2->rnti); sched_ctrl->sched_pusch.slot = sched_slot; sched_ctrl->sched_pusch.frame = sched_frame; } @@ -1464,7 +1458,7 @@ bool nr_fr1_ulsch_preprocessor(module_id_t module_id, frame_t frame, sub_frame_t pf_ul(module_id, frame, slot, - &UE_info->list, + nr_mac->UE_info.list, 2, len, rballoc_mask); @@ -1525,30 +1519,28 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot) NR_ServingCellConfigCommon_t *scc = RC.nrmac[module_id]->common_channels[0].ServingCellConfigCommon; - NR_UE_info_t *UE_info = &RC.nrmac[module_id]->UE_info; + NR_UEs_t *UE_info = &RC.nrmac[module_id]->UE_info; const NR_SIB1_t *sib1 = RC.nrmac[module_id]->common_channels[0].sib1 ? RC.nrmac[module_id]->common_channels[0].sib1->message.choice.c1->choice.systemInformationBlockType1 : NULL; - const NR_list_t *UE_list = &UE_info->list; - for (int UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) { - NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; + UE_iterator( UE_info->list, UE) { + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; if (sched_ctrl->ul_failure == 1 && get_softmodem_params()->phy_test==0) continue; - NR_CellGroupConfig_t *cg = UE_info->CellGroup[UE_id]; + NR_CellGroupConfig_t *cg = UE->CellGroup; NR_BWP_UplinkDedicated_t *ubwpd = cg && cg->spCellConfig && cg->spCellConfig->spCellConfigDedicated && cg->spCellConfig->spCellConfigDedicated->uplinkConfig ? 
cg->spCellConfig->spCellConfigDedicated->uplinkConfig->initialUplinkBWP : NULL; - NR_mac_stats_t *mac_stats = &UE_info->mac_stats[UE_id]; - mac_stats->ul.current_bytes = 0; + UE->mac_stats.ul.current_bytes = 0; /* dynamic PUSCH values (RB alloc, MCS, hence R, Qm, TBS) that change in * every TTI are pre-populated by the preprocessor and used below */ NR_sched_pusch_t *sched_pusch = &sched_ctrl->sched_pusch; - LOG_D(NR_MAC,"UE %x : sched_pusch->rbSize %d\n",UE_info->rnti[UE_id],sched_pusch->rbSize); + LOG_D(NR_MAC,"UE %04x : sched_pusch->rbSize %d\n",UE->rnti,sched_pusch->rbSize); if (sched_pusch->rbSize <= 0) continue; - uint16_t rnti = UE_info->rnti[UE_id]; + uint16_t rnti = UE->rnti; sched_ctrl->SR = false; int8_t harq_id = sched_pusch->ul_harq_pid; @@ -1556,8 +1548,8 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot) /* PP has not selected a specific HARQ Process, get a new one */ harq_id = sched_ctrl->available_ul_harq.head; AssertFatal(harq_id >= 0, - "no free HARQ process available for UE %d\n", - UE_id); + "no free HARQ process available for UE %04x\n", + UE->rnti); remove_front_nr_list(&sched_ctrl->available_ul_harq); sched_pusch->ul_harq_pid = harq_id; } else { @@ -1583,10 +1575,10 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot) NR_pusch_semi_static_t *ps = &sched_ctrl->pusch_semi_static; /* Statistics */ - AssertFatal(cur_harq->round < 8, "Indexing UL rounds[%d] is out of bounds\n", cur_harq->round); - mac_stats->ul.rounds[cur_harq->round]++; + AssertFatal(cur_harq->round < 8, "Indexing ulsch_rounds[%d] is out of bounds\n", cur_harq->round); + UE->mac_stats.ul.rounds[cur_harq->round]++; if (cur_harq->round == 0) { - mac_stats->ulsch_total_bytes_scheduled += sched_pusch->tb_size; + UE->mac_stats.ulsch_total_bytes_scheduled += sched_pusch->tb_size; /* Save information on MCS, TBS etc for the current initial transmission * so we have access to it when retransmitting */ cur_harq->sched_pusch = *sched_pusch; @@ -1606,7 +1598,7 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot) cur_harq->round, cur_harq->ndi); } - mac_stats->ul.current_bytes = sched_pusch->tb_size; + UE->mac_stats.ul.current_bytes = sched_pusch->tb_size; sched_ctrl->last_ul_frame = sched_pusch->frame; sched_ctrl->last_ul_slot = sched_pusch->slot; @@ -1789,7 +1781,7 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot) pdcch_pdu_coreset[coresetid] = pdcch_pdu; } - LOG_D(NR_MAC,"Configuring ULDCI/PDCCH in %d.%d at CCE %d, rnti %x\n", frame,slot,sched_ctrl->cce_index,rnti); + LOG_D(NR_MAC,"Configuring ULDCI/PDCCH in %d.%d at CCE %d, rnti %04x\n", frame,slot,sched_ctrl->cce_index,rnti); /* Fill PDCCH DL DCI PDU */ nfapi_nr_dl_dci_pdu_t *dci_pdu = &pdcch_pdu->dci_pdu[pdcch_pdu->numDlDci]; @@ -1827,7 +1819,7 @@ void nr_schedule_ulsch(module_id_t module_id, frame_t frame, sub_frame_t slot) &uldci_payload, ps->dci_format, ps->time_domain_allocation, - UE_info->UE_sched_ctrl[UE_id].tpc0, + UE->UE_sched_ctrl.tpc0, n_ubwp, bwp_id); fill_dci_pdu_rel15(scc, diff --git a/openair2/LAYER2/NR_MAC_gNB/mac_proto.h b/openair2/LAYER2/NR_MAC_gNB/mac_proto.h index f568cf3f650..f3dd5208d62 100644 --- a/openair2/LAYER2/NR_MAC_gNB/mac_proto.h +++ b/openair2/LAYER2/NR_MAC_gNB/mac_proto.h @@ -203,7 +203,7 @@ void config_uldci(const NR_SIB1_t *sib1, int n_ubwp, int bwp_id); -void nr_schedule_pucch(int Mod_idP, +void nr_schedule_pucch(gNB_MAC_INST* nrmac, frame_t frameP, sub_frame_t slotP); @@ -218,15 +218,14 @@ void 
nr_csi_meas_reporting(int Mod_idP, frame_t frameP, sub_frame_t slotP); -int nr_acknack_scheduling(int Mod_idP, - int UE_id, +int nr_acknack_scheduling( int Mod_idP, + NR_UE_info_t * UE, frame_t frameP, sub_frame_t slotP, int r_pucch, int do_common); -void get_pdsch_to_harq_feedback(int Mod_idP, - int UE_id, +void get_pdsch_to_harq_feedback(NR_UE_info_t *, int bwp_id, NR_SearchSpace__searchSpaceType_PR ss_type, int *max_fb_time, @@ -336,7 +335,7 @@ NR_PDSCH_TimeDomainResourceAllocationList_t *get_pdsch_TimeDomainAllocationList( const NR_SIB1_t *sib1); /* find coreset within the search space */ -NR_ControlResourceSet_t *get_coreset(module_id_t module_idP, +NR_ControlResourceSet_t *get_coreset(gNB_MAC_INST *nrmac, NR_ServingCellConfigCommon_t *scc, void *bwp, NR_SearchSpace_t *ss, @@ -394,7 +393,6 @@ int NRRIV2BW(int locationAndBandwidth,int N_RB); int NRRIV2PRBOFFSET(int locationAndBandwidth,int N_RB); /* Functions to manage an NR_list_t */ -void dump_nr_list(NR_list_t *listP); void create_nr_list(NR_list_t *listP, int len); void resize_nr_list(NR_list_t *list, int new_len); void destroy_nr_list(NR_list_t *list); @@ -404,13 +402,13 @@ void add_tail_nr_list(NR_list_t *listP, int id); void add_front_nr_list(NR_list_t *listP, int id); void remove_front_nr_list(NR_list_t *listP); -int find_nr_UE_id(module_id_t mod_idP, rnti_t rntiP); +NR_UE_info_t * find_nr_UE(NR_UEs_t* UEs, rnti_t rntiP); int find_nr_RA_id(module_id_t mod_idP, int CC_idP, rnti_t rntiP); -int add_new_nr_ue(module_id_t mod_idP, rnti_t rntiP, NR_CellGroupConfig_t *CellGroup); +NR_UE_info_t*add_new_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rntiP, NR_CellGroupConfig_t *CellGroup); -void mac_remove_nr_ue(module_id_t mod_id, rnti_t rnti); +void mac_remove_nr_ue(gNB_MAC_INST *nr_mac, rnti_t rnti); void nr_mac_remove_ra_rnti(module_id_t mod_id, rnti_t rnti); @@ -424,7 +422,7 @@ int allocate_nr_CCEs(gNB_MAC_INST *nr_mac, int nr_get_default_pucch_res(int pucch_ResourceCommon); -void compute_csi_bitlen(NR_CSI_MeasConfig_t *csi_MeasConfig, NR_UE_info_t *UE_info, int UE_id, module_id_t Mod_idP); +void compute_csi_bitlen(NR_CSI_MeasConfig_t *csi_MeasConfig, NR_UE_info_t *UE); int get_dlscs(nfapi_nr_config_request_t *cfg); @@ -515,7 +513,7 @@ int get_dci_format(NR_UE_sched_ctrl_t *sched_ctrl); const int get_dl_tda(const gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *scc, int slot); const int get_ul_tda(const gNB_MAC_INST *nrmac, const NR_ServingCellConfigCommon_t *scc, int slot); -bool find_free_CCE(module_id_t module_id, sub_frame_t slot, int UE_id); +bool find_free_CCE(sub_frame_t slot, NR_UE_info_t *UE); bool nr_find_nb_rb(uint16_t Qm, uint16_t R, @@ -534,7 +532,7 @@ int get_mcs_from_bler(const NR_bler_options_t *bler_options, int max_mcs, frame_t frame); -void nr_sr_reporting(int Mod_idP, frame_t frameP, sub_frame_t slotP); +void nr_sr_reporting(gNB_MAC_INST *nrmac, frame_t frameP, sub_frame_t slotP); void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen, bool reset_rsrp); diff --git a/openair2/LAYER2/NR_MAC_gNB/main.c b/openair2/LAYER2/NR_MAC_gNB/main.c index 4c14233a32a..e318b4be8e4 100644 --- a/openair2/LAYER2/NR_MAC_gNB/main.c +++ b/openair2/LAYER2/NR_MAC_gNB/main.c @@ -57,7 +57,7 @@ void *nrmac_stats_thread(void *arg) { AssertFatal(fd!=NULL,"Cannot open nrMAC_stats.log, error %s\n",strerror(errno)); while (oai_exit == 0) { - dump_mac_stats(gNB,output,MACSTATSSTRLEN,false); + dump_mac_stats(gNB,output,MACSTATSSTRLEN,false); fprintf(fd,"%s\n",output); fflush(fd); usleep(200000); @@ -68,40 +68,40 @@ void 
*nrmac_stats_thread(void *arg) { } void clear_mac_stats(gNB_MAC_INST *gNB) { - memset((void*)gNB->UE_info.mac_stats,0,MAX_MOBILES_PER_GNB*sizeof(NR_mac_stats_t)); + UE_iterator(gNB->UE_info.list, UE) { + memset(&UE->mac_stats,0,sizeof(UE->mac_stats)); + } } void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen, bool reset_rsrp) { - NR_UE_info_t *UE_info = &gNB->UE_info; int num = 1; int stroff=0; - if (UE_info->num_UEs == 0) return; + pthread_mutex_lock(&gNB->UE_info.mutex); + UE_iterator(gNB->UE_info.list, UE) { + NR_UE_sched_ctrl_t *sched_ctrl = &UE->UE_sched_ctrl; + NR_mac_stats_t *stats = &UE->mac_stats; + const int avg_rsrp = stats->num_rsrp_meas > 0 ? stats->cumul_rsrp / stats->num_rsrp_meas : 0; - for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { - const NR_UE_sched_ctrl_t *sched_ctrl = &UE_info->UE_sched_ctrl[UE_id]; - NR_mac_stats_t *stats = &UE_info->mac_stats[UE_id]; - const int avg_rsrp = stats->num_rsrp_meas > 0 ? stats->cumul_rsrp / stats->num_rsrp_meas : 0; - stroff+=sprintf(output+stroff,"UE ID %d RNTI %04x (%d/%d) PH %d dB PCMAX %d dBm, average RSRP %d (%d meas)\n", - UE_id, - UE_info->rnti[UE_id], - num++, - UE_info->num_UEs, - sched_ctrl->ph, - sched_ctrl->pcmax, - avg_rsrp, - stats->num_rsrp_meas); - stroff+=sprintf(output+stroff,"UE %d: CQI %d, RI %d, PMI (%d,%d)\n", - UE_id, - UE_info->UE_sched_ctrl[UE_id].CSI_report.cri_ri_li_pmi_cqi_report.wb_cqi_1tb, - UE_info->UE_sched_ctrl[UE_id].CSI_report.cri_ri_li_pmi_cqi_report.ri+1, - UE_info->UE_sched_ctrl[UE_id].CSI_report.cri_ri_li_pmi_cqi_report.pmi_x1, - UE_info->UE_sched_ctrl[UE_id].CSI_report.cri_ri_li_pmi_cqi_report.pmi_x2); - - stroff+=sprintf(output+stroff,"UE %d: dlsch_rounds %"PRIu64"/%"PRIu64"/%"PRIu64"/%"PRIu64", dlsch_errors %"PRIu64", pucch0_DTX %d, BLER %.5f MCS %d\n", - UE_id, + stroff+=sprintf(output+stroff,"UE RNTI %04x (%d) PH %d dB PCMAX %d dBm, average RSRP %d (%d meas)\n", + UE->rnti, + num++, + sched_ctrl->ph, + sched_ctrl->pcmax, + avg_rsrp, + stats->num_rsrp_meas); + stroff+=sprintf(output+stroff,"UE %04x: CQI %d, RI %d, PMI (%d,%d)\n", + UE->rnti, + UE->UE_sched_ctrl.CSI_report.cri_ri_li_pmi_cqi_report.wb_cqi_1tb, + UE->UE_sched_ctrl.CSI_report.cri_ri_li_pmi_cqi_report.ri+1, + UE->UE_sched_ctrl.CSI_report.cri_ri_li_pmi_cqi_report.pmi_x1, + UE->UE_sched_ctrl.CSI_report.cri_ri_li_pmi_cqi_report.pmi_x2); + + stroff+=sprintf(output+stroff,"UE %04x: dlsch_rounds %"PRIu64"/%"PRIu64"/%"PRIu64"/%"PRIu64", dlsch_errors %"PRIu64", pucch0_DTX %d, BLER %.5f MCS %d\n", + UE->rnti, + stats->dl.rounds[0], stats->dl.rounds[1], stats->dl.rounds[2], stats->dl.rounds[3], stats->dl.errors, @@ -112,9 +112,9 @@ void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen, bool reset_rsrp stats->num_rsrp_meas = 0; stats->cumul_rsrp = 0; } - stroff+=sprintf(output+stroff,"UE %d: dlsch_total_bytes %"PRIu64"\n", UE_id, stats->dl.total_bytes); - stroff+=sprintf(output+stroff,"UE %d: ulsch_rounds %"PRIu64"/%"PRIu64"/%"PRIu64"/%"PRIu64", ulsch_DTX %d, ulsch_errors %"PRIu64", BLER %.5f MCS %d\n", - UE_id, + stroff+=sprintf(output+stroff,"UE %04x: dlsch_total_bytes %"PRIu64"\n", UE->rnti, stats->dl.total_bytes); + stroff+=sprintf(output+stroff,"UE %04x: ulsch_rounds %"PRIu64"/%"PRIu64"/%"PRIu64"/%"PRIu64", ulsch_DTX %d, ulsch_errors %"PRIu64"\n", + UE->rnti, stats->ul.rounds[0], stats->ul.rounds[1], stats->ul.rounds[2], stats->ul.rounds[3], stats->ulsch_DTX, @@ -122,20 +122,22 @@ void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen, bool reset_rsrp 
sched_ctrl->ul_bler_stats.bler, sched_ctrl->ul_bler_stats.mcs); stroff+=sprintf(output+stroff, - "UE %d: ulsch_total_bytes_scheduled %"PRIu64", ulsch_total_bytes_received %"PRIu64"\n", - UE_id, + "UE %04x: ulsch_total_bytes_scheduled %"PRIu64", ulsch_total_bytes_received %"PRIu64"\n", + UE->rnti, stats->ulsch_total_bytes_scheduled, stats->ul.total_bytes); for (int lc_id = 0; lc_id < 63; lc_id++) { if (stats->dl.lc_bytes[lc_id] > 0) { - stroff+=sprintf(output+stroff, "UE %d: LCID %d: %"PRIu64" bytes TX\n", UE_id, lc_id, stats->dl.lc_bytes[lc_id]); - LOG_D(NR_MAC, "UE %d: LCID %d: %"PRIu64" bytes TX\n", UE_id, lc_id, stats->dl.lc_bytes[lc_id]); + stroff+=sprintf(output+stroff, "UE %04x: LCID %d: %"PRIu64" bytes TX\n", UE->rnti, lc_id, stats->dl.lc_bytes[lc_id]); + LOG_D(NR_MAC, "UE %04x: LCID %d: %"PRIu64" bytes TX\n", UE->rnti, lc_id, stats->dl.lc_bytes[lc_id]); } if (stats->ul.lc_bytes[lc_id] > 0) { - stroff+=sprintf(output+stroff, "UE %d: LCID %d: %"PRIu64" bytes RX\n", UE_id, lc_id, stats->ul.lc_bytes[lc_id]); - LOG_D(NR_MAC, "UE %d: LCID %d: %"PRIu64" bytes RX\n", UE_id, lc_id, stats->ul.lc_bytes[lc_id]); + stroff+=sprintf(output+stroff, "UE %04x: LCID %d: %"PRIu64" bytes RX\n", UE->rnti, lc_id, stats->ul.lc_bytes[lc_id]); + LOG_D(NR_MAC, "UE %04x: LCID %d: %"PRIu64" bytes RX\n", UE->rnti, lc_id, stats->ul.lc_bytes[lc_id]); + } } } + pthread_mutex_unlock(&gNB->UE_info.mutex); print_meas(&gNB->eNB_scheduler, "DL & UL scheduling timing stats", NULL, NULL); print_meas(&gNB->schedule_dlsch,"dlsch scheduler",NULL,NULL); print_meas(&gNB->rlc_data_req, "rlc_data_req",NULL,NULL); @@ -146,8 +148,6 @@ void dump_mac_stats(gNB_MAC_INST *gNB, char *output, int strlen, bool reset_rsrp void mac_top_init_gNB(void) { module_id_t i; - int list_el; - NR_UE_info_t *UE_info; gNB_MAC_INST *nrmac; LOG_I(MAC, "[MAIN] Init function start:nb_nr_macrlc_inst=%d\n",RC.nb_nr_macrlc_inst); @@ -181,6 +181,8 @@ void mac_top_init_gNB(void) RC.nrmac[i]->first_MIB = true; + pthread_mutex_init(&RC.nrmac[i]->UE_info.mutex, NULL); + if (get_softmodem_params()->phy_test) { RC.nrmac[i]->pre_processor_dl = nr_preprocessor_phytest; RC.nrmac[i]->pre_processor_ul = nr_ul_preprocessor_phytest; @@ -212,13 +214,7 @@ void mac_top_init_gNB(void) nrmac = RC.nrmac[i]; nrmac->if_inst = NR_IF_Module_init(i); - - UE_info = &nrmac->UE_info; - UE_info->num_UEs = 0; - create_nr_list(&UE_info->list, MAX_MOBILES_PER_GNB); - for (list_el = 0; list_el < MAX_MOBILES_PER_GNB; list_el++) { - UE_info->active[list_el] = false; - } + memset(&nrmac->UE_info, 0, sizeof(nrmac->UE_info)); } srand48(0); diff --git a/openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h b/openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h index 4918e4ee087..1aca7cae577 100644 --- a/openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h +++ b/openair2/LAYER2/NR_MAC_gNB/nr_mac_gNB.h @@ -91,6 +91,7 @@ typedef struct { int len; } NR_list_t; + typedef enum { RA_IDLE = 0, Msg2 = 1, @@ -703,24 +704,34 @@ typedef struct NR_bler_options { /*! 
\brief UE list used by gNB to order UEs/CC for scheduling*/ #define MAX_CSI_REPORTCONFIG 48 typedef struct { + rnti_t rnti; /// scheduling control info - nr_csi_report_t csi_report_template[MAX_MOBILES_PER_GNB][MAX_CSI_REPORTCONFIG]; - NR_UE_sched_ctrl_t UE_sched_ctrl[MAX_MOBILES_PER_GNB]; - NR_mac_stats_t mac_stats[MAX_MOBILES_PER_GNB]; - NR_list_t list; - int num_UEs; - bool active[MAX_MOBILES_PER_GNB]; - rnti_t rnti[MAX_MOBILES_PER_GNB]; - NR_CellGroupConfig_t *CellGroup[MAX_MOBILES_PER_GNB]; + nr_csi_report_t csi_report_template[MAX_CSI_REPORTCONFIG]; + NR_UE_sched_ctrl_t UE_sched_ctrl; + NR_mac_stats_t mac_stats; + NR_CellGroupConfig_t *CellGroup; /// CCE indexing - int m[MAX_MOBILES_PER_GNB]; + int m; // UE selected beam index - uint8_t UE_beam_index[MAX_MOBILES_PER_GNB]; - bool Msg4_ACKed[MAX_MOBILES_PER_GNB]; + uint8_t UE_beam_index; + bool Msg4_ACKed; /// Sched CSI-RS: scheduling decisions bool sched_csirs; + NR_gNB_UCI_STATS_t uci_statS; + float ul_thr_ue; + float dl_thr_ue; + int layers; } NR_UE_info_t; +typedef struct { + /// scheduling control info + // last element always NULL + pthread_mutex_t mutex; + NR_UE_info_t *list[MAX_MOBILES_PER_GNB+1]; +} NR_UEs_t; + +#define UE_iterator(BaSe, VaR) NR_UE_info_t ** VaR##pptr=BaSe, *VaR; while ((VaR=*(VaR##pptr++))) + typedef void (*nr_pp_impl_dl)(module_id_t mod_id, frame_t frame, sub_frame_t slot); @@ -779,7 +790,7 @@ typedef struct gNB_MAC_INST_s { /// NFAPI DL PDU structure nfapi_nr_tx_data_request_t TX_req[NFAPI_CC_MAX]; int pdcch_cand[MAX_NUM_CORESET]; - NR_UE_info_t UE_info; + NR_UEs_t UE_info; /// UL handle uint32_t ul_handle; diff --git a/openair2/NR_PHY_INTERFACE/NR_IF_Module.c b/openair2/NR_PHY_INTERFACE/NR_IF_Module.c index c860d847d95..8d4de8823bc 100644 --- a/openair2/NR_PHY_INTERFACE/NR_IF_Module.c +++ b/openair2/NR_PHY_INTERFACE/NR_IF_Module.c @@ -372,9 +372,6 @@ static void match_crc_rx_pdu(nfapi_nr_rx_data_indication_t *rx_ind, nfapi_nr_crc void NR_UL_indication(NR_UL_IND_t *UL_info) { AssertFatal(UL_info!=NULL,"UL_info is null\n"); -#ifdef DUMP_FAPI - dump_ul(UL_info); -#endif module_id_t module_id = UL_info->module_id; int CC_id = UL_info->CC_id; NR_Sched_Rsp_t *sched_info = &NR_Sched_INFO[module_id][CC_id]; diff --git a/openair2/RRC/NR/rrc_gNB.c b/openair2/RRC/NR/rrc_gNB.c index 3d41787a2d7..f5879a0c49a 100755 --- a/openair2/RRC/NR/rrc_gNB.c +++ b/openair2/RRC/NR/rrc_gNB.c @@ -1643,7 +1643,7 @@ rrc_gNB_generate_RRCReestablishment( ue_context->Srb0.Tx_buffer.payload_size); #if(0) /* TODO : It may be needed if gNB goes into full stack working. */ - UE_id = find_nr_UE_id(module_id, rnti); + UE = find_nr_UE(module_id, rnti); if (UE_id != -1) { /* Activate reject timer, if RRCComplete not received after 10 frames, reject UE */ RC.nrmac[module_id]->UE_info.UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 1; @@ -3549,6 +3549,7 @@ void nr_rrc_subframe_process(protocol_ctxt_t *const ctxt_pP, const int CC_id) { FILE *fd=NULL;//fopen("nrRRCstats.log","w"); RB_FOREACH(ue_context_p, rrc_nr_ue_tree_s, &(RC.nrrrc[ctxt_pP->module_id]->rrc_ue_head)) { ctxt_pP->rnti = ue_context_p->ue_id_rnti; + gNB_MAC_INST *nrmac=RC.nrmac[ctxt_pP->module_id]; //WHAT A BEAUTIFULL RACE CONDITION !!! 
if (fd) { if (ue_context_p->ue_context.Initialue_identity_5g_s_TMSI.presence == TRUE) { @@ -3588,7 +3589,7 @@ void nr_rrc_subframe_process(protocol_ctxt_t *const ctxt_pP, const int CC_id) { // Remove here the MAC and RRC context when RRC is not connected or gNB is not connected to CN5G if(ue_context_p->ue_context.StatusRrc < NR_RRC_CONNECTED || ue_context_p->ue_context.gNB_ue_ngap_id == 0) { - mac_remove_nr_ue(ctxt_pP->module_id, ctxt_pP->rnti); + mac_remove_nr_ue(nrmac, ctxt_pP->rnti); rrc_rlc_remove_ue(ctxt_pP); pdcp_remove_UE(ctxt_pP); @@ -3612,7 +3613,7 @@ void nr_rrc_subframe_process(protocol_ctxt_t *const ctxt_pP, const int CC_id) { ue_context_p->ue_context.rnti); ue_context_p->ue_context.ue_release_timer_rrc = 0; - mac_remove_nr_ue(ctxt_pP->module_id, ctxt_pP->rnti); + mac_remove_nr_ue(nrmac, ctxt_pP->rnti); rrc_rlc_remove_ue(ctxt_pP); pdcp_remove_UE(ctxt_pP); newGtpuDeleteAllTunnels(ctxt_pP->instance, ctxt_pP->rnti); diff --git a/openair2/RRC/NR/rrc_gNB_nsa.c b/openair2/RRC/NR/rrc_gNB_nsa.c index 8040fcb13c3..07a6bd85be1 100644 --- a/openair2/RRC/NR/rrc_gNB_nsa.c +++ b/openair2/RRC/NR/rrc_gNB_nsa.c @@ -426,7 +426,8 @@ void rrc_remove_nsa_user(gNB_RRC_INST *rrc, int rnti) { rrc_rlc_remove_ue(&ctxt); - mac_remove_nr_ue(rrc->module_id, rnti); + // WHAT A RACE CONDITION + mac_remove_nr_ue(RC.nrmac[rrc->module_id], rnti); gtpv1u_enb_delete_tunnel_req_t tmp={0}; tmp.rnti=rnti; tmp.from_gnb=1; diff --git a/openair2/UTIL/OPT/opt.h b/openair2/UTIL/OPT/opt.h index ae6a3eb2a65..d08790a0584 100644 --- a/openair2/UTIL/OPT/opt.h +++ b/openair2/UTIL/OPT/opt.h @@ -109,11 +109,14 @@ typedef enum radio_type_e { extern int opt_enabled; #define trace_pdu(x...) if (opt_enabled) trace_pdu_implementation(0, x) -#define trace_NRpdu(x...) if (opt_enabled) trace_pdu_implementation(1, x) +#define trace_NRpdu(x...) if (opt_enabled) nr_trace_pdu_implementation(1, x) void trace_pdu_implementation(int nr, int direction, uint8_t *pdu_buffer, unsigned int pdu_buffer_size, int ueid, int rntiType, int rnti, uint16_t sysFrame, uint8_t subframe, int oob_event, int oob_event_value); +void nr_trace_pdu_implementation(int nr, int direction, uint8_t *pdu_buffer, unsigned int pdu_buffer_size, + int rntiType, int rnti, uint16_t sysFrame, uint8_t subframe, + int oob_event, int oob_event_value); int init_opt(void); diff --git a/openair2/UTIL/OPT/probe.c b/openair2/UTIL/OPT/probe.c index 8731eb294a5..9359c48e95a 100644 --- a/openair2/UTIL/OPT/probe.c +++ b/openair2/UTIL/OPT/probe.c @@ -476,6 +476,14 @@ static void SendFrameNR(guint8 radioType, guint8 direction, guint8 rntiType, extern RAN_CONTEXT_t RC; #include <openair1/PHY/phy_extern_ue.h> /* Remote serveraddress (where Wireshark is running) */ +void nr_trace_pdu_implementation(int nr, int direction, uint8_t *pdu_buffer, unsigned int pdu_buffer_size, + int rntiType, int rnti, uint16_t sysFrameNumber, uint8_t subFrameNumber, int oob_event, + int oob_event_value) { + trace_pdu_implementation(nr, direction, pdu_buffer, pdu_buffer_size, + rnti, rntiType, rnti, sysFrameNumber, subFrameNumber, oob_event, + oob_event_value); +} + void trace_pdu_implementation(int nr, int direction, uint8_t *pdu_buffer, unsigned int pdu_buffer_size, int ueid, int rntiType, int rnti, uint16_t sysFrameNumber, uint8_t subFrameNumber, int oob_event, int oob_event_value) { -- GitLab
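
For reviewers, a minimal standalone sketch of the data-structure change this patch revolves around: the per-gNB UE bookkeeping becomes a NULL-terminated array of NR_UE_info_t pointers (NR_UEs_t), walked with the new UE_iterator macro and searched by RNTI with find_nr_UE(). The struct below is deliberately stripped down to the RNTI field so the example compiles on its own, and find_ue_by_rnti() is a hypothetical stand-in that only mirrors what find_nr_UE() does on the real structure.

```c
/* Standalone illustration of the NULL-terminated UE list and the
 * UE_iterator macro added in nr_mac_gNB.h by this patch. The real
 * NR_UE_info_t carries sched_ctrl, mac_stats, CellGroup, etc.; only
 * the RNTI is kept here so the example builds on its own. */
#include <stdio.h>
#include <stdint.h>

#define MAX_MOBILES_PER_GNB 4

typedef uint16_t rnti_t;

typedef struct {
  rnti_t rnti; /* the only identifier kept in the MAC after this patch */
} NR_UE_info_t;

typedef struct {
  /* last element always NULL, as in nr_mac_gNB.h */
  NR_UE_info_t *list[MAX_MOBILES_PER_GNB + 1];
} NR_UEs_t;

/* Same shape as the macro in the patch: walk the pointer array until
 * the NULL sentinel is reached. */
#define UE_iterator(BaSe, VaR) NR_UE_info_t **VaR##pptr = BaSe, *VaR; while ((VaR = *(VaR##pptr++)))

/* Hypothetical stand-in mirroring find_nr_UE(): lookup by RNTI. */
static NR_UE_info_t *find_ue_by_rnti(NR_UEs_t *UEs, rnti_t rnti)
{
  UE_iterator(UEs->list, UE) {
    if (UE->rnti == rnti)
      return UE;
  }
  return NULL;
}

int main(void)
{
  NR_UE_info_t a = {.rnti = 0x1234}, b = {.rnti = 0x5678};
  NR_UEs_t UE_info = {.list = {&a, &b, NULL}};

  /* Iterate all connected UEs, as the schedulers now do. */
  UE_iterator(UE_info.list, UE)
    printf("UE RNTI %04x\n", UE->rnti);

  NR_UE_info_t *hit = find_ue_by_rnti(&UE_info, 0x5678);
  printf("lookup 0x5678 -> %s\n", hit ? "found" : "not found");
  return 0;
}
```

Note that the macro declares the iteration variables itself, so a second iteration in the same scope needs a different variable name (as the patch does with UE2 in nr_fr1_ulsch_preprocessor).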
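
Relatedly, the pf_ul() rewrite replaces the hand-rolled max-coefficient search with an array of UEsched_t entries that is sorted before allocation. Below is a standalone sketch of that ordering, under the assumption that the intent is highest coefficient first (as in the loop it replaces) and using the conventional qsort(base, nmemb, size, cmp) call with a three-way comparator; types and values are illustrative only.

```c
/* Sketch of the proportional-fair ordering pf_ul() relies on: UEs
 * eligible for new data are collected into a UEsched_t array and
 * sorted so that the UE with the highest coefficient (TBS / average
 * throughput) is served first. */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
  unsigned rnti;
  float coef; /* proportional-fair coefficient */
} UEsched_t;

/* Descending order: larger coefficient sorts first; returns the usual
 * negative/zero/positive result expected by qsort(). */
static int cmp_coef_desc(const void *p, const void *q)
{
  const float a = ((const UEsched_t *)p)->coef;
  const float b = ((const UEsched_t *)q)->coef;
  return (a < b) - (a > b);
}

int main(void)
{
  UEsched_t sched[] = {
    {.rnti = 0x1111, .coef = 0.7f},
    {.rnti = 0x2222, .coef = 2.3f},
    {.rnti = 0x3333, .coef = 1.1f},
  };
  const size_t n = sizeof(sched) / sizeof(sched[0]);

  /* qsort(base, element count, element size, comparator) */
  qsort(sched, n, sizeof(sched[0]), cmp_coef_desc);

  for (size_t i = 0; i < n; i++)
    printf("RNTI %04x coef %.2f\n", sched[i].rnti, sched[i].coef);
  return 0;
}
```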