From 9b4725ad0bf07428504080f6704678c1a577cdcc Mon Sep 17 00:00:00 2001 From: Robert Schmidt <robert.schmidt@eurecom.fr> Date: Wed, 5 Feb 2020 15:49:33 +0200 Subject: [PATCH] Shortened UL-PP, remove CC and slicing --- openair2/ENB_APP/flexran_agent_ran_api.c | 14 +- openair2/LAYER2/MAC/eNB_scheduler_fairRR.c | 10 +- .../LAYER2/MAC/eNB_scheduler_primitives.c | 21 - openair2/LAYER2/MAC/eNB_scheduler_ulsch.c | 2 +- openair2/LAYER2/MAC/mac.h | 7 +- openair2/LAYER2/MAC/mac_proto.h | 7 +- openair2/LAYER2/MAC/main.c | 1 - openair2/LAYER2/MAC/pre_processor.c | 368 ++++++------------ 8 files changed, 148 insertions(+), 282 deletions(-) diff --git a/openair2/ENB_APP/flexran_agent_ran_api.c b/openair2/ENB_APP/flexran_agent_ran_api.c index 4a29a4ba5af..32cb8c74e18 100644 --- a/openair2/ENB_APP/flexran_agent_ran_api.c +++ b/openair2/ENB_APP/flexran_agent_ran_api.c @@ -3037,7 +3037,7 @@ void flexran_set_ue_dl_slice_idx(mid_t mod_id, mid_t ue_id, int slice_idx) { int flexran_get_ue_ul_slice_id(mid_t mod_id, mid_t ue_id) { if (!mac_is_present(mod_id)) return -1; - int slice_idx = RC.mac[mod_id]->UE_info.assoc_ul_slice_idx[ue_id]; + int slice_idx = 0; //RC.mac[mod_id]->UE_info.assoc_ul_slice_idx[ue_id]; if (slice_idx >= 0 && slice_idx < RC.mac[mod_id]->slice_info.n_ul) return RC.mac[mod_id]->slice_info.ul[slice_idx].id; @@ -3052,7 +3052,7 @@ void flexran_set_ue_ul_slice_idx(mid_t mod_id, mid_t ue_id, int slice_idx) { if (!flexran_ul_slice_exists(mod_id, slice_idx)) return; - RC.mac[mod_id]->UE_info.assoc_ul_slice_idx[ue_id] = slice_idx; + //RC.mac[mod_id]->UE_info.assoc_ul_slice_idx[ue_id] = slice_idx; } int flexran_dl_slice_exists(mid_t mod_id, int slice_idx) { @@ -3397,12 +3397,12 @@ int flexran_remove_ul_slice(mid_t mod_id, int slice_idx) { memset(&sli->ul[sli->n_ul], 0, sizeof(sli->ul[sli->n_ul])); /* all UEs that have been in the old slice are put into slice index 0 */ - int *assoc_list = RC.mac[mod_id]->UE_info.assoc_ul_slice_idx; + //int *assoc_list = RC.mac[mod_id]->UE_info.assoc_ul_slice_idx; - for (int i = 0; i < MAX_MOBILES_PER_ENB; ++i) { - if (assoc_list[i] == slice_idx) - assoc_list[i] = 0; - } + //for (int i = 0; i < MAX_MOBILES_PER_ENB; ++i) { + // if (assoc_list[i] == slice_idx) + // assoc_list[i] = 0; + //} return sli->n_ul; } diff --git a/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c b/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c index 96eededc1a3..d486c59ff5e 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_fairRR.c @@ -2695,7 +2695,7 @@ void ulsch_scheduler_pre_processor_fairRR(module_id_t module_idP, if ( rb_table[rb_table_index] <= average_rbs ) { // assigne RBS( nb_rb) first_rb[CC_id] = first_rb[CC_id] + rb_table[rb_table_index]; - UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = rb_table[rb_table_index]; + UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul = rb_table[rb_table_index]; UE_info->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = rb_table_index; UE_info->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = mcs; } @@ -2711,7 +2711,7 @@ void ulsch_scheduler_pre_processor_fairRR(module_id_t module_idP, } first_rb[CC_id] = first_rb[CC_id] + rb_table[rb_table_index]; - UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = rb_table[rb_table_index]; + UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul = rb_table[rb_table_index]; UE_info->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = rb_table_index; UE_info->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = mcs; } @@ -2719,13 +2719,13 
@@ void ulsch_scheduler_pre_processor_fairRR(module_id_t module_idP, if (mac_eNB_get_rrc_status(module_idP,UE_RNTI(module_idP, UE_id)) < RRC_CONNECTED) { // assigne RBS( 6 RBs) first_rb[CC_id] = first_rb[CC_id] + 6; - UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 6; + UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul = 6; UE_info->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 5; UE_info->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10; } else { // assigne RBS( 3 RBs) first_rb[CC_id] = first_rb[CC_id] + 3; - UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 3; + UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul = 3; UE_info->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 2; UE_info->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10; } @@ -2733,7 +2733,7 @@ void ulsch_scheduler_pre_processor_fairRR(module_id_t module_idP, } else if ( ulsch_ue_select[CC_id].list[ulsch_ue_num].ue_priority == SCH_UL_INACTIVE ) { // assigne RBS( 3 RBs) first_rb[CC_id] = first_rb[CC_id] + 3; - UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[0] = 3; + UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul = 3; UE_info->UE_template[CC_id][UE_id].pre_allocated_rb_table_index_ul = 2; UE_info->UE_template[CC_id][UE_id].pre_assigned_mcs_ul = 10; } diff --git a/openair2/LAYER2/MAC/eNB_scheduler_primitives.c b/openair2/LAYER2/MAC/eNB_scheduler_primitives.c index bb0b0604d3d..fdd4459c45c 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_primitives.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_primitives.c @@ -2209,8 +2209,6 @@ add_new_ue(module_id_t mod_idP, 0, sizeof(eNB_UE_STATS)); UE_info->UE_sched_ctrl[UE_id].ue_reestablishment_reject_timer = 0; - /* default slice in case there was something different */ - UE_info->assoc_ul_slice_idx[UE_id] = 0; UE_info->UE_sched_ctrl[UE_id].ta_update = 31; for (j = 0; j < 8; j++) { @@ -5055,22 +5053,3 @@ nb_rbs_allowed_slice(float rb_percentage, { return (uint16_t) floor(rb_percentage * total_rbs); } - -//------------------------------------------------------------------------------ -int -ue_ul_slice_membership(module_id_t mod_id, - int UE_id, - int slice_idx) -//------------------------------------------------------------------------------ -{ - eNB_MAC_INST *eNB = RC.mac[mod_id]; - - if (slice_idx < 0 || slice_idx >= eNB->slice_info.n_ul) { - LOG_W(MAC, "out of range slice index %d (slice ID %d)\n", - slice_idx, - eNB->slice_info.dl[slice_idx].id); - return 0; - } - - return eNB->UE_info.active[UE_id] == TRUE && eNB->UE_info.assoc_ul_slice_idx[UE_id] == slice_idx; -} diff --git a/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c b/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c index d34fa28fa51..a6083fbf1c2 100644 --- a/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c +++ b/openair2/LAYER2/MAC/eNB_scheduler_ulsch.c @@ -1334,7 +1334,7 @@ schedule_ulsch_rnti(module_id_t module_idP, exit(1); } - UE_info->first_rb_offset[CC_id][0] = n_rb_ul_tab; + UE_info->first_rb_offset[CC_id] = n_rb_ul_tab; /* * ULSCH preprocessor: set UE_template-> diff --git a/openair2/LAYER2/MAC/mac.h b/openair2/LAYER2/MAC/mac.h index 6f445eabb7f..0a7e616d915 100644 --- a/openair2/LAYER2/MAC/mac.h +++ b/openair2/LAYER2/MAC/mac.h @@ -824,7 +824,7 @@ typedef struct { uint16_t cshift[8]; // num_max_harq /// Number of Allocated RBs by the ulsch preprocessor - uint8_t pre_allocated_nb_rb_ul[MAX_NUM_SLICES]; + uint8_t pre_allocated_nb_rb_ul; /// index of Allocated RBs by the ulsch preprocessor int8_t pre_allocated_rb_table_index_ul; @@ -920,8 +920,6 @@ 
typedef struct { /// number of bytes to schedule for each LC uint32_t dl_lc_bytes[MAX_NUM_LCID]; - uint16_t max_rbs_allowed_slice_uplink[NFAPI_CC_MAX][MAX_NUM_SLICES]; - // resource scheduling information /// Current DL harq round per harq_pid on each CC @@ -1149,8 +1147,7 @@ typedef struct { /// Sorting criteria for the UE list in the MAC preprocessor uint16_t sorting_criteria[MAX_NUM_SLICES][CR_NUM]; - uint16_t first_rb_offset[NFAPI_CC_MAX][MAX_NUM_SLICES]; - int assoc_ul_slice_idx[MAX_MOBILES_PER_ENB]; + uint16_t first_rb_offset[NFAPI_CC_MAX]; } UE_info_t; /*! \brief deleting control information*/ diff --git a/openair2/LAYER2/MAC/mac_proto.h b/openair2/LAYER2/MAC/mac_proto.h index ef2f0fd5f73..0804dcd8c34 100644 --- a/openair2/LAYER2/MAC/mac_proto.h +++ b/openair2/LAYER2/MAC/mac_proto.h @@ -686,8 +686,11 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, uint16_t *first_rb); void store_ulsch_buffer(module_id_t module_idP, int frameP, sub_frame_t subframeP); -void assign_max_mcs_min_rb(module_id_t module_idP, int slice_idx, int frameP, - sub_frame_t subframeP, uint16_t *first_rb); +void assign_max_mcs_min_rb(module_id_t module_idP, + int CC_id, + int frameP, + sub_frame_t subframeP, + uint16_t *first_rb); void adjust_bsr_info(int buffer_occupancy, uint16_t TBS, UE_TEMPLATE *UE_template); diff --git a/openair2/LAYER2/MAC/main.c b/openair2/LAYER2/MAC/main.c index ae1e7aee376..1a88623a1f3 100644 --- a/openair2/LAYER2/MAC/main.c +++ b/openair2/LAYER2/MAC/main.c @@ -55,7 +55,6 @@ void init_UE_info(UE_info_t *UE_info) memset(UE_info->eNB_UE_stats, 0, sizeof(UE_info->eNB_UE_stats)); memset(UE_info->UE_sched_ctrl, 0, sizeof(UE_info->UE_sched_ctrl)); memset(UE_info->active, 0, sizeof(UE_info->active)); - memset(UE_info->assoc_ul_slice_idx, 0, sizeof(UE_info->assoc_ul_slice_idx)); } void init_slice_info(slice_info_t *sli) diff --git a/openair2/LAYER2/MAC/pre_processor.c b/openair2/LAYER2/MAC/pre_processor.c index 4b9925968d5..a1cd64a7d57 100644 --- a/openair2/LAYER2/MAC/pre_processor.c +++ b/openair2/LAYER2/MAC/pre_processor.c @@ -198,7 +198,6 @@ int round_robin_dl(module_id_t Mod_id, void sort_ue_ul(module_id_t module_idP, - int slice_idx, int sched_frameP, sub_frame_t sched_subframeP, rnti_t *rntiTable); @@ -425,190 +424,98 @@ void ulsch_scheduler_pre_processor(module_id_t module_idP, int sched_frameP, unsigned char sched_subframeP, uint16_t *first_rb) { - int UE_id; - uint16_t n; - uint8_t harq_pid; - uint16_t nb_allocated_rbs[NFAPI_CC_MAX][MAX_MOBILES_PER_ENB]; - uint16_t total_allocated_rbs[NFAPI_CC_MAX]; - uint16_t average_rbs_per_user[NFAPI_CC_MAX]; - int16_t total_remaining_rbs[NFAPI_CC_MAX]; - uint16_t total_ue_count[NFAPI_CC_MAX]; + uint16_t nb_allocated_rbs[MAX_MOBILES_PER_ENB]; + uint16_t total_allocated_rbs = 0; + uint16_t average_rbs_per_user = 0; + int16_t total_remaining_rbs = 0; + uint16_t total_ue_count = 0; eNB_MAC_INST *eNB = RC.mac[module_idP]; UE_info_t *UE_info = &eNB->UE_info; - slice_info_t *sli = &eNB->slice_info; - const int slice_idx = 0; - UE_TEMPLATE *UE_template = 0; - UE_sched_ctrl_t *ue_sched_ctl; - int N_RB_UL = 0; - uint16_t available_rbs, first_rb_offset; + const int N_RB_UL = to_prb(eNB->common_channels[CC_id].ul_Bandwidth); + uint16_t available_rbs = N_RB_UL - 2 * first_rb[CC_id]; // top and bottom // - UE_info->first_rb_offset[CC_id]; rnti_t rntiTable[MAX_MOBILES_PER_ENB]; + // sort ues LOG_D(MAC, "In ulsch_preprocessor: sort ue \n"); - sort_ue_ul(module_idP, slice_idx, sched_frameP, sched_subframeP, rntiTable); + sort_ue_ul(module_idP, 
sched_frameP, sched_subframeP, rntiTable); // maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB LOG_D(MAC, "In ulsch_preprocessor: assign max mcs min rb\n"); - assign_max_mcs_min_rb(module_idP, slice_idx, frameP, subframeP, first_rb); - // we need to distribute RBs among UEs - // step1: reset the vars - uint8_t CC_nb = (uint8_t) RC.nb_mac_CC[module_idP]; - - for (CC_id = 0; CC_id < CC_nb; CC_id++) { - total_allocated_rbs[CC_id] = 0; - total_remaining_rbs[CC_id] = 0; - average_rbs_per_user[CC_id] = 0; - total_ue_count[CC_id] = 0; - } + assign_max_mcs_min_rb(module_idP, CC_id, frameP, subframeP, first_rb); - // Step 1.5: Calculate total_ue_count - for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { - // This is not the actual CC_id in the list - for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) { - CC_id = UE_info->ordered_ULCCids[n][UE_id]; - UE_template = &UE_info->UE_template[CC_id][UE_id]; - - if (UE_template->pre_allocated_nb_rb_ul[slice_idx] > 0) { - total_ue_count[CC_id]++; - } + for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { + if (UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul > 0) { + total_ue_count++; } } - // step 2: calculate the average rb per UE - LOG_D(MAC, "In ulsch_preprocessor: step2 \n"); - - for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { - if (UE_info->UE_template[CC_id][UE_id].rach_resource_type > 0) continue; + if (total_ue_count == 0) + average_rbs_per_user = 0; + else if (total_ue_count == 1) + average_rbs_per_user = available_rbs + 1; + else if (total_ue_count <= available_rbs) + average_rbs_per_user = (uint16_t) floor(available_rbs / total_ue_count); + else + average_rbs_per_user = 1; + + if (total_ue_count > 0) + LOG_D(MAC, "[eNB %d] Frame %d subframe %d: total ue to be scheduled %d\n", + module_idP, + frameP, + subframeP, + total_ue_count); - LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x\n", - UE_id, - rntiTable[UE_id]); + for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { + uint8_t harq_pid = subframe2harqpid(&RC.mac[module_idP]->common_channels[CC_id], + sched_frameP, sched_subframeP); - for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) { - // This is the actual CC_id in the list - CC_id = UE_info->ordered_ULCCids[n][UE_id]; - LOG_D(MAC, "In ulsch_preprocessor: handling UE %d/%x CCid %d\n", - UE_id, - rntiTable[UE_id], - CC_id); - /* - if((mac_xface->get_nCCE_max(module_idP,CC_id,3,subframeP) - nCCE_to_be_used[CC_id]) > (1<<aggregation)) { - nCCE_to_be_used[CC_id] = nCCE_to_be_used[CC_id] + (1<<aggregation); - max_num_ue_to_be_scheduled+=1; - } */ - N_RB_UL = to_prb(eNB->common_channels[CC_id].ul_Bandwidth); - ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id]; - ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] = - nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL); - first_rb_offset = UE_info->first_rb_offset[CC_id][slice_idx]; - available_rbs = N_RB_UL - 2 * first_rb[CC_id]; // factor 2: top&bottom - - if (available_rbs < 0) - available_rbs = 0; - - if (total_ue_count[CC_id] == 0) { - average_rbs_per_user[CC_id] = 0; - } else if (total_ue_count[CC_id] == 1) { // increase the available RBs, special case, - average_rbs_per_user[CC_id] = (uint16_t) (available_rbs + 1); - } else if (total_ue_count[CC_id] <= available_rbs) { - average_rbs_per_user[CC_id] = (uint16_t) floor(available_rbs / total_ue_count[CC_id]); - } else { - 
average_rbs_per_user[CC_id] = 1; - LOG_W(MAC, "[eNB %d] frame %d subframe %d: UE %d CC %d: can't get average rb per user (should not be here)\n", - module_idP, - frameP, - subframeP, - UE_id, - CC_id); - } + if (UE_info->UE_sched_ctrl[UE_id].round_UL[CC_id][harq_pid] > 0) + nb_allocated_rbs[UE_id] = UE_info->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid]; + else + nb_allocated_rbs[UE_id] = cmin(UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul, average_rbs_per_user); - if (total_ue_count[CC_id] > 0) { - LOG_D(MAC, "[eNB %d] Frame %d subframe %d: total ue to be scheduled %d\n", - module_idP, - frameP, - subframeP, - total_ue_count[CC_id]); - } - } + total_allocated_rbs += nb_allocated_rbs[UE_id]; + LOG_D(MAC, "In ulsch_preprocessor: assigning %d RBs for UE %d/%x CCid %d, harq_pid %d\n", + nb_allocated_rbs[UE_id], + UE_id, + rntiTable[UE_id], + CC_id, + harq_pid); } - // step 3: assigne RBS - for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { - // if (continueTable[UE_id]) continue; - for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) { - // This is the actual CC_id in the list - CC_id = UE_info->ordered_ULCCids[n][UE_id]; - UE_template = &UE_info->UE_template[CC_id][UE_id]; - harq_pid = subframe2harqpid(&RC.mac[module_idP]->common_channels[CC_id], - sched_frameP, sched_subframeP); - - // mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,frameP,subframeP,&harq_pid,&round,openair_harq_UL); - - if (UE_info->UE_sched_ctrl[UE_id].round_UL[CC_id][harq_pid] > 0) { - nb_allocated_rbs[CC_id][UE_id] = UE_info->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid]; - } else { - nb_allocated_rbs[CC_id][UE_id] = - cmin(UE_info->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul[slice_idx], average_rbs_per_user[CC_id]); - } - - total_allocated_rbs[CC_id] += nb_allocated_rbs[CC_id][UE_id]; - LOG_D(MAC, "In ulsch_preprocessor: assigning %d RBs for UE %d/%x CCid %d, harq_pid %d\n", - nb_allocated_rbs[CC_id][UE_id], - UE_id, - rntiTable[UE_id], - CC_id, - harq_pid); + for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { + UE_TEMPLATE *UE_template = &UE_info->UE_template[CC_id][UE_id]; + total_remaining_rbs = available_rbs - total_allocated_rbs; + + /* TODO this has already been accounted for - do we need it again? 
*/ + //if (total_ue_count == 1) + // total_remaining_rbs++; + + while (UE_template->pre_allocated_nb_rb_ul > 0 && + nb_allocated_rbs[UE_id] < UE_template->pre_allocated_nb_rb_ul && + total_remaining_rbs > 0) { + nb_allocated_rbs[UE_id] = cmin(nb_allocated_rbs[UE_id] + 1, UE_template->pre_allocated_nb_rb_ul); + total_remaining_rbs--; + total_allocated_rbs++; } - } - - // step 4: assigne the remaining RBs and set the pre_allocated rbs accordingly - for (UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { - // if (continueTable[UE_id]) continue; - ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id]; - - for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) { - // This is the actual CC_id in the list - CC_id = UE_info->ordered_ULCCids[n][UE_id]; - UE_template = &UE_info->UE_template[CC_id][UE_id]; - N_RB_UL = to_prb(eNB->common_channels[CC_id].ul_Bandwidth); - first_rb_offset = UE_info->first_rb_offset[CC_id][slice_idx]; - available_rbs = cmin(ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx], N_RB_UL - first_rb[CC_id] - first_rb_offset); - total_remaining_rbs[CC_id] = available_rbs - total_allocated_rbs[CC_id]; - - if (total_ue_count[CC_id] == 1) { - total_remaining_rbs[CC_id]++; - } - - while (UE_template->pre_allocated_nb_rb_ul[slice_idx] > 0 && - nb_allocated_rbs[CC_id][UE_id] < UE_template->pre_allocated_nb_rb_ul[slice_idx] && - total_remaining_rbs[CC_id] > 0) { - nb_allocated_rbs[CC_id][UE_id] = cmin(nb_allocated_rbs[CC_id][UE_id] + 1, UE_template->pre_allocated_nb_rb_ul[slice_idx]); - total_remaining_rbs[CC_id]--; - total_allocated_rbs[CC_id]++; - } - UE_template->pre_allocated_nb_rb_ul[slice_idx] = nb_allocated_rbs[CC_id][UE_id]; - LOG_D(MAC, "******************UL Scheduling Information for UE%d CC_id %d ************************\n", - UE_id, - CC_id); - LOG_D(MAC, "[eNB %d] total RB allocated for UE%d CC_id %d = %d\n", - module_idP, - UE_id, - CC_id, - UE_template->pre_allocated_nb_rb_ul[slice_idx]); - } + UE_template->pre_allocated_nb_rb_ul = nb_allocated_rbs[UE_id]; + LOG_D(MAC, "******************UL Scheduling Information for UE%d CC_id %d ************************\n", + UE_id, + CC_id); + LOG_D(MAC, "[eNB %d] total RB allocated for UE%d CC_id %d = %d\n", + module_idP, + UE_id, + CC_id, + UE_template->pre_allocated_nb_rb_ul); } - - return; } void assign_max_mcs_min_rb(module_id_t module_idP, - int slice_idx, + int CC_id, int frameP, sub_frame_t subframeP, uint16_t *first_rb) { - int i; - uint16_t n, UE_id; - uint8_t CC_id; int mcs; int rb_table_index = 0, tbs, tx_power; eNB_MAC_INST *eNB = RC.mac[module_idP]; @@ -620,77 +527,61 @@ assign_max_mcs_min_rb(module_id_t module_idP, int N_RB_UL; int first_rb_offset, available_rbs; - for (i = UE_info->list.head; i >= 0; i = UE_info->list.next[i]) { - if (UE_info->UE_sched_ctrl[i].phr_received == 1) { - /* if we've received the power headroom information the UE, we can go to - * maximum mcs */ - mcs = cmin(20, sli->ul[slice_idx].maxmcs); - } else { - /* otherwise, limit to QPSK PUSCH */ - mcs = cmin(10, sli->ul[slice_idx].maxmcs); - } - - UE_id = i; - - for (n = 0; n < UE_info->numactiveULCCs[UE_id]; n++) { - // This is the actual CC_id in the list - CC_id = UE_info->ordered_ULCCids[n][UE_id]; - AssertFatal(CC_id < RC.nb_mac_CC[module_idP], "CC_id %u should be < %u, loop n=%u < numactiveULCCs[%u]=%u", - CC_id, - NFAPI_CC_MAX, - n, - UE_id, - UE_info->numactiveULCCs[UE_id]); - UE_template = &UE_info->UE_template[CC_id][UE_id]; - UE_template->pre_assigned_mcs_ul = mcs; - ue_sched_ctl = 
&UE_info->UE_sched_ctrl[UE_id]; - Ncp = eNB->common_channels[CC_id].Ncp; - N_RB_UL = to_prb(eNB->common_channels[CC_id].ul_Bandwidth); - ue_sched_ctl->max_rbs_allowed_slice_uplink[CC_id][slice_idx] = nb_rbs_allowed_slice(sli->ul[slice_idx].pct, N_RB_UL); - int bytes_to_schedule = UE_template->estimated_ul_buffer - UE_template->scheduled_ul_bytes; - - if (bytes_to_schedule < 0) bytes_to_schedule = 0; - - int bits_to_schedule = bytes_to_schedule * 8; - - // if this UE has UL traffic - if (bits_to_schedule > 0) { - tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, 3) << 3; // 1 or 2 PRB with cqi enabled does not work well! - rb_table_index = 2; - // fixme: set use_srs flag - tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); + for (int UE_id = UE_info->list.head; UE_id >= 0; UE_id = UE_info->list.next[UE_id]) { + /* if we've received the power headroom information the UE, we can go to + * maximum mcs */ + mcs = UE_info->UE_sched_ctrl[UE_id].phr_received == 1 ? 20 : 10; - while ((UE_template->phr_info - tx_power < 0 || tbs > bits_to_schedule) && UE_template->pre_assigned_mcs_ul > 3) { - // LOG_I(MAC,"UE_template->phr_info %d tx_power %d mcs %d\n", UE_template->phr_info,tx_power, mcs); - UE_template->pre_assigned_mcs_ul--; - tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, rb_table[rb_table_index]) << 3; - tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); // fixme: set use_srs - } + UE_template = &UE_info->UE_template[CC_id][UE_id]; + UE_template->pre_assigned_mcs_ul = mcs; + ue_sched_ctl = &UE_info->UE_sched_ctrl[UE_id]; + Ncp = eNB->common_channels[CC_id].Ncp; + N_RB_UL = to_prb(eNB->common_channels[CC_id].ul_Bandwidth); + int bytes_to_schedule = UE_template->estimated_ul_buffer - UE_template->scheduled_ul_bytes; + + if (bytes_to_schedule < 0) bytes_to_schedule = 0; + + int bits_to_schedule = bytes_to_schedule * 8; + + // if this UE has UL traffic + if (bits_to_schedule > 0) { + tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, 3) << 3; // 1 or 2 PRB with cqi enabled does not work well! 
+ rb_table_index = 2; + // fixme: set use_srs flag + tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); + + while ((UE_template->phr_info - tx_power < 0 || tbs > bits_to_schedule) && UE_template->pre_assigned_mcs_ul > 3) { + // LOG_I(MAC,"UE_template->phr_info %d tx_power %d mcs %d\n", UE_template->phr_info,tx_power, mcs); + UE_template->pre_assigned_mcs_ul--; + tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, rb_table[rb_table_index]) << 3; + tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); // fixme: set use_srs + } - first_rb_offset = UE_info->first_rb_offset[CC_id][slice_idx]; - available_rbs = N_RB_UL - 2 * first_rb[CC_id]; + first_rb_offset = UE_info->first_rb_offset[CC_id]; + available_rbs = N_RB_UL - 2 * first_rb[CC_id]; - while (tbs < bits_to_schedule && - rb_table[rb_table_index] < available_rbs && - UE_template->phr_info - tx_power > 0 && - rb_table_index < 32) { - rb_table_index++; - tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, rb_table[rb_table_index]) << 3; - tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); - } + while (tbs < bits_to_schedule && + rb_table[rb_table_index] < available_rbs && + UE_template->phr_info - tx_power > 0 && + rb_table_index < 32) { + rb_table_index++; + tbs = get_TBS_UL(UE_template->pre_assigned_mcs_ul, rb_table[rb_table_index]) << 3; + tx_power = estimate_ue_tx_power(tbs, rb_table[rb_table_index], 0, Ncp, 0); + } - if (rb_table[rb_table_index] > (available_rbs - 1)) { - rb_table_index--; - } + if (rb_table[rb_table_index] > (available_rbs - 1)) { + rb_table_index--; + } - // 1 or 2 PRB with cqi enabled does not work well - if (rb_table[rb_table_index] < 3) { - rb_table_index = 2; //3PRB - } + // 1 or 2 PRB with cqi enabled does not work well + if (rb_table[rb_table_index] < 3) { + rb_table_index = 2; //3PRB + } - UE_template->pre_allocated_rb_table_index_ul = rb_table_index; - UE_template->pre_allocated_nb_rb_ul[slice_idx] = rb_table[rb_table_index]; - LOG_D(MAC, "[eNB %d] frame %d subframe %d: for UE %d CC %d: pre-assigned mcs %d, pre-allocated rb_table[%d]=%d RBs (phr %d, tx power %d)\n", + UE_template->pre_allocated_rb_table_index_ul = rb_table_index; + UE_template->pre_allocated_nb_rb_ul = rb_table[rb_table_index]; + if (UE_template->pre_allocated_nb_rb_ul > 0) + LOG_W(MAC, "[eNB %d] frame %d subframe %d: for UE %d CC %d: pre-assigned mcs %d, pre-allocated rb_table[%d]=%d RBs (phr %d, tx power %d)\n", module_idP, frameP, subframeP, @@ -698,21 +589,20 @@ assign_max_mcs_min_rb(module_id_t module_idP, CC_id, UE_template->pre_assigned_mcs_ul, UE_template->pre_allocated_rb_table_index_ul, - UE_template->pre_allocated_nb_rb_ul[slice_idx], + UE_template->pre_allocated_nb_rb_ul, UE_template->phr_info, tx_power); + } else { + /* if UE has pending scheduling request then pre-allocate 3 RBs */ + //if (UE_template->ul_active == 1 && UE_template->ul_SR == 1) { + if (UE_is_to_be_scheduled(module_idP, CC_id, UE_id)) { + /* use QPSK mcs */ + UE_template->pre_assigned_mcs_ul = 10; + UE_template->pre_allocated_rb_table_index_ul = 2; + UE_template->pre_allocated_nb_rb_ul = 3; } else { - /* if UE has pending scheduling request then pre-allocate 3 RBs */ - //if (UE_template->ul_active == 1 && UE_template->ul_SR == 1) { - if (UE_is_to_be_scheduled(module_idP, CC_id, i)) { - /* use QPSK mcs */ - UE_template->pre_assigned_mcs_ul = 10; - UE_template->pre_allocated_rb_table_index_ul = 2; - UE_template->pre_allocated_nb_rb_ul[slice_idx] = 3; - } else { - UE_template->pre_assigned_mcs_ul = 0; 
- UE_template->pre_allocated_rb_table_index_ul = -1; - UE_template->pre_allocated_nb_rb_ul[slice_idx] = 0; - } + UE_template->pre_assigned_mcs_ul = 0; + UE_template->pre_allocated_rb_table_index_ul = -1; + UE_template->pre_allocated_nb_rb_ul = 0; } } } @@ -790,7 +680,6 @@ static int ue_ul_compare(const void *_a, const void *_b, void *_params) { * This function sorts the UEs in order, depending on their ulsch buffer and CQI */ void sort_ue_ul(module_id_t module_idP, - int slice_idx, int sched_frameP, sub_frame_t sched_subframeP, rnti_t *rntiTable) @@ -817,8 +706,7 @@ void sort_ue_ul(module_id_t module_idP, // Valid element and is not the actual CC_id in the list if (UE_info->active[i] == TRUE && rntiTable[i] != NOT_A_RNTI && - UE_info->UE_sched_ctrl[i].ul_out_of_sync != 1 && - ue_ul_slice_membership(module_idP, i, slice_idx)) { + UE_info->UE_sched_ctrl[i].ul_out_of_sync != 1) { list[list_size++] = i; // Add to list } } -- GitLab
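
For reference, the RB distribution that remains in ulsch_scheduler_pre_processor() after this patch boils down to four steps on a single CC: count the UEs that received a pre-allocation, derive an average share, cap each new transmission at that share (retransmissions keep the size of their ongoing HARQ process), and hand out any leftover RBs one at a time. The sketch below models exactly that flow; ue_alloc_t, distribute_ul_rbs() and their fields are illustrative stand-ins, not the OAI UE_info_t/UE_TEMPLATE structures.

/* Standalone model of the single-CC UL RB distribution kept by this patch.
 * Names and types are illustrative only; they are not the OAI structures. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint16_t pre_allocated_nb_rb_ul; /* demand set by assign_max_mcs_min_rb()   */
  int      is_retx;                /* round_UL > 0: keep the original size    */
  uint16_t nb_rb_ul_retx;          /* RBs of the ongoing HARQ retransmission  */
  uint16_t allocated;              /* result of the distribution              */
} ue_alloc_t;

static void distribute_ul_rbs(ue_alloc_t *ue, int n_ue, uint16_t available_rbs)
{
  /* step 1: count UEs that actually asked for UL resources */
  int total_ue_count = 0;
  for (int i = 0; i < n_ue; i++)
    if (ue[i].pre_allocated_nb_rb_ul > 0)
      total_ue_count++;

  /* step 2: average share per UE (a single UE may take everything) */
  uint16_t average_rbs_per_user;
  if (total_ue_count == 0)
    average_rbs_per_user = 0;
  else if (total_ue_count == 1)
    average_rbs_per_user = available_rbs + 1;
  else if (total_ue_count <= available_rbs)
    average_rbs_per_user = available_rbs / total_ue_count;
  else
    average_rbs_per_user = 1;

  /* step 3: cap new transmissions at the average; retransmissions are fixed */
  uint16_t total_allocated_rbs = 0;
  for (int i = 0; i < n_ue; i++) {
    if (ue[i].is_retx)
      ue[i].allocated = ue[i].nb_rb_ul_retx;
    else
      ue[i].allocated = ue[i].pre_allocated_nb_rb_ul < average_rbs_per_user
                        ? ue[i].pre_allocated_nb_rb_ul : average_rbs_per_user;
    total_allocated_rbs += ue[i].allocated;
  }

  /* step 4: hand out leftover RBs one by one until demand or capacity is met */
  for (int i = 0; i < n_ue; i++) {
    int16_t total_remaining_rbs = (int16_t)(available_rbs - total_allocated_rbs);
    while (ue[i].allocated < ue[i].pre_allocated_nb_rb_ul && total_remaining_rbs > 0) {
      ue[i].allocated++;
      total_allocated_rbs++;
      total_remaining_rbs--;
    }
  }
}

int main(void)
{
  ue_alloc_t ue[3] = { { .pre_allocated_nb_rb_ul = 20 },
                       { .pre_allocated_nb_rb_ul = 8  },
                       { .pre_allocated_nb_rb_ul = 40 } };
  distribute_ul_rbs(ue, 3, 45); /* e.g. 50 PRB minus 2 * first_rb[CC_id] */
  for (int i = 0; i < 3; i++)
    printf("UE %d: %d RB\n", i, ue[i].allocated);
  return 0;
}

With carrier aggregation and slicing gone, available_rbs is simply N_RB_UL - 2 * first_rb[CC_id], so the distribution runs once for the CC_id the pre-processor is called with instead of iterating over numactiveULCCs and slice indices.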
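
The per-UE MCS/RB selection now in assign_max_mcs_min_rb() follows the control flow sketched below: start from the highest MCS the PHR report allows (20 once a PHR has been received, otherwise 10, i.e. QPSK), lower it while the UE lacks power headroom or the TBS overshoots the buffer, then widen the allocation along rb_table until the buffer is covered or the power/bandwidth limits are hit. tbs_bits() and needed_tx_power() are deliberately toy stand-ins for OAI's get_TBS_UL() and estimate_ue_tx_power(), and pick_mcs_and_rbs() is a hypothetical wrapper; only the loop structure mirrors the patched code.

/* Toy stand-ins for OAI's get_TBS_UL() and estimate_ue_tx_power(). */
#include <stdio.h>

static int tbs_bits(int mcs, int nb_rb)        { return mcs * nb_rb * 100; }
static int needed_tx_power(int tbs, int nb_rb) { return tbs / (8 * nb_rb); }

/* Valid PUSCH allocation sizes (2^a * 3^b * 5^c), as used by rb_table in OAI. */
static const int rb_table[34] = { 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18,
                                  20, 24, 25, 27, 30, 32, 36, 40, 45, 48, 50,
                                  54, 60, 64, 72, 75, 80, 81, 90, 96, 100 };

static void pick_mcs_and_rbs(int phr_received, int phr_info, int bits_to_schedule,
                             int available_rbs, int *mcs_out, int *rb_idx_out)
{
  int mcs = phr_received ? 20 : 10;   /* QPSK only until a PHR report arrives      */
  int rb_idx = 2;                     /* start at 3 PRB; 1-2 PRB work poorly w/ CQI */
  int tbs = tbs_bits(mcs, 3);
  int tx_power = needed_tx_power(tbs, rb_table[rb_idx]);

  /* lower the MCS while the UE lacks power or the TBS overshoots the buffer */
  while ((phr_info - tx_power < 0 || tbs > bits_to_schedule) && mcs > 3) {
    mcs--;
    tbs = tbs_bits(mcs, rb_table[rb_idx]);
    tx_power = needed_tx_power(tbs, rb_table[rb_idx]);
  }

  /* then widen the allocation until the buffer is covered or limits are reached */
  while (tbs < bits_to_schedule && rb_table[rb_idx] < available_rbs &&
         phr_info - tx_power > 0 && rb_idx < 32) {
    rb_idx++;
    tbs = tbs_bits(mcs, rb_table[rb_idx]);
    tx_power = needed_tx_power(tbs, rb_table[rb_idx]);
  }

  if (rb_table[rb_idx] > available_rbs - 1)
    rb_idx--;
  if (rb_table[rb_idx] < 3)                   /* never go below 3 PRB */
    rb_idx = 2;

  *mcs_out = mcs;
  *rb_idx_out = rb_idx;
}

int main(void)
{
  int mcs, rb_idx;
  pick_mcs_and_rbs(1 /* phr_received */, 40 /* phr_info */,
                   5000 /* bits_to_schedule */, 45 /* available_rbs */,
                   &mcs, &rb_idx);
  printf("mcs %d, rb_table[%d] = %d RB\n", mcs, rb_idx, rb_table[rb_idx]);
  return 0;
}

UEs with no pending data but a pending scheduling request still get the fixed fallback visible in the patch (MCS 10, rb_table index 2, 3 RBs); everyone else gets a zero pre-allocation.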