/*
 * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The OpenAirInterface Software Alliance licenses this file to You under
 * the OAI Public License, Version 1.0  (the "License"); you may not use this file
 * except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.openairinterface.org/?page_id=698
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *-------------------------------------------------------------------------------
 * For more information about the OpenAirInterface (OAI) Software Alliance:
 *      contact@openairinterface.org
 */

22
/*! \file pre_processor.c
23
 * \brief eNB scheduler preprocessing fuction prior to scheduling
24
 * \author Navid Nikaein and Ankit Bhamri
25
 * \date 2013 - 2014
26
 * \email navid.nikaein@eurecom.fr
27
 * \version 1.0
28 29 30 31
 * @ingroup _mac

 */

32
#include "assertions.h"
33 34 35 36 37 38 39
#include "PHY/defs.h"
#include "PHY/extern.h"

#include "SCHED/defs.h"
#include "SCHED/extern.h"

#include "LAYER2/MAC/defs.h"
40
#include "LAYER2/MAC/proto.h"
41 42
#include "LAYER2/MAC/extern.h"
#include "UTIL/LOG/log.h"
43
#include "UTIL/LOG/vcd_signal_dumper.h"
44 45 46 47 48
#include "UTIL/OPT/opt.h"
#include "OCG.h"
#include "OCG_extern.h"
#include "RRC/LITE/extern.h"
#include "RRC/L2_INTERFACE/openair_rrc_L2_interface.h"
49
#include "rlc.h"
50 51


52

53 54 55 56 57 58 59 60 61 62
#define DEBUG_eNB_SCHEDULER 1
#define DEBUG_HEADER_PARSING 1
//#define DEBUG_PACKET_TRACE 1

//#define ICIC 0

/*
  #ifndef USER_MODE
  #define msg debug_msg
  #endif
63
*/
64 65


66
// This function stores the downlink buffer for all the logical channels
67 68
void store_dlsch_buffer (module_id_t Mod_id,
                         frame_t     frameP,
69 70
                         sub_frame_t subframeP)
{
71

72
  int                   UE_id,i;
73
  rnti_t                rnti;
74
  mac_rlc_status_resp_t rlc_status;
75 76 77
  UE_list_t             *UE_list = &eNB_mac_inst[Mod_id].UE_list;
  UE_TEMPLATE           *UE_template;

78
  for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) {
79 80

    UE_template = &UE_list->UE_template[UE_PCCID(Mod_id,UE_id)][UE_id];
81 82

    // clear logical channel interface variables
83 84
    UE_template->dl_buffer_total = 0;
    UE_template->dl_pdus_total = 0;
85 86

    for(i=0; i< MAX_NUM_LCID; i++) {
87 88 89 90 91
      UE_template->dl_buffer_info[i]=0;
      UE_template->dl_pdus_in_buffer[i]=0;
      UE_template->dl_buffer_head_sdu_creation_time[i]=0;
      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i]=0;
    }
92

93
    rnti = UE_RNTI(Mod_id,UE_id);
94 95 96

    for(i=0; i< MAX_NUM_LCID; i++) { // loop over all the logical channels

97
      rlc_status = mac_rlc_status_ind(Mod_id,rnti, Mod_id,frameP,ENB_FLAG_YES,MBMS_FLAG_NO,i,0 );
98 99 100
      UE_template->dl_buffer_info[i] = rlc_status.bytes_in_buffer; //storing the dlsch buffer for each logical channel
      UE_template->dl_pdus_in_buffer[i] = rlc_status.pdus_in_buffer;
      UE_template->dl_buffer_head_sdu_creation_time[i] = rlc_status.head_sdu_creation_time ;
101 102
      UE_template->dl_buffer_head_sdu_creation_time_max = cmax(UE_template->dl_buffer_head_sdu_creation_time_max,
          rlc_status.head_sdu_creation_time );
103 104
      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i] = rlc_status.head_sdu_remaining_size_to_send;
      UE_template->dl_buffer_head_sdu_is_segmented[i] = rlc_status.head_sdu_is_segmented;
105 106
      UE_template->dl_buffer_total += UE_template->dl_buffer_info[i];//storing the total dlsch buffer
      UE_template->dl_pdus_total   += UE_template->dl_pdus_in_buffer[i];
107

108
#ifdef DEBUG_eNB_SCHEDULER
109

110 111 112 113
      /* note for dl_buffer_head_sdu_remaining_size_to_send[i] :
       * 0 if head SDU has not been segmented (yet), else remaining size not already segmented and sent
       */
      if (UE_template->dl_buffer_info[i]>0)
114 115
        LOG_D(MAC,
              "[eNB %d] Frame %d Subframe %d : RLC status for UE %d in LCID%d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmeneted %d \n",
116 117 118 119 120 121 122
              Mod_id, frameP, subframeP, UE_id,
              i, UE_template->dl_pdus_in_buffer[i],UE_template->dl_buffer_info[i],
              UE_template->dl_buffer_head_sdu_creation_time[i],
              UE_template->dl_buffer_head_sdu_remaining_size_to_send[i],
              UE_template->dl_buffer_head_sdu_is_segmented[i]
             );

123
#endif
124

125
    }
126

127
    //#ifdef DEBUG_eNB_SCHEDULER
128 129
    if ( UE_template->dl_buffer_total>0)
      LOG_D(MAC,"[eNB %d] Frame %d Subframe %d : RLC status for UE %d : total DL buffer size %d and total number of pdu %d \n",
130 131 132 133 134 135
            Mod_id, frameP, subframeP, UE_id,
            UE_template->dl_buffer_total,
            UE_template->dl_pdus_total
           );

    //#endif
136 137 138
  }
}

139

140
// This function returns the estimated number of RBs required by each UE for downlink scheduling
141
void assign_rbs_required (module_id_t Mod_id,
142 143 144 145 146
                          frame_t     frameP,
                          sub_frame_t subframe,
                          uint16_t    nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
                          int         min_rb_unit[MAX_NUM_CCs])
{
147

148

149 150 151 152 153
  rnti_t           rnti;
  uint16_t         TBS = 0;
  LTE_eNB_UE_stats *eNB_UE_stats[MAX_NUM_CCs];
  int              UE_id,n,i,j,CC_id,pCCid,tmp;
  UE_list_t        *UE_list = &eNB_mac_inst[Mod_id].UE_list;
154
  //  UE_TEMPLATE           *UE_template;
155
  LTE_DL_FRAME_PARMS   *frame_parms[MAX_NUM_CCs];
156

157
  // clear rb allocations across all CC_ids
158
  for (UE_id=UE_list->head; UE_id>=0; UE_id=UE_list->next[UE_id]) {
159
    pCCid = UE_PCCID(Mod_id,UE_id);
160
    rnti = UE_list->UE_template[pCCid][UE_id].rnti;
161

162
    //update CQI information across component carriers
163
    for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {
164

165
      CC_id = UE_list->ordered_CCids[n][UE_id];
166 167
      frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id);
      eNB_UE_stats[CC_id] = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti);
168
      /*
169 170
      DevCheck(((eNB_UE_stats[CC_id]->DL_cqi[0] < MIN_CQI_VALUE) || (eNB_UE_stats[CC_id]->DL_cqi[0] > MAX_CQI_VALUE)),
      eNB_UE_stats[CC_id]->DL_cqi[0], MIN_CQI_VALUE, MAX_CQI_VALUE);
171 172
      */
      eNB_UE_stats[CC_id]->dlsch_mcs1=cqi_to_mcs[eNB_UE_stats[CC_id]->DL_cqi[0]];
173

174
      eNB_UE_stats[CC_id]->dlsch_mcs1 = eNB_UE_stats[CC_id]->dlsch_mcs1;//cmin(eNB_UE_stats[CC_id]->dlsch_mcs1,openair_daq_vars.target_ue_dl_mcs);
175

176
    }
177

178
    // provide the list of CCs sorted according to MCS
179 180
    for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
      for (j=i+1; j<UE_list->numactiveCCs[UE_id]; j++) {
181
        DevAssert( j < MAX_NUM_CCs );
182 183 184 185 186 187 188

        if (eNB_UE_stats[UE_list->ordered_CCids[i][UE_id]]->dlsch_mcs1 >
            eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]]->dlsch_mcs1) {
          tmp = UE_list->ordered_CCids[i][UE_id];
          UE_list->ordered_CCids[i][UE_id] = UE_list->ordered_CCids[j][UE_id];
          UE_list->ordered_CCids[j][UE_id] = tmp;
        }
189
      }
190
    }
191

192
    /*
193
    if ((mac_get_rrc_status(Mod_id,1,UE_id) < RRC_RECONFIGURED)){  // If we still don't have a default radio bearer
194
      nb_rbs_required[pCCid][UE_id] = PHY_vars_eNB_g[Mod_id][pCCid]->frame_parms.N_RB_DL;
195 196
      continue;
    }
197 198
    */
    /* NN --> RK
199 200
     * check the index of UE_template"
     */
201 202
    //    if (UE_list->UE_template[UE_id]->dl_buffer_total> 0) {
    if (UE_list->UE_template[pCCid][UE_id].dl_buffer_total> 0) {
203
      LOG_D(MAC,"[preprocessor] assign RB for UE %d\n",UE_id);
204 205 206 207 208 209

      for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
        CC_id = UE_list->ordered_CCids[i][UE_id];
        frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id);
        eNB_UE_stats[CC_id] = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti);

210 211 212 213 214
        if (eNB_UE_stats[CC_id]->dlsch_mcs1==0) {
          nb_rbs_required[CC_id][UE_id] = 4;  // don't let the TBS get too small
        } else {
          nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id];
        }
215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236

        TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);

        LOG_D(MAC,"[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n",
              UE_id, CC_id, UE_list->UE_template[pCCid][UE_id].dl_buffer_total,
              nb_rbs_required[CC_id][UE_id],eNB_UE_stats[CC_id]->dlsch_mcs1,TBS);

        /* calculating required number of RBs for each UE */
        while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total)  {
          nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id];

          if (nb_rbs_required[CC_id][UE_id] > frame_parms[CC_id]->N_RB_DL) {
            TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,frame_parms[CC_id]->N_RB_DL);
            nb_rbs_required[CC_id][UE_id] = frame_parms[CC_id]->N_RB_DL;
            break;
          }

          TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
        } // end of while

        LOG_D(MAC,"[eNB %d] Frame %d: UE %d on CC %d: RB unit %d,  nb_required RB %d (TBS %d, mcs %d)\n",
              Mod_id, frameP,UE_id, CC_id,  min_rb_unit[CC_id], nb_rbs_required[CC_id][UE_id], TBS, eNB_UE_stats[CC_id]->dlsch_mcs1);
237 238 239 240
      }
    }
  }
}
241 242


243
// This function scans all CC_ids for a particular UE to find the maximum round index of its HARQ processes
244

245 246
int maxround(module_id_t Mod_id,uint16_t rnti,int frame,sub_frame_t subframe,uint8_t ul_flag )
{
247

248
  uint8_t round,round_max=0,UE_id;
249
  int CC_id;
250
  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
251

252 253
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {

254 255
    UE_id = find_UE_id(Mod_id,rnti);
    round    = UE_list->UE_sched_ctrl[UE_id].round[CC_id];
256
    if (round > round_max) {
257
      round_max = round;
258
    }
259 260
  }

261
  return round_max;
262
}
263

264
// This function scans all CC_ids for a particular UE to find the maximum DL CQI
265

266 267
int maxcqi(module_id_t Mod_id,int32_t UE_id)
{
268

269
  LTE_eNB_UE_stats *eNB_UE_stats = NULL;
270 271
  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
  int CC_id,n;
272
  int CQI = 0;
273

274
  for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {
275 276
    CC_id = UE_list->ordered_CCids[n][UE_id];
    eNB_UE_stats = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,UE_RNTI(Mod_id,UE_id));
277

278
    if (eNB_UE_stats==NULL) {
279
      mac_xface->macphy_exit("maxcqi: could not get eNB_UE_stats\n");
280 281
      return 0; // not reached
    }
282

283
    if (eNB_UE_stats->DL_cqi[0] > CQI) {
284
      CQI = eNB_UE_stats->DL_cqi[0];
285
    }
286
  }
287

288 289
  return(CQI);
}
290 291 292



293
// This fuction sorts the UE in order their dlsch buffer and CQI
294
void sort_UEs (module_id_t Mod_idP,
295 296 297
               int         frameP,
               sub_frame_t subframeP)
{
298 299


300 301 302
  int               UE_id1,UE_id2;
  int               pCC_id1,pCC_id2;
  int               cqi1,cqi2,round1,round2;
303
  int               i=0,ii=0;//,j=0;
304
  rnti_t            rnti1,rnti2;
305

306
  UE_list_t *UE_list = &eNB_mac_inst[Mod_idP].UE_list;
307

308
  for (i=UE_list->head; i>=0; i=UE_list->next[i]) {
309

310
    for(ii=UE_list->next[i]; ii>=0; ii=UE_list->next[ii]) {
311

312 313
      UE_id1  = i;
      rnti1 = UE_RNTI(Mod_idP,UE_id1);
314 315
      if(rnti1 == NOT_A_RNTI)
	continue;
316 317
      if (UE_list->UE_sched_ctrl[UE_id1].ul_out_of_sync == 1)
	continue;
318 319 320
      pCC_id1 = UE_PCCID(Mod_idP,UE_id1);
      cqi1    = maxcqi(Mod_idP,UE_id1); //
      round1  = maxround(Mod_idP,rnti1,frameP,subframeP,0);
321

322 323
      UE_id2 = ii;
      rnti2 = UE_RNTI(Mod_idP,UE_id2);
324 325
      if(rnti2 == NOT_A_RNTI)
        continue;
326 327
      if (UE_list->UE_sched_ctrl[UE_id2].ul_out_of_sync == 1)
	continue;
328
      cqi2    = maxcqi(Mod_idP,UE_id2);
329
      round2  = maxround(Mod_idP,rnti2,frameP,subframeP,0);  //mac_xface->get_ue_active_harq_pid(Mod_id,rnti2,subframe,&harq_pid2,&round2,0);
330
      pCC_id2 = UE_PCCID(Mod_idP,UE_id2);
331

332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353
      if(round2 > round1) { // Check first if one of the UEs has an active HARQ process which needs service and swap order
        swap_UEs(UE_list,UE_id1,UE_id2,0);
      } else if (round2 == round1) {
        // RK->NN : I guess this is for fairness in the scheduling. This doesn't make sense unless all UEs have the same configuration of logical channels.  This should be done on the sum of all information that has to be sent.  And still it wouldn't ensure fairness.  It should be based on throughput seen by each UE or maybe using the head_sdu_creation_time, i.e. swap UEs if one is waiting longer for service.
        //  for(j=0;j<MAX_NUM_LCID;j++){
        //    if (eNB_mac_inst[Mod_id][pCC_id1].UE_template[UE_id1].dl_buffer_info[j] <
        //      eNB_mac_inst[Mod_id][pCC_id2].UE_template[UE_id2].dl_buffer_info[j]){

        // first check the buffer status for SRB1 and SRB2

        if ( (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2]) <
             (UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
                   UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
                   UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (cqi1 < cqi2) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        }
354 355
      }
    }
356 357 358
  }
}

359

360 361


362
// This function assigns pre-available RBS to each UE in specified sub-bands
// before scheduling is done.  Pipeline: (1) reset per-UE scheduling state on
// every non-MBSFN CC, (2) snapshot RLC DL buffers, (3) estimate RBs required
// per UE, (4) sort UEs by priority, (5) allocate sub-bands in two rounds
// (average share first, then leftover to high-priority UEs), with optional
// TM5 MU-MIMO pairing under #ifdef TM5.
void dlsch_scheduler_pre_processor (module_id_t   Mod_id,
                                    frame_t       frameP,
                                    sub_frame_t   subframeP,
                                    int           N_RBG[MAX_NUM_CCs],
                                    int           *mbsfn_flag)
{

  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],harq_pid=0,round=0,total_ue_count;
  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
  int                     UE_id, i; 
  uint16_t                ii,j;
  uint16_t                nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t                nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t                nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t                average_rbs_per_user[MAX_NUM_CCs] = {0};
  rnti_t             rnti;
  int                min_rb_unit[MAX_NUM_CCs];
  uint16_t r1=0;
  uint8_t CC_id;
  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
  LTE_DL_FRAME_PARMS   *frame_parms[MAX_NUM_CCs] = {0};

  int transmission_mode = 0;
  UE_sched_ctrl *ue_sched_ctl;
  //  int rrc_status           = RRC_IDLE;

#ifdef TM5
  int harq_pid1=0,harq_pid2=0;
  int round1=0,round2=0;
  int UE_id2;
  uint16_t                i1,i2,i3;
  rnti_t             rnti1,rnti2;
  LTE_eNB_UE_stats  *eNB_UE_stats1 = NULL;
  LTE_eNB_UE_stats  *eNB_UE_stats2 = NULL;
  UE_sched_ctrl *ue_sched_ctl1,*ue_sched_ctl2;
#endif

  // step 1: reset per-UE/per-CC scheduling state on every schedulable CC
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {

    if (mbsfn_flag[CC_id]>0)  // If this CC is allocated for MBSFN skip it here
      continue;

    frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id);

    min_rb_unit[CC_id]=get_min_rb_unit(Mod_id,CC_id);

    for (i=UE_list->head; i>=0; i=UE_list->next[i]) {
      UE_id = i;
      // Initialize scheduling information for all active UEs
      dlsch_scheduler_pre_processor_reset(Mod_id,
        UE_id,
        CC_id,
        frameP,
        subframeP,
        N_RBG[CC_id],
        nb_rbs_required,
        nb_rbs_required_remaining,
        rballoc_sub,
        MIMO_mode_indicator);

    }
  }

  // Store the DLSCH buffer for each logical channel
  store_dlsch_buffer (Mod_id,frameP,subframeP);

  // Calculate the number of RBs required by each UE on the basis of logical channel's buffer
  assign_rbs_required (Mod_id,frameP,subframeP,nb_rbs_required,min_rb_unit);

  // Sorts the user on the basis of dlsch logical channel buffer and CQI
  sort_UEs (Mod_id,frameP,subframeP);

  total_ue_count =0;

  // loop over all active UEs
  for (i=UE_list->head; i>=0; i=UE_list->next[i]) {
    rnti = UE_RNTI(Mod_id,i);

    if(rnti == NOT_A_RNTI)
      continue;
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;
    UE_id = i;

    // if there is no available harq_process, skip the UE
    // NOTE(review): at this point CC_id still holds its value from the
    // reset loop above (MAX_NUM_CCs after loop exit), so harq_pid[CC_id]
    // looks like an out-of-bounds read; the per-CC check probably belongs
    // inside the ii-loop below — confirm against upstream history.
    if (UE_list->UE_sched_ctrl[UE_id].harq_pid[CC_id]<0)
      continue;

    for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
      CC_id = UE_list->ordered_CCids[ii][UE_id];
      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
      harq_pid = ue_sched_ctl->harq_pid[CC_id];
      round    = ue_sched_ctl->round[CC_id];

      average_rbs_per_user[CC_id]=0;

      frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id);

      //      mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0);

      // a pending retransmission must reuse its original allocation size
      if(round>0) {
        nb_rbs_required[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
      }

      //nb_rbs_required_remaining[UE_id] = nb_rbs_required[UE_id];
      if (nb_rbs_required[CC_id][UE_id] > 0) {
        total_ue_count = total_ue_count + 1;
      }


      // hypotetical assignement
      /*
       * If schedule is enabled and if the priority of the UEs is modified
       * The average rbs per logical channel per user will depend on the level of
       * priority. Concerning the hypothetical assignement, we should assign more
       * rbs to prioritized users. Maybe, we can do a mapping between the
       * average rbs per user and the level of priority or multiply the average rbs
       * per user by a coefficient which represents the degree of priority.
       */

      // fair share: each UE gets N_RB_DL/total_ue_count, floored at the
      // minimum RB unit when more UEs are active than fit at that share
      // (note: total_ue_count is still being accumulated while this runs,
      // so the value used here depends on UE list order)
      if (total_ue_count == 0) {
        average_rbs_per_user[CC_id] = 0;
      } else if( (min_rb_unit[CC_id] * total_ue_count) <= (frame_parms[CC_id]->N_RB_DL) ) {
        average_rbs_per_user[CC_id] = (uint16_t) floor(frame_parms[CC_id]->N_RB_DL/total_ue_count);
      } else {
        average_rbs_per_user[CC_id] = min_rb_unit[CC_id]; // consider the total number of use that can be scheduled UE
      }
    }
  }

  // note: nb_rbs_required is assigned according to total_buffer_dl
  // extend nb_rbs_required to capture per LCID RB required
  for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
    rnti = UE_RNTI(Mod_id,i);

    for (ii=0; ii<UE_num_active_CC(UE_list,i); ii++) {
      CC_id = UE_list->ordered_CCids[ii][i];

      // control channel: UEs not yet RRC-reconfigured get their full demand
      if (mac_eNB_get_rrc_status(Mod_id,rnti) < RRC_RECONFIGURED) {
        nb_rbs_required_remaining_1[CC_id][i] = nb_rbs_required[CC_id][i];
      } else {
        nb_rbs_required_remaining_1[CC_id][i] = cmin(average_rbs_per_user[CC_id],nb_rbs_required[CC_id][i]);

      }
    }
  }

  //Allocation to UEs is done in 2 rounds,
  // 1st stage: average number of RBs allocated to each UE
  // 2nd stage: remaining RBs are allocated to high priority UEs
  for(r1=0; r1<2; r1++) {

    for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
      for (ii=0; ii<UE_num_active_CC(UE_list,i); ii++) {
        CC_id = UE_list->ordered_CCids[ii][i];

        if(r1 == 0) {
          nb_rbs_required_remaining[CC_id][i] = nb_rbs_required_remaining_1[CC_id][i];
        } else { // rb required based only on the buffer - rb allloctaed in the 1st round + extra reaming rb form the 1st round
          nb_rbs_required_remaining[CC_id][i] = nb_rbs_required[CC_id][i]-nb_rbs_required_remaining_1[CC_id][i]+nb_rbs_required_remaining[CC_id][i];
        }

        if (nb_rbs_required[CC_id][i]> 0 )
          LOG_D(MAC,"round %d : nb_rbs_required_remaining[%d][%d]= %d (remaining_1 %d, required %d,  pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n",
                r1, CC_id, i,
                nb_rbs_required_remaining[CC_id][i],
                nb_rbs_required_remaining_1[CC_id][i],
                nb_rbs_required[CC_id][i],
                UE_list->UE_sched_ctrl[i].pre_nb_available_rbs[CC_id],
                N_RBG[CC_id],
                min_rb_unit[CC_id]);

      }
    }

    if (total_ue_count > 0 ) {
      for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
        UE_id = i;

        for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
          CC_id = UE_list->ordered_CCids[ii][UE_id];
          ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
          harq_pid = ue_sched_ctl->harq_pid[CC_id];
          round    = ue_sched_ctl->round[CC_id];

          rnti = UE_RNTI(Mod_id,UE_id);

          // LOG_D(MAC,"UE %d rnti 0x\n", UE_id, rnti );
          if(rnti == NOT_A_RNTI)
            continue;
          if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
            continue;

          transmission_mode = mac_xface->get_transmission_mode(Mod_id,CC_id,rnti);
          //          mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0);
          //rrc_status = mac_eNB_get_rrc_status(Mod_id,rnti);
          /* 1st allocate for the retx */

          // retransmission in data channels
          // control channel in the 1st transmission
          // data channel for all TM
          LOG_T(MAC,"calling dlsch_scheduler_pre_processor_allocate .. \n ");
          dlsch_scheduler_pre_processor_allocate (Mod_id,
                                                  UE_id,
                                                  CC_id,
                                                  N_RBG[CC_id],
                                                  transmission_mode,
                                                  min_rb_unit[CC_id],
                                                  frame_parms[CC_id]->N_RB_DL,
                                                  nb_rbs_required,
                                                  nb_rbs_required_remaining,
                                                  rballoc_sub,
                                                  MIMO_mode_indicator);

#ifdef TM5

          // data chanel TM5: to be revisted
          // try to pair this UE with a second TM5 UE on adjacent RBGs for
          // MU-MIMO (only first transmissions, round == 0)
          // NOTE(review): the inner loop below reuses `ii` (the CC loop
          // counter) and starts at next[i+1] — looks fragile; verify
          // before enabling TM5.
          if ((round == 0 )  &&
              (transmission_mode == 5)  &&
              (ue_sched_ctl->dl_pow_off[CC_id] != 1)) {

            for(j=0; j<N_RBG[CC_id]; j+=2) {

              if( (((j == (N_RBG[CC_id]-1))&& (rballoc_sub[CC_id][j] == 0) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 0))  ||
                   ((j < (N_RBG[CC_id]-1)) && (rballoc_sub[CC_id][j+1] == 0) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] == 0)) ) &&
                  (nb_rbs_required_remaining[CC_id][UE_id]>0)) {

                for (ii = UE_list->next[i+1]; ii >=0; ii=UE_list->next[ii]) {

                  UE_id2 = ii;
                  rnti2 = UE_RNTI(Mod_id,UE_id2);
                  ue_sched_ctl2 = &UE_list->UE_sched_ctrl[UE_id2];
                  harq_pid2 = ue_sched_ctl2->harq_pid[CC_id];
                  round2    = ue_sched_ctl2->round[CC_id];
                  if(rnti2 == NOT_A_RNTI)
                    continue;
                  if (UE_list->UE_sched_ctrl[UE_id2].ul_out_of_sync == 1)
                    continue;

                  eNB_UE_stats2 = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti2);
                  //mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti2,frameP,subframeP,&harq_pid2,&round2,0);

                  // candidate must be configured, in first transmission,
                  // TM5, and not already power-offset-restricted
                  if ((mac_eNB_get_rrc_status(Mod_id,rnti2) >= RRC_RECONFIGURED) &&
                      (round2==0) &&
                      (mac_xface->get_transmission_mode(Mod_id,CC_id,rnti2)==5) &&
                      (ue_sched_ctl->dl_pow_off[CC_id] != 1)) {

                    if( (((j == (N_RBG[CC_id]-1)) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 0)) ||
                         ((j < (N_RBG[CC_id]-1)) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] == 0))  ) &&
                        (nb_rbs_required_remaining[CC_id][UE_id2]>0)) {

                      if((((eNB_UE_stats2->DL_pmi_single^eNB_UE_stats1->DL_pmi_single)<<(14-j))&0xc000)== 0x4000) { //MU-MIMO only for 25 RBs configuration

                        // pair found: give both UEs this RBG (and the next)
                        rballoc_sub[CC_id][j] = 1;
                        ue_sched_ctl->rballoc_sub_UE[CC_id][j] = 1;
                        ue_sched_ctl2->rballoc_sub_UE[CC_id][j] = 1;
                        MIMO_mode_indicator[CC_id][j] = 0;

                        if (j< N_RBG[CC_id]-1) {
                          rballoc_sub[CC_id][j+1] = 1;
                          ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] = 1;
                          ue_sched_ctl2->rballoc_sub_UE[CC_id][j+1] = 1;
                          MIMO_mode_indicator[CC_id][j+1] = 0;
                        }

                        ue_sched_ctl->dl_pow_off[CC_id] = 0;
                        ue_sched_ctl2->dl_pow_off[CC_id] = 0;


                        // book-keeping: last RBG may be smaller than
                        // min_rb_unit for 25/50-PRB carriers
                        if ((j == N_RBG[CC_id]-1) &&
                            ((PHY_vars_eNB_g[Mod_id][CC_id]->frame_parms.N_RB_DL == 25) ||
                             (PHY_vars_eNB_g[Mod_id][CC_id]->frame_parms.N_RB_DL == 50))) {

                          nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit[CC_id]+1;
                          ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit[CC_id]-1;
                          nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - min_rb_unit[CC_id]+1;
                          ue_sched_ctl2->pre_nb_available_rbs[CC_id] = ue_sched_ctl2->pre_nb_available_rbs[CC_id] + min_rb_unit[CC_id]-1;
                        } else {

                          nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - 4;
                          ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + 4;
                          nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - 4;
                          ue_sched_ctl2->pre_nb_available_rbs[CC_id] = ue_sched_ctl2->pre_nb_available_rbs[CC_id] + 4;
                        }

                        break;
                      }
                    }
                  }
                }
              }
            }
          }

#endif
        }
      }
    } // total_ue_count
  } // end of for for r1 and r2

#ifdef TM5

  // This has to be revisited!!!!
  // count RBGs per MIMO mode to update the SU/MU-MIMO statistics
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
    i1=0;
    i2=0;
    i3=0;

    for (j=0; j<N_RBG[CC_id]; j++) {
      if(MIMO_mode_indicator[CC_id][j] == 2) {
        i1 = i1+1;
      } else if(MIMO_mode_indicator[CC_id][j] == 1) {
        i2 = i2+1;
      } else if(MIMO_mode_indicator[CC_id][j] == 0) {
        i3 = i3+1;
      }
    }

    if((i1 < N_RBG[CC_id]) && (i2>0) && (i3==0)) {
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions + 1;
    }

    if(i3 == N_RBG[CC_id] && i1==0 && i2==0) {
      PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions + 1;
    }

    if((i1 < N_RBG[CC_id]) && (i3 > 0)) {
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions + 1;
    }

    PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions + 1;

  }

#endif

  // final debug dump of the pre-allocation result per UE / CC
  for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
    UE_id = i;
    ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];

    for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
      CC_id = UE_list->ordered_CCids[ii][UE_id];
      //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id];

      if (ue_sched_ctl->pre_nb_available_rbs[CC_id] > 0 ) {
        LOG_D(MAC,"******************DL Scheduling Information for UE%d ************************\n",UE_id);
        LOG_D(MAC,"dl power offset UE%d = %d \n",UE_id,ue_sched_ctl->dl_pow_off[CC_id]);
        LOG_D(MAC,"***********RB Alloc for every subband for UE%d ***********\n",UE_id);

        for(j=0; j<N_RBG[CC_id]; j++) {
          //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].rballoc_sub[i] = rballoc_sub_UE[CC_id][UE_id][i];
          LOG_D(MAC,"RB Alloc for UE%d and Subband%d = %d\n",UE_id,j,ue_sched_ctl->rballoc_sub_UE[CC_id][j]);
        }

        //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id];
        LOG_D(MAC,"Total RBs allocated for UE%d = %d\n",UE_id,ue_sched_ctl->pre_nb_available_rbs[CC_id]);
      }
    }
  }
}

736
#define SF05_LIMIT 1
737

738
/**
 * Per-UE, per-CC reset step of the DL scheduler pre-processor.
 *
 * For the given (UE_id, CC_id) this routine:
 *  - refreshes the HARQ pid/round cached in the UE scheduling control block,
 *  - services the timing-advance (TA) timer: every 20 subframes it latches a
 *    TA update from the PHY statistics (scaled per bandwidth) and clears the
 *    PHY-side measurement; otherwise it decrements the timer and suppresses
 *    any TA command,
 *  - zeroes the per-UE RB bookkeeping (nb_rbs_required, pre_nb_available_rbs,
 *    nb_rbs_required_remaining) and resets dl_pow_off to 2,
 *  - rebuilds the RBG availability maps: marks RBGs already taken in the VRB
 *    map (SI/RA/P-RNTI allocations) as used, optionally blocks the RBGs
 *    around DC in subframes 0 and 5 (SF05_LIMIT), and resets the per-RBG
 *    MIMO mode indicator to 2.
 *
 * @param module_idP   eNB module instance index
 * @param UE_id        UE index within the eNB's UE list
 * @param CC_id        component carrier index
 * @param frameP       current frame (logging / HARQ lookup)
 * @param subframeP    current subframe (logging / HARQ lookup / SF05 rule)
 * @param N_RBG        number of resource block groups on this carrier
 * @param nb_rbs_required            [out] per-CC/per-UE required RBs, zeroed here
 * @param nb_rbs_required_remaining  [out] per-CC/per-UE remaining RBs, zeroed here
 * @param rballoc_sub          [out] global per-RBG allocation map, rebuilt from vrb_map
 * @param MIMO_mode_indicator  [out] per-RBG MIMO mode, reset to 2
 */
void dlsch_scheduler_pre_processor_reset (int module_idP,
					  int UE_id,
					  uint8_t  CC_id,
					  int frameP,
					  int subframeP,					  
					  int N_RBG,
					  uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
					  uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
					  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
					  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX])
  
{
  int i,j;
  UE_list_t *UE_list=&eNB_mac_inst[module_idP].UE_list;
  UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
  rnti_t rnti = UE_RNTI(module_idP,UE_id);
  uint8_t *vrb_map = eNB_mac_inst[module_idP].common_channels[CC_id].vrb_map;
  // NOTE(review): integer division — for N_RB_DL/N_RBG combinations that do
  // not divide evenly (e.g. 25/13) this rounds down, so the last partial RBG
  // is scanned with the smaller size; confirm against 36.213 RBG size table.
  int RBGsize = PHY_vars_eNB_g[module_idP][CC_id]->frame_parms.N_RB_DL/N_RBG;
#ifdef SF05_LIMIT
  //int subframe05_limit=0;
  int sf05_upper=-1,sf05_lower=-1;  // RBG window to block in subframes 0/5; -1 = no window
#endif
  LTE_eNB_UE_stats *eNB_UE_stats = mac_xface->get_eNB_UE_stats(module_idP,CC_id,rnti);
  // initialize harq_pid and round
  mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,
				    frameP,subframeP,
				    &ue_sched_ctl->harq_pid[CC_id],
				    &ue_sched_ctl->round[CC_id],
				    0);
  if (ue_sched_ctl->ta_timer == 0) {

    // WE SHOULD PROTECT the eNB_UE_stats with a mutex here ...

    ue_sched_ctl->ta_timer = 20;  // wait 20 subframes before taking TA measurement from PHY
    // Scale the PHY TA measurement by bandwidth: the PHY reports timing in
    // samples, whose duration depends on the FFT size / N_RB_DL.
    switch (PHY_vars_eNB_g[module_idP][CC_id]->frame_parms.N_RB_DL) {
    case 6:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update;
      break;
      
    case 15:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/2;
      break;
      
    case 25:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/4;
      break;
      
    case 50:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/8;
      break;
      
    case 75:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/12;
      break;
      
    case 100:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/16;
      break;
    }
    // clear the update in case PHY does not have a new measurement after timer expiry
    eNB_UE_stats->timing_advance_update =  0;
  }
  else {
    ue_sched_ctl->ta_timer--;
    ue_sched_ctl->ta_update =0; // don't trigger a timing advance command
  }
  // Trace UE0's TA for debugging via the VCD signal dumper.
  if (UE_id==0) {
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_TIMING_ADVANCE,ue_sched_ctl->ta_update);
  }
  // Reset per-UE RB accounting for this CC; dl_pow_off = 2 means "not in
  // MU-MIMO / TM5 power-offset mode" (the allocate step may set it to 1).
  nb_rbs_required[CC_id][UE_id]=0;
  ue_sched_ctl->pre_nb_available_rbs[CC_id] = 0;
  ue_sched_ctl->dl_pow_off[CC_id] = 2;
  nb_rbs_required_remaining[CC_id][UE_id] = 0;

#ifdef SF05_LIMIT  
  // Pick the RBG window straddling the DC subcarrier for each bandwidth
  // (N_RBG of 6/8/13/17/25 <=> 1.4/3/5/10/20 MHz).
  switch (N_RBG) {
  case 6:
    sf05_lower=0;
    sf05_upper=5;
    break;
  case 8:
    sf05_lower=2;
    sf05_upper=5;
    break;
  case 13:
    sf05_lower=4;
    sf05_upper=7;
    break;
  case 17:
    sf05_lower=7;
    sf05_upper=9;
    break;
  case 25:
    sf05_lower=11;
    sf05_upper=13;
    break;
  }
#endif
  // Initialize Subbands according to VRB map
  for (i=0; i<N_RBG; i++) {
    ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 0;
    rballoc_sub[CC_id][i] = 0;
#ifdef SF05_LIMIT
    // for avoiding 6+ PRBs around DC in subframe 0-5 (avoid excessive errors)

    if ((subframeP==0 || subframeP==5) && 
	(i>=sf05_lower && i<=sf05_upper))
      rballoc_sub[CC_id][i]=1;
#endif
    // for SI-RNTI,RA-RNTI and P-RNTI allocations
    for (j=0;j<RBGsize;j++) {
      // Any non-zero vrb_map entry inside the RBG makes the whole RBG busy.
      if (vrb_map[j+(i*RBGsize)]!=0)  {
	rballoc_sub[CC_id][i] = 1;
	LOG_D(MAC,"Frame %d, subframe %d : vrb %d allocated\n",frameP,subframeP,j+(i*RBGsize));
	break;
      }
    }
    LOG_D(MAC,"Frame %d Subframe %d CC_id %d RBG %i : rb_alloc %d\n",frameP,subframeP,CC_id,i,rballoc_sub[CC_id][i]);
    MIMO_mode_indicator[CC_id][i] = 2;
  }
}


/**
 * Greedy RBG allocation step of the DL scheduler pre-processor.
 *
 * Walks the resource block groups of component carrier CC_id in order and
 * hands free RBGs to UE_id until either its remaining requirement is met or
 * no eligible RBG is left. An RBG is granted only if it is free in both the
 * global map and the UE's own map, and the UE is not currently frozen for
 * TM5 MU-MIMO pairing (dl_pow_off == 0). Granting an RBG marks it in both
 * maps, sets its MIMO mode indicator to 1, and (for transmission mode 5)
 * switches the UE's dl_pow_off to 1.
 *
 * The last RBG of a 25- or 50-PRB carrier contains one PRB fewer than the
 * others, so the accounting there uses min_rb_unit-1 instead of min_rb_unit.
 *
 * @param Mod_id             eNB module instance index
 * @param UE_id              UE index within the eNB's UE list
 * @param CC_id              component carrier index
 * @param N_RBG              number of RBGs on this carrier
 * @param transmission_mode  DL transmission mode of the UE (5 => MU-MIMO handling)
 * @param min_rb_unit        minimum allocation unit in PRBs (RBG size)
 * @param N_RB_DL            DL bandwidth in PRBs
 * @param nb_rbs_required            [in] per-CC/per-UE total PRB demand
 * @param nb_rbs_required_remaining  [in/out] per-CC/per-UE PRBs still to allocate
 * @param rballoc_sub          [in/out] global per-RBG allocation map
 * @param MIMO_mode_indicator  [in/out] per-RBG MIMO mode flags
 */
void dlsch_scheduler_pre_processor_allocate (module_id_t   Mod_id,
    int           UE_id,
    uint8_t       CC_id,
    int           N_RBG,
    int           transmission_mode,
    int           min_rb_unit,
    uint8_t       N_RB_DL,
    uint16_t      nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
    uint16_t      nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
    unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
    unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX])
{
  int rbg;
  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
  UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];

  for (rbg = 0; rbg < N_RBG; rbg++) {

    /* RBG already taken globally or already assigned to this UE. */
    if ((rballoc_sub[CC_id][rbg] != 0) ||
        (ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] != 0))
      continue;

    /* UE's demand already satisfied: nothing left to allocate. */
    if ((nb_rbs_required_remaining[CC_id][UE_id] == 0) ||
        (ue_sched_ctl->pre_nb_available_rbs[CC_id] >= nb_rbs_required[CC_id][UE_id]))
      continue;

    /* dl_pow_off == 0: UE is frozen for TM5 scheduling, skip it. */
    if (ue_sched_ctl->dl_pow_off[CC_id] == 0)
      continue;

    if ((rbg == N_RBG - 1) && ((N_RB_DL == 25) || (N_RB_DL == 50))) {
      /* Short last RBG: counts min_rb_unit-1 PRBs.
       * NOTE(review): remaining is not checked against min_rb_unit-1 here;
       * presumably it can never be smaller at this point — verify, since
       * nb_rbs_required_remaining is unsigned and would wrap on underflow. */
      rballoc_sub[CC_id][rbg] = 1;
      ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] = 1;
      MIMO_mode_indicator[CC_id][rbg] = 1;

      if (transmission_mode == 5) {
        ue_sched_ctl->dl_pow_off[CC_id] = 1;
      }

      nb_rbs_required_remaining[CC_id][UE_id] -= (min_rb_unit - 1);
      ue_sched_ctl->pre_nb_available_rbs[CC_id] += (min_rb_unit - 1);
    } else if (nb_rbs_required_remaining[CC_id][UE_id] >= min_rb_unit) {
      /* Regular RBG: grant a full allocation unit. */
      rballoc_sub[CC_id][rbg] = 1;
      ue_sched_ctl->rballoc_sub_UE[CC_id][rbg] = 1;
      MIMO_mode_indicator[CC_id][rbg] = 1;

      if (transmission_mode == 5) {
        ue_sched_ctl->dl_pow_off[CC_id] = 1;
      }

      nb_rbs_required_remaining[CC_id][UE_id] -= min_rb_unit;
      ue_sched_ctl->pre_nb_available_rbs[CC_id] += min_rb_unit;
    }
  }
}


916
/// ULSCH PRE_PROCESSOR
917

918

919
void ulsch_scheduler_pre_processor(module_id_t module_idP,
920 921 922
                                   int frameP,
                                   sub_frame_t subframeP,
                                   uint16_t *first_rb,
923
                                   uint8_t aggregation)
924
{
925 926 927 928 929 930 931 932

  int16_t            i;
  uint16_t           UE_id,n,r;
  uint8_t            CC_id, round, harq_pid;
  uint16_t           nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],total_allocated_rbs[MAX_NUM_CCs],average_rbs_per_user[MAX_NUM_CCs];
  int16_t            total_remaining_rbs[MAX_NUM_CCs];
  uint16_t           max_num_ue_to_be_scheduled=0,total_ue_count=0;
  rnti_t             rnti= -1;
933
  UE_list_t          *UE_list = &eNB_mac_inst[module_idP].UE_list;
934 935
  UE_TEMPLATE        *UE_template = 0;
  LTE_DL_FRAME_PARMS   *frame_parms = 0;
936

937

938
  //LOG_I(MAC,"assign max mcs min rb\n");
939 940
  // maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
  assign_max_mcs_min_rb(module_idP,frameP, subframeP, first_rb);
941

942
  //LOG_I(MAC,"sort ue \n");
943
  // sort ues
944 945
  sort_ue_ul (module_idP,frameP, subframeP);

946

947 948
  // we need to distribute RBs among UEs
  // step1:  reset the vars
949
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
950 951 952
    total_allocated_rbs[CC_id]=0;
    total_remaining_rbs[CC_id]=0;
    average_rbs_per_user[CC_id]=0;
953 954

    for (i=UE_list->head_ul; i>=0; i=UE_list->next_ul[i]) {
955 956 957 958
      nb_allocated_rbs[CC_id][i]=0;
    }
  }

959
  //LOG_I(MAC,"step2 \n");
960 961 962
  // step 2: calculate the average rb per UE
  total_ue_count =0;
  max_num_ue_to_be_scheduled=0;
963 964 965 966 967

  for (i=UE_list->head_ul; i>=0; i=UE_list->next_ul[i]) {

    rnti = UE_RNTI(module_idP,i);

968
    if (rnti==NOT_A_RNTI)
969 970
      continue;

971 972 973
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;

974
    UE_id = i;
975 976

    for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
977 978 979 980
      // This is the actual CC_id in the list
      CC_id = UE_list->ordered_ULCCids[n][UE_id];
      UE_template = &UE_list->UE_template[CC_id][UE_id];
      average_rbs_per_user[CC_id]=0;
981 982
      frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id);

983
      if (UE_template->pre_allocated_nb_rb_ul > 0) {
984
        total_ue_count+=1;
985
      }
986 987
      /*
      if((mac_xface->get_nCCE_max(module_idP,CC_id,3,subframeP) - nCCE_to_be_used[CC_id])  > (1<<aggregation)) {
988 989
        nCCE_to_be_used[CC_id] = nCCE_to_be_used[CC_id] + (1<<aggregation);
        max_num_ue_to_be_scheduled+=1;
990 991 992
	}*/

      max_num_ue_to_be_scheduled+=1;
993

994
      if (total_ue_count == 0) {
995
        average_rbs_per_user[CC_id] = 0;
996
      } else if (total_ue_count == 1 ) { // increase the available RBs, special case,
997
        average_rbs_per_user[CC_id] = frame_parms->N_RB_UL-first_rb[CC_id]+1;
998
      } else if( (total_ue_count <= (frame_parms->N_RB_DL-first_rb[CC_id])) &&
999
                 (total_ue_count <= max_num_ue_to_be_scheduled)) {
1000
        average_rbs_per_user[CC_id] = (uint16_t) floor((frame_parms->N_RB_UL-first_rb[CC_id])/total_ue_count);
1001
      } else if (max_num_ue_to_be_scheduled > 0 ) {
1002
        average_rbs_per_user[CC_id] = (uint16_t) floor((frame_parms->N_RB_UL-first_rb[CC_id])/max_num_ue_to_be_scheduled);
1003
      } else {
1004 1005 1006
        average_rbs_per_user[CC_id]=1;
        LOG_W(MAC,"[eNB %d] frame %d subframe %d: UE %d CC %d: can't get average rb per user (should not be here)\n",
              module_idP,frameP,subframeP,UE_id,CC_id);
1007 1008 1009
      }
    }
  }
1010 1011 1012
  if (total_ue_count > 0)
    LOG_D(MAC,"[eNB %d] Frame %d subframe %d: total ue to be scheduled %d/%d\n",
	  module_idP, frameP, subframeP,total_ue_count, max_num_ue_to_be_scheduled);
1013

1014
  //LOG_D(MAC,"step3\n");
1015

1016 1017 1018 1019
  // step 3: assigne RBS
  for (i=UE_list->head_ul; i>=0; i=UE_list->next_ul[i]) {
    rnti = UE_RNTI(module_idP,i);

1020
    if (rnti==NOT_A_RNTI)
1021
      continue;
1022 1023
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;
1024

1025
    UE_id = i;
1026 1027

    for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
1028 1029
      // This is the actual CC_id in the list
      CC_id = UE_list->ordered_ULCCids[n][UE_id];
1030

1031
      mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,frameP,subframeP,&harq_pid,&round,1);
1032

1033
      if(round>0) {
1034
        nb_allocated_rbs[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb_ul[harq_pid];
1035
      } else {
1036
        nb_allocated_rbs[CC_id][UE_id] = cmin(UE_list->UE_template[CC_id][UE_id].pre_allocated_nb_rb_ul, average_rbs_per_user[CC_id]);
1037
      }
1038

1039
      total_allocated_rbs[CC_id]+= nb_allocated_rbs[CC_id][UE_id];
1040

1041 1042
    }
  }
1043

1044
  // step 4: assigne the remaining RBs and set the pre_allocated rbs accordingly
1045 1046 1047 1048 1049
  for(r=0; r<2; r++) {

    for (i=UE_list->head_ul; i>=0; i=UE_list->next_ul[i]) {
      rnti = UE_RNTI(module_idP,i);

1050
      if (rnti==NOT_A_RNTI)
1051
        continue;
1052 1053
      if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
	continue;
1054

1055
      UE_id = i;
1056 1057 1058 1059 1060 1061 1062 1063

      for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
        // This is the actual CC_id in the list
        CC_id = UE_list->ordered_ULCCids[n][UE_id];
        UE_template = &UE_list->UE_template[CC_id][UE_id];
        frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id);
        total_remaining_rbs[CC_id]=frame_parms->N_RB_UL - first_rb[CC_id] - total_allocated_rbs[CC_id];

1064
        if (total_ue_count == 1 ) {
1065
          total_remaining_rbs[CC_id]+=1;