pre_processor.c 55.7 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
/*
 * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The OpenAirInterface Software Alliance licenses this file to You under
 * the OAI Public License, Version 1.0  (the "License"); you may not use this file
 * except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.openairinterface.org/?page_id=698
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *-------------------------------------------------------------------------------
 * For more information about the OpenAirInterface (OAI) Software Alliance:
 *      contact@openairinterface.org
 */
21 22

/*! \file pre_processor.c
23
 * \brief eNB scheduler preprocessing fuction prior to scheduling
24
 * \author Navid Nikaein and Ankit Bhamri
25
 * \date 2013 - 2014
26
 * \email navid.nikaein@eurecom.fr
27
 * \version 1.0
28 29 30 31
 * @ingroup _mac

 */

32 33 34
#define _GNU_SOURCE
#include <stdlib.h>

35
#include "assertions.h"
36 37 38 39 40 41 42
#include "PHY/defs.h"
#include "PHY/extern.h"

#include "SCHED/defs.h"
#include "SCHED/extern.h"

#include "LAYER2/MAC/defs.h"
43
#include "LAYER2/MAC/proto.h"
44 45
#include "LAYER2/MAC/extern.h"
#include "UTIL/LOG/log.h"
46
#include "UTIL/LOG/vcd_signal_dumper.h"
47 48 49 50 51
#include "UTIL/OPT/opt.h"
#include "OCG.h"
#include "OCG_extern.h"
#include "RRC/LITE/extern.h"
#include "RRC/L2_INTERFACE/openair_rrc_L2_interface.h"
52
#include "rlc.h"
53 54


55

56 57 58 59 60 61 62 63 64 65
#define DEBUG_eNB_SCHEDULER 1
#define DEBUG_HEADER_PARSING 1
//#define DEBUG_PACKET_TRACE 1

//#define ICIC 0

/*
  #ifndef USER_MODE
  #define msg debug_msg
  #endif
knopp's avatar
knopp committed
66
*/
67

68 69 70
/* this function checks that get_eNB_UE_stats returns
 * a non-NULL pointer for all CCs for a given UE
 */
71
int phy_stats_exist(module_id_t Mod_id, int rnti)
72 73 74 75 76 77 78
{
  int CC_id;
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++)
    if (mac_xface->get_eNB_UE_stats(Mod_id, CC_id, rnti) == NULL)
      return 0;
  return 1;
}
79

80
// This function stores the downlink buffer for all the logical channels
81 82
void store_dlsch_buffer (module_id_t Mod_id,
                         frame_t     frameP,
83 84
                         sub_frame_t subframeP)
{
85

knopp's avatar
knopp committed
86
  int                   UE_id,i;
87
  rnti_t                rnti;
88
  mac_rlc_status_resp_t rlc_status;
knopp's avatar
knopp committed
89 90 91
  UE_list_t             *UE_list = &eNB_mac_inst[Mod_id].UE_list;
  UE_TEMPLATE           *UE_template;

92 93
  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    if (UE_list->active[UE_id] != TRUE) continue;
knopp's avatar
knopp committed
94 95

    UE_template = &UE_list->UE_template[UE_PCCID(Mod_id,UE_id)][UE_id];
96 97

    // clear logical channel interface variables
knopp's avatar
knopp committed
98 99
    UE_template->dl_buffer_total = 0;
    UE_template->dl_pdus_total = 0;
100 101

    for(i=0; i< MAX_NUM_LCID; i++) {
knopp's avatar
knopp committed
102 103 104 105 106
      UE_template->dl_buffer_info[i]=0;
      UE_template->dl_pdus_in_buffer[i]=0;
      UE_template->dl_buffer_head_sdu_creation_time[i]=0;
      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i]=0;
    }
107

knopp's avatar
knopp committed
108
    rnti = UE_RNTI(Mod_id,UE_id);
109 110 111

    for(i=0; i< MAX_NUM_LCID; i++) { // loop over all the logical channels

112
      rlc_status = mac_rlc_status_ind(Mod_id,rnti, Mod_id,frameP,subframeP,ENB_FLAG_YES,MBMS_FLAG_NO,i,0 );
knopp's avatar
knopp committed
113 114 115
      UE_template->dl_buffer_info[i] = rlc_status.bytes_in_buffer; //storing the dlsch buffer for each logical channel
      UE_template->dl_pdus_in_buffer[i] = rlc_status.pdus_in_buffer;
      UE_template->dl_buffer_head_sdu_creation_time[i] = rlc_status.head_sdu_creation_time ;
116 117
      UE_template->dl_buffer_head_sdu_creation_time_max = cmax(UE_template->dl_buffer_head_sdu_creation_time_max,
          rlc_status.head_sdu_creation_time );
knopp's avatar
knopp committed
118 119
      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i] = rlc_status.head_sdu_remaining_size_to_send;
      UE_template->dl_buffer_head_sdu_is_segmented[i] = rlc_status.head_sdu_is_segmented;
120 121
      UE_template->dl_buffer_total += UE_template->dl_buffer_info[i];//storing the total dlsch buffer
      UE_template->dl_pdus_total   += UE_template->dl_pdus_in_buffer[i];
122

123
#ifdef DEBUG_eNB_SCHEDULER
124

knopp's avatar
knopp committed
125 126 127 128
      /* note for dl_buffer_head_sdu_remaining_size_to_send[i] :
       * 0 if head SDU has not been segmented (yet), else remaining size not already segmented and sent
       */
      if (UE_template->dl_buffer_info[i]>0)
129 130
        LOG_D(MAC,
              "[eNB %d] Frame %d Subframe %d : RLC status for UE %d in LCID%d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmeneted %d \n",
131 132 133 134 135 136 137
              Mod_id, frameP, subframeP, UE_id,
              i, UE_template->dl_pdus_in_buffer[i],UE_template->dl_buffer_info[i],
              UE_template->dl_buffer_head_sdu_creation_time[i],
              UE_template->dl_buffer_head_sdu_remaining_size_to_send[i],
              UE_template->dl_buffer_head_sdu_is_segmented[i]
             );

138
#endif
139

knopp's avatar
knopp committed
140
    }
141

142
    //#ifdef DEBUG_eNB_SCHEDULER
knopp's avatar
knopp committed
143 144
    if ( UE_template->dl_buffer_total>0)
      LOG_D(MAC,"[eNB %d] Frame %d Subframe %d : RLC status for UE %d : total DL buffer size %d and total number of pdu %d \n",
145 146 147 148 149 150
            Mod_id, frameP, subframeP, UE_id,
            UE_template->dl_buffer_total,
            UE_template->dl_pdus_total
           );

    //#endif
151 152 153
  }
}

154

155
// This function returns the estimated number of RBs required by each UE for downlink scheduling
/* For every active UE: refresh the per-CC MCS from the latest CQI report,
 * sort the UE's CCs by increasing MCS, then grow nb_rbs_required[CC][UE]
 * in min_rb_unit steps until the corresponding TBS covers the UE's total
 * DL buffer (capped at N_RB_DL).
 *
 * Mod_id          : eNB module instance
 * frameP/subframe : current frame/subframe (used for logging only here)
 * nb_rbs_required : output, estimated RBs per CC per UE
 * min_rb_unit     : minimum RB allocation granularity per CC
 */
void assign_rbs_required (module_id_t Mod_id,
                          frame_t     frameP,
                          sub_frame_t subframe,
                          uint16_t    nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
                          int         min_rb_unit[MAX_NUM_CCs])
{
  rnti_t           rnti;
  uint16_t         TBS = 0;
  LTE_eNB_UE_stats *eNB_UE_stats[MAX_NUM_CCs];
  int              UE_id,n,i,j,CC_id,pCCid,tmp;
  UE_list_t        *UE_list = &eNB_mac_inst[Mod_id].UE_list;
  //  UE_TEMPLATE           *UE_template;
  LTE_DL_FRAME_PARMS   *frame_parms[MAX_NUM_CCs];

  // clear rb allocations across all CC_ids
  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    if (UE_list->active[UE_id] != TRUE) continue;

    pCCid = UE_PCCID(Mod_id,UE_id);
    rnti = UE_list->UE_template[pCCid][UE_id].rnti;

    /* skip UE not present in PHY (for any of its active CCs) */
    if (!phy_stats_exist(Mod_id, rnti))
      continue;

    //update CQI information across component carriers
    for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {

      CC_id = UE_list->ordered_CCids[n][UE_id];
      frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id);
      eNB_UE_stats[CC_id] = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti);
      /*
      DevCheck(((eNB_UE_stats[CC_id]->DL_cqi[0] < MIN_CQI_VALUE) || (eNB_UE_stats[CC_id]->DL_cqi[0] > MAX_CQI_VALUE)),
      eNB_UE_stats[CC_id]->DL_cqi[0], MIN_CQI_VALUE, MAX_CQI_VALUE);
      */
      /* map the latest wideband CQI report to an MCS; note this writes back
       * into the shared PHY stats structure */
      eNB_UE_stats[CC_id]->dlsch_mcs1=cqi_to_mcs[eNB_UE_stats[CC_id]->DL_cqi[0]];

      /* self-assignment: the MCS cap (cmin against a target MCS) is
       * deliberately disabled, see trailing comment */
      eNB_UE_stats[CC_id]->dlsch_mcs1 = eNB_UE_stats[CC_id]->dlsch_mcs1;//cmin(eNB_UE_stats[CC_id]->dlsch_mcs1,openair_daq_vars.target_ue_dl_mcs);

    }

    // provide the list of CCs sorted according to MCS
    /* in-place bubble sort of ordered_CCids by ascending dlsch_mcs1 */
    for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
      for (j=i+1; j<UE_list->numactiveCCs[UE_id]; j++) {
        DevAssert( j < MAX_NUM_CCs );

        if (eNB_UE_stats[UE_list->ordered_CCids[i][UE_id]]->dlsch_mcs1 >
            eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]]->dlsch_mcs1) {
          tmp = UE_list->ordered_CCids[i][UE_id];
          UE_list->ordered_CCids[i][UE_id] = UE_list->ordered_CCids[j][UE_id];
          UE_list->ordered_CCids[j][UE_id] = tmp;
        }
      }
    }

    /*
    if ((mac_get_rrc_status(Mod_id,1,UE_id) < RRC_RECONFIGURED)){  // If we still don't have a default radio bearer
      nb_rbs_required[pCCid][UE_id] = PHY_vars_eNB_g[Mod_id][pCCid]->frame_parms.N_RB_DL;
      continue;
    }
    */
    /* NN --> RK
     * check the index of UE_template"
     */
    //    if (UE_list->UE_template[UE_id]->dl_buffer_total> 0) {
    if (UE_list->UE_template[pCCid][UE_id].dl_buffer_total> 0) {
      LOG_D(MAC,"[preprocessor] assign RB for UE %d\n",UE_id);

      for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
        CC_id = UE_list->ordered_CCids[i][UE_id];
        frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id);
        eNB_UE_stats[CC_id] = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti);

        if (eNB_UE_stats[CC_id]->dlsch_mcs1==0) {
          nb_rbs_required[CC_id][UE_id] = 4;  // don't let the TBS get too small
        } else {
          nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id];
        }

        TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);

        LOG_D(MAC,"[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n",
              UE_id, CC_id, UE_list->UE_template[pCCid][UE_id].dl_buffer_total,
              nb_rbs_required[CC_id][UE_id],eNB_UE_stats[CC_id]->dlsch_mcs1,TBS);

        /* calculating required number of RBs for each UE */
        /* grow the allocation in min_rb_unit steps until the TBS covers
         * the whole DL buffer, capping at the carrier bandwidth */
        while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total)  {
          nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id];

          if (nb_rbs_required[CC_id][UE_id] > frame_parms[CC_id]->N_RB_DL) {
            TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,frame_parms[CC_id]->N_RB_DL);
            nb_rbs_required[CC_id][UE_id] = frame_parms[CC_id]->N_RB_DL;
            break;
          }

          TBS = mac_xface->get_TBS_DL(eNB_UE_stats[CC_id]->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
        } // end of while

        LOG_D(MAC,"[eNB %d] Frame %d: UE %d on CC %d: RB unit %d,  nb_required RB %d (TBS %d, mcs %d)\n",
              Mod_id, frameP,UE_id, CC_id,  min_rb_unit[CC_id], nb_rbs_required[CC_id][UE_id], TBS, eNB_UE_stats[CC_id]->dlsch_mcs1);
      }
    }
  }
}
262 263


knopp's avatar
knopp committed
264
// This function scans all CC_ids for a particular UE to find the maximum round index of its HARQ processes
265

266 267
int maxround(module_id_t Mod_id,uint16_t rnti,int frame,sub_frame_t subframe,uint8_t ul_flag )
{
268

269
  uint8_t round,round_max=0,UE_id;
knopp's avatar
knopp committed
270
  int CC_id;
271
  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
272

273 274
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {

275 276
    UE_id = find_UE_id(Mod_id,rnti);
    round    = UE_list->UE_sched_ctrl[UE_id].round[CC_id];
277
    if (round > round_max) {
knopp's avatar
knopp committed
278
      round_max = round;
279
    }
280 281
  }

282
  return round_max;
knopp's avatar
knopp committed
283
}
284

knopp's avatar
knopp committed
285
// This function scans all CC_ids for a particular UE to find the maximum DL CQI
286
// it returns -1 if the UE is not found in PHY layer (get_eNB_UE_stats gives NULL)
287 288
int maxcqi(module_id_t Mod_id,int32_t UE_id)
{
289

knopp's avatar
knopp committed
290
  LTE_eNB_UE_stats *eNB_UE_stats = NULL;
291 292
  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
  int CC_id,n;
knopp's avatar
knopp committed
293
  int CQI = 0;
294

295
  for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {
296 297
    CC_id = UE_list->ordered_CCids[n][UE_id];
    eNB_UE_stats = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,UE_RNTI(Mod_id,UE_id));
298

299
    if (eNB_UE_stats==NULL) {
300 301 302
      /* the UE may have been removed in the PHY layer, don't exit */
      //mac_xface->macphy_exit("maxcqi: could not get eNB_UE_stats\n");
      return -1;
303
    }
304

305
    if (eNB_UE_stats->DL_cqi[0] > CQI) {
knopp's avatar
knopp committed
306
      CQI = eNB_UE_stats->DL_cqi[0];
307
    }
308
  }
309

knopp's avatar
knopp committed
310 311
  return(CQI);
}
312

313 314 315 316 317
/* context passed to ue_dl_compare() through qsort_r() in sort_UEs() */
struct sort_ue_dl_params {
  int Mod_idP;    // eNB module instance
  int frameP;     // current frame, forwarded to maxround()
  int subframeP;  // current subframe, forwarded to maxround()
};
318

319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376
/* qsort_r() comparator used by sort_UEs().
 *
 * Orders two UE indices, highest scheduling priority first, by:
 *   1. largest HARQ round over all CCs (pending retransmissions first)
 *   2. largest SRB1+SRB2 buffer occupancy (signalling radio bearers first)
 *   3. oldest head SDU (longest queueing time first)
 *   4. largest total DL buffer
 *   5. best CQI
 * Returns <0 if *_a sorts before *_b, >0 for the converse, 0 on a tie.
 *
 * (The previous revision carried the legacy swap-based ordering as a large
 * unreachable #if 0 block after the return; it has been removed — the
 * criteria above are its exact translation.)
 */
static int ue_dl_compare(const void *_a, const void *_b, void *_params)
{
  struct sort_ue_dl_params *params = _params;
  UE_list_t *UE_list = &eNB_mac_inst[params->Mod_idP].UE_list;

  int UE_id1 = *(const int *)_a;
  int UE_id2 = *(const int *)_b;

  int rnti1  = UE_RNTI(params->Mod_idP, UE_id1);
  int pCC_id1 = UE_PCCID(params->Mod_idP, UE_id1);
  int round1 = maxround(params->Mod_idP, rnti1, params->frameP, params->subframeP, 1);

  int rnti2  = UE_RNTI(params->Mod_idP, UE_id2);
  int pCC_id2 = UE_PCCID(params->Mod_idP, UE_id2);
  int round2 = maxround(params->Mod_idP, rnti2, params->frameP, params->subframeP, 1);

  int cqi1 = maxcqi(params->Mod_idP, UE_id1);
  int cqi2 = maxcqi(params->Mod_idP, UE_id2);

  /* 1. active HARQ processes needing service come first */
  if (round1 > round2) return -1;
  if (round1 < round2) return 1;

  /* 2. buffer status for SRB1 and SRB2 */
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2] >
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])
    return -1;
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2] <
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])
    return 1;

  /* 3. UE waiting longer for service goes first */
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max >
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max)
    return -1;
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max)
    return 1;

  /* 4. total DL buffer occupancy */
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total >
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total)
    return -1;
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total)
    return 1;

  /* 5. channel quality */
  if (cqi1 > cqi2) return -1;
  if (cqi1 < cqi2) return 1;

  return 0;
}
393

knopp's avatar
knopp committed
394
// This fuction sorts the UE in order their dlsch buffer and CQI
knopp's avatar
knopp committed
395
void sort_UEs (module_id_t Mod_idP,
396 397 398
               int         frameP,
               sub_frame_t subframeP)
{
399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430
  int               i;
  int               list[NUMBER_OF_UE_MAX];
  int               list_size = 0;
  int               rnti;
  struct sort_ue_dl_params params = { Mod_idP, frameP, subframeP };

  UE_list_t *UE_list = &eNB_mac_inst[Mod_idP].UE_list;

  for (i = 0; i < NUMBER_OF_UE_MAX; i++) {
    rnti = UE_RNTI(Mod_idP, i);
    if (rnti == NOT_A_RNTI)
      continue;
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;
    if (!phy_stats_exist(Mod_idP, rnti))
      continue;
    list[list_size] = i;
    list_size++;
  }

  qsort_r(list, list_size, sizeof(int), ue_dl_compare, &params);

  if (list_size) {
    for (i = 0; i < list_size-1; i++)
      UE_list->next[list[i]] = list[i+1];
    UE_list->next[list[list_size-1]] = -1;
    UE_list->head = list[0];
  } else {
    UE_list->head = -1;
  }

#if 0
431 432


knopp's avatar
knopp committed
433 434 435
  int               UE_id1,UE_id2;
  int               pCC_id1,pCC_id2;
  int               cqi1,cqi2,round1,round2;
436
  int               i=0,ii=0;//,j=0;
knopp's avatar
knopp committed
437
  rnti_t            rnti1,rnti2;
438

knopp's avatar
knopp committed
439
  UE_list_t *UE_list = &eNB_mac_inst[Mod_idP].UE_list;
440

441
  for (i=UE_list->head; i>=0; i=UE_list->next[i]) {
442

443
    for(ii=UE_list->next[i]; ii>=0; ii=UE_list->next[ii]) {
444

445 446
      UE_id1  = i;
      rnti1 = UE_RNTI(Mod_idP,UE_id1);
447 448
      if(rnti1 == NOT_A_RNTI)
	continue;
449 450
      if (UE_list->UE_sched_ctrl[UE_id1].ul_out_of_sync == 1)
	continue;
451 452
      if (!phy_stats_exist(Mod_idP, rnti1))
        continue;
453 454 455
      pCC_id1 = UE_PCCID(Mod_idP,UE_id1);
      cqi1    = maxcqi(Mod_idP,UE_id1); //
      round1  = maxround(Mod_idP,rnti1,frameP,subframeP,0);
456

457 458
      UE_id2 = ii;
      rnti2 = UE_RNTI(Mod_idP,UE_id2);
459 460
      if(rnti2 == NOT_A_RNTI)
        continue;
461 462
      if (UE_list->UE_sched_ctrl[UE_id2].ul_out_of_sync == 1)
	continue;
463 464
      if (!phy_stats_exist(Mod_idP, rnti2))
        continue;
465
      cqi2    = maxcqi(Mod_idP,UE_id2);
466
      round2  = maxround(Mod_idP,rnti2,frameP,subframeP,0);  //mac_xface->get_ue_active_harq_pid(Mod_id,rnti2,subframe,&harq_pid2,&round2,0);
knopp's avatar
knopp committed
467
      pCC_id2 = UE_PCCID(Mod_idP,UE_id2);
468

469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490
      if(round2 > round1) { // Check first if one of the UEs has an active HARQ process which needs service and swap order
        swap_UEs(UE_list,UE_id1,UE_id2,0);
      } else if (round2 == round1) {
        // RK->NN : I guess this is for fairness in the scheduling. This doesn't make sense unless all UEs have the same configuration of logical channels.  This should be done on the sum of all information that has to be sent.  And still it wouldn't ensure fairness.  It should be based on throughput seen by each UE or maybe using the head_sdu_creation_time, i.e. swap UEs if one is waiting longer for service.
        //  for(j=0;j<MAX_NUM_LCID;j++){
        //    if (eNB_mac_inst[Mod_id][pCC_id1].UE_template[UE_id1].dl_buffer_info[j] <
        //      eNB_mac_inst[Mod_id][pCC_id2].UE_template[UE_id2].dl_buffer_info[j]){

        // first check the buffer status for SRB1 and SRB2

        if ( (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2]) <
             (UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
                   UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
                   UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (cqi1 < cqi2) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        }
knopp's avatar
knopp committed
491 492
      }
    }
493
  }
494
#endif
495 496
}

497

knopp's avatar
knopp committed
498 499


500
// This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done
knopp's avatar
knopp committed
501
void dlsch_scheduler_pre_processor (module_id_t   Mod_id,
502 503 504 505 506
                                    frame_t       frameP,
                                    sub_frame_t   subframeP,
                                    int           N_RBG[MAX_NUM_CCs],
                                    int           *mbsfn_flag)
{
knopp's avatar
knopp committed
507

508
  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],harq_pid=0,round=0,total_ue_count;
knopp's avatar
knopp committed
509
  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
510
  int                     UE_id, i; 
511
  uint16_t                ii,j;
knopp's avatar
knopp committed
512 513 514
  uint16_t                nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t                nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t                nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
515
  uint16_t                average_rbs_per_user[MAX_NUM_CCs] = {0};
516
  rnti_t             rnti;
knopp's avatar
knopp committed
517
  int                min_rb_unit[MAX_NUM_CCs];
518
  uint16_t r1=0;
519
  uint8_t CC_id;
knopp's avatar
knopp committed
520
  UE_list_t *UE_list = &eNB_mac_inst[Mod_id].UE_list;
521
  LTE_DL_FRAME_PARMS   *frame_parms[MAX_NUM_CCs] = {0};
522

523
  int transmission_mode = 0;
524 525 526 527 528 529 530 531 532 533 534 535 536
  UE_sched_ctrl *ue_sched_ctl;
  //  int rrc_status           = RRC_IDLE;

#ifdef TM5
  int harq_pid1=0,harq_pid2=0;
  int round1=0,round2=0;
  int UE_id2;
  uint16_t                i1,i2,i3;
  rnti_t             rnti1,rnti2;
  LTE_eNB_UE_stats  *eNB_UE_stats1 = NULL;
  LTE_eNB_UE_stats  *eNB_UE_stats2 = NULL;
  UE_sched_ctrl *ue_sched_ctl1,*ue_sched_ctl2;
#endif
537 538

  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
knopp's avatar
knopp committed
539 540 541

    if (mbsfn_flag[CC_id]>0)  // If this CC is allocated for MBSFN skip it here
      continue;
542 543 544

    frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id);

knopp's avatar
knopp committed
545

546
    min_rb_unit[CC_id]=get_min_rb_unit(Mod_id,CC_id);
547

548 549 550
    for (i = 0; i < NUMBER_OF_UE_MAX; i++) {
      if (UE_list->active[i] != TRUE) continue;

knopp's avatar
knopp committed
551
      UE_id = i;
552
      // Initialize scheduling information for all active UEs
553 554
      

555

556
      dlsch_scheduler_pre_processor_reset(Mod_id,
557 558
        UE_id,
        CC_id,
559 560
        frameP,
        subframeP,
561 562 563 564 565
        N_RBG[CC_id],
        nb_rbs_required,
        nb_rbs_required_remaining,
        rballoc_sub,
        MIMO_mode_indicator);
566

knopp's avatar
knopp committed
567
    }
568
  }
knopp's avatar
knopp committed
569 570


571
  // Store the DLSCH buffer for each logical channel
572
  store_dlsch_buffer (Mod_id,frameP,subframeP);
573

knopp's avatar
knopp committed
574 575


576
  // Calculate the number of RBs required by each UE on the basis of logical channel's buffer
knopp's avatar
knopp committed
577
  assign_rbs_required (Mod_id,frameP,subframeP,nb_rbs_required,min_rb_unit);
578

knopp's avatar
knopp committed
579 580


581
  // Sorts the user on the basis of dlsch logical channel buffer and CQI
knopp's avatar
knopp committed
582 583 584
  sort_UEs (Mod_id,frameP,subframeP);


knopp's avatar
knopp committed
585

586
  total_ue_count =0;
587

knopp's avatar
knopp committed
588
  // loop over all active UEs
589
  for (i=UE_list->head; i>=0; i=UE_list->next[i]) {
590
    rnti = UE_RNTI(Mod_id,i);
591

592
    if(rnti == NOT_A_RNTI)
knopp's avatar
knopp committed
593
      continue;
594 595
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;
596
    if (!phy_stats_exist(Mod_id, rnti))
597
      continue;
598
    UE_id = i;
599

600
    for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
knopp's avatar
knopp committed
601
      CC_id = UE_list->ordered_CCids[ii][UE_id];
602 603 604
      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
      harq_pid = ue_sched_ctl->harq_pid[CC_id];
      round    = ue_sched_ctl->round[CC_id];
knopp's avatar
knopp committed
605

606 607 608 609
      // if there is no available harq_process, skip the UE
      if (UE_list->UE_sched_ctrl[UE_id].harq_pid[CC_id]<0)
        continue;

knopp's avatar
knopp committed
610 611
      average_rbs_per_user[CC_id]=0;

612
      frame_parms[CC_id] = mac_xface->get_lte_frame_parms(Mod_id,CC_id);
knopp's avatar
knopp committed
613

614
      //      mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0);
615

616
      if(round>0) {
617
        nb_rbs_required[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
618
      }
619

knopp's avatar
knopp committed
620 621
      //nb_rbs_required_remaining[UE_id] = nb_rbs_required[UE_id];
      if (nb_rbs_required[CC_id][UE_id] > 0) {
622
        total_ue_count = total_ue_count + 1;
knopp's avatar
knopp committed
623
      }
624 625 626 627 628 629 630 631 632 633 634 635


      // hypotetical assignement
      /*
       * If schedule is enabled and if the priority of the UEs is modified
       * The average rbs per logical channel per user will depend on the level of
       * priority. Concerning the hypothetical assignement, we should assign more
       * rbs to prioritized users. Maybe, we can do a mapping between the
       * average rbs per user and the level of priority or multiply the average rbs
       * per user by a coefficient which represents the degree of priority.
       */

636
      if (total_ue_count == 0) {
637
        average_rbs_per_user[CC_id] = 0;
638
      } else if( (min_rb_unit[CC_id] * total_ue_count) <= (frame_parms[CC_id]->N_RB_DL) ) {
639
        average_rbs_per_user[CC_id] = (uint16_t) floor(frame_parms[CC_id]->N_RB_DL/total_ue_count);
640
      } else {
641
        average_rbs_per_user[CC_id] = min_rb_unit[CC_id]; // consider the total number of use that can be scheduled UE
642
      }
knopp's avatar
knopp committed
643 644
    }
  }
645

646 647
  // note: nb_rbs_required is assigned according to total_buffer_dl
  // extend nb_rbs_required to capture per LCID RB required
648
  for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
649
    rnti = UE_RNTI(Mod_id,i);
650

651 652 653 654 655 656 657
    if(rnti == NOT_A_RNTI)
      continue;
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;
    if (!phy_stats_exist(Mod_id, rnti))
      continue;

658
    for (ii=0; ii<UE_num_active_CC(UE_list,i); ii++) {
659
      CC_id = UE_list->ordered_CCids[ii][i];
660

661
      // control channel
662
      if (mac_eNB_get_rrc_status(Mod_id,rnti) < RRC_RECONFIGURED) {
663
        nb_rbs_required_remaining_1[CC_id][i] = nb_rbs_required[CC_id][i];
664
      } else {
665 666
        nb_rbs_required_remaining_1[CC_id][i] = cmin(average_rbs_per_user[CC_id],nb_rbs_required[CC_id][i]);

667
      }
knopp's avatar
knopp committed
668
    }
669
  }
670

671
  //Allocation to UEs is done in 2 rounds,
672 673
  // 1st stage: average number of RBs allocated to each UE
  // 2nd stage: remaining RBs are allocated to high priority UEs
674 675 676 677 678 679
  for(r1=0; r1<2; r1++) {

    for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
      for (ii=0; ii<UE_num_active_CC(UE_list,i); ii++) {
        CC_id = UE_list->ordered_CCids[ii][i];

680
        if(r1 == 0) {
681
          nb_rbs_required_remaining[CC_id][i] = nb_rbs_required_remaining_1[CC_id][i];
682
        } else { // rb required based only on the buffer - rb allloctaed in the 1st round + extra reaming rb form the 1st round
683
          nb_rbs_required_remaining[CC_id][i] = nb_rbs_required[CC_id][i]-nb_rbs_required_remaining_1[CC_id][i]+nb_rbs_required_remaining[CC_id][i];
684
        }
685 686 687 688 689 690 691

        if (nb_rbs_required[CC_id][i]> 0 )
          LOG_D(MAC,"round %d : nb_rbs_required_remaining[%d][%d]= %d (remaining_1 %d, required %d,  pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n",
                r1, CC_id, i,
                nb_rbs_required_remaining[CC_id][i],
                nb_rbs_required_remaining_1[CC_id][i],
                nb_rbs_required[CC_id][i],
692
                UE_list->UE_sched_ctrl[i].pre_nb_available_rbs[CC_id],
693 694 695
                N_RBG[CC_id],
                min_rb_unit[CC_id]);

696
      }
knopp's avatar
knopp committed
697
    }
698

699
    if (total_ue_count > 0 ) {
700 701 702 703 704
      for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
        UE_id = i;

        for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
          CC_id = UE_list->ordered_CCids[ii][UE_id];
705 706 707
	  ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
	  harq_pid = ue_sched_ctl->harq_pid[CC_id];
	  round    = ue_sched_ctl->round[CC_id];
708 709 710 711

          rnti = UE_RNTI(Mod_id,UE_id);

          // LOG_D(MAC,"UE %d rnti 0x\n", UE_id, rnti );
712
          if(rnti == NOT_A_RNTI)
713
            continue;
714 715
	  if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
	    continue;
716 717
          if (!phy_stats_exist(Mod_id, rnti))
            continue;
718 719

          transmission_mode = mac_xface->get_transmission_mode(Mod_id,CC_id,rnti);
720 721
	  //          mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0);
          //rrc_status = mac_eNB_get_rrc_status(Mod_id,rnti);
722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739
          /* 1st allocate for the retx */

          // retransmission in data channels
          // control channel in the 1st transmission
          // data channel for all TM
          LOG_T(MAC,"calling dlsch_scheduler_pre_processor_allocate .. \n ");
          dlsch_scheduler_pre_processor_allocate (Mod_id,
                                                  UE_id,
                                                  CC_id,
                                                  N_RBG[CC_id],
                                                  transmission_mode,
                                                  min_rb_unit[CC_id],
                                                  frame_parms[CC_id]->N_RB_DL,
                                                  nb_rbs_required,
                                                  nb_rbs_required_remaining,
                                                  rballoc_sub,
                                                  MIMO_mode_indicator);

740
#ifdef TM5
741 742 743 744

          // data chanel TM5: to be revisted
          if ((round == 0 )  &&
              (transmission_mode == 5)  &&
745
              (ue_sched_ctl->dl_pow_off[CC_id] != 1)) {
746 747 748

            for(j=0; j<N_RBG[CC_id]; j+=2) {

749 750
              if( (((j == (N_RBG[CC_id]-1))&& (rballoc_sub[CC_id][j] == 0) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 0))  ||
                   ((j < (N_RBG[CC_id]-1)) && (rballoc_sub[CC_id][j+1] == 0) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] == 0)) ) &&
751 752 753 754 755 756
                  (nb_rbs_required_remaining[CC_id][UE_id]>0)) {

                for (ii = UE_list->next[i+1]; ii >=0; ii=UE_list->next[ii]) {

                  UE_id2 = ii;
                  rnti2 = UE_RNTI(Mod_id,UE_id2);
757 758 759
		  ue_sched_ctl2 = &UE_list->UE_sched_ctrl[UE_id2];
		  harq_pid2 = ue_sched_ctl2->harq_pid[CC_id];
		  round2    = ue_sched_ctl2->round[CC_id];
760
                  if(rnti2 == NOT_A_RNTI)
761
                    continue;
762 763
		  if (UE_list->UE_sched_ctrl[UE_id2].ul_out_of_sync == 1)
		    continue;
764 765
                  if (!phy_stats_exist(Mod_idP, rnti2))
                    continue;
766 767

                  eNB_UE_stats2 = mac_xface->get_eNB_UE_stats(Mod_id,CC_id,rnti2);
768
                  //mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti2,frameP,subframeP,&harq_pid2,&round2,0);
769

770
                  if ((mac_eNB_get_rrc_status(Mod_id,rnti2) >= RRC_RECONFIGURED) &&
771 772
                      (round2==0) &&
                      (mac_xface->get_transmission_mode(Mod_id,CC_id,rnti2)==5) &&
773
                      (ue_sched_ctl->dl_pow_off[CC_id] != 1)) {
774

775 776
                    if( (((j == (N_RBG[CC_id]-1)) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 0)) ||
                         ((j < (N_RBG[CC_id]-1)) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] == 0))  ) &&
777 778 779 780 781
                        (nb_rbs_required_remaining[CC_id][UE_id2]>0)) {

                      if((((eNB_UE_stats2->DL_pmi_single^eNB_UE_stats1->DL_pmi_single)<<(14-j))&0xc000)== 0x4000) { //MU-MIMO only for 25 RBs configuration

                        rballoc_sub[CC_id][j] = 1;
782 783
                        ue_sched_ctl->rballoc_sub_UE[CC_id][j] = 1;
                        ue_sched_ctl2->rballoc_sub_UE[CC_id][j] = 1;
784 785 786 787
                        MIMO_mode_indicator[CC_id][j] = 0;

                        if (j< N_RBG[CC_id]-1) {
                          rballoc_sub[CC_id][j+1] = 1;
788 789
                          ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] = 1;
                          ue_sched_ctl2->rballoc_sub_UE[CC_id][j+1] = 1;
790 791 792
                          MIMO_mode_indicator[CC_id][j+1] = 0;
                        }

793 794
                        ue_sched_ctl->dl_pow_off[CC_id] = 0;
                        ue_sched_ctl2->dl_pow_off[CC_id] = 0;
795 796 797


                        if ((j == N_RBG[CC_id]-1) &&
798 799
                            ((PHY_vars_eNB_g[Mod_id][CC_id]->frame_parms.N_RB_DL == 25) ||
                             (PHY_vars_eNB_g[Mod_id][CC_id]->frame_parms.N_RB_DL == 50))) {
800
			  
801
                          nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit[CC_id]+1;
802
                          ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit[CC_id]-1;
803
                          nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - min_rb_unit[CC_id]+1;
804
                          ue_sched_ctl2->pre_nb_available_rbs[CC_id] = ue_sched_ctl2->pre_nb_available_rbs[CC_id] + min_rb_unit[CC_id]-1;
805
                        } else {
806 807
                          
			  nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - 4;
808
                          ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + 4;
809
                          nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - 4;
810
                          ue_sched_ctl2->pre_nb_available_rbs[CC_id] = ue_sched_ctl2->pre_nb_available_rbs[CC_id] + 4;
811 812 813 814 815 816 817 818 819 820 821 822 823
                        }

                        break;
                      }
                    }
                  }
                }
              }
            }
          }

#endif
        }
knopp's avatar
knopp committed
824
      }
825
    } // total_ue_count
826
  } // end of for for r1 and r2
827 828 829

#ifdef TM5

knopp's avatar
knopp committed
830
  // This has to be revisited!!!!
831
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
knopp's avatar
knopp committed
832 833 834
    i1=0;
    i2=0;
    i3=0;
835 836

    for (j=0; j<N_RBG[CC_id]; j++) {
837
      if(MIMO_mode_indicator[CC_id][j] == 2) {
838
        i1 = i1+1;
839
      } else if(MIMO_mode_indicator[CC_id][j] == 1) {
840
        i2 = i2+1;
841
      } else if(MIMO_mode_indicator[CC_id][j] == 0) {
842
        i3 = i3+1;
843
      }
knopp's avatar
knopp committed
844
    }
845

846
    if((i1 < N_RBG[CC_id]) && (i2>0) && (i3==0)) {
knopp's avatar
knopp committed
847
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions + 1;
848
    }
849

850
    if(i3 == N_RBG[CC_id] && i1==0 && i2==0) {
knopp's avatar
knopp committed
851
      PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions + 1;
852
    }
853

854
    if((i1 < N_RBG[CC_id]) && (i3 > 0)) {
knopp's avatar
knopp committed
855
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions + 1;
856
    }
857

knopp's avatar
knopp committed
858
    PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions + 1;
859

860 861
  }

862 863 864
#endif

  for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
865
    UE_id = i;
866
    ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
867 868

    for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
knopp's avatar
knopp committed
869
      CC_id = UE_list->ordered_CCids[ii][UE_id];
870
      //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id];
871

872
      if (ue_sched_ctl->pre_nb_available_rbs[CC_id] > 0 ) {
873
        LOG_D(MAC,"******************DL Scheduling Information for UE%d ************************\n",UE_id);
874
        LOG_D(MAC,"dl power offset UE%d = %d \n",UE_id,ue_sched_ctl->dl_pow_off[CC_id]);
875 876 877 878
        LOG_D(MAC,"***********RB Alloc for every subband for UE%d ***********\n",UE_id);

        for(j=0; j<N_RBG[CC_id]; j++) {
          //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].rballoc_sub[i] = rballoc_sub_UE[CC_id][UE_id][i];
879
          LOG_D(MAC,"RB Alloc for UE%d and Subband%d = %d\n",UE_id,j,ue_sched_ctl->rballoc_sub_UE[CC_id][j]);
880 881 882
        }

        //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id];
883
        LOG_D(MAC,"Total RBs allocated for UE%d = %d\n",UE_id,ue_sched_ctl->pre_nb_available_rbs[CC_id]);
884
      }
knopp's avatar
knopp committed
885
    }
886 887 888
  }
}

#define SF05_LIMIT 1

/** Reset the per-UE, per-CC pre-processor state before DL allocation.
 *
 *  For the given (UE_id, CC_id) pair this:
 *   - refreshes the active HARQ pid and round from PHY,
 *   - derives a timing-advance update (scaled by N_RB_DL) every 20 subframes,
 *   - clears nb_rbs_required / nb_rbs_required_remaining / pre_nb_available_rbs
 *     and resets dl_pow_off to 2 (i.e. "not decided"),
 *   - marks RBGs as unavailable (rballoc_sub = 1) where the VRB map shows
 *     SI/RA/P-RNTI allocations, and optionally blocks the RBGs around DC in
 *     subframes 0 and 5 (SF05_LIMIT),
 *   - initializes MIMO_mode_indicator to 2 for every RBG.
 *
 *  Returns early (no reset performed) when PHY has no stats context for the
 *  UE's RNTI, e.g. when the UE was just removed.
 */
void dlsch_scheduler_pre_processor_reset (int module_idP,
					  int UE_id,
					  uint8_t  CC_id,
					  int frameP,
					  int subframeP,
					  int N_RBG,
					  uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
					  uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
					  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
					  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX])
{
  int i,j;
  UE_list_t *UE_list=&eNB_mac_inst[module_idP].UE_list;
  UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
  rnti_t rnti = UE_RNTI(module_idP,UE_id);
  uint8_t *vrb_map = eNB_mac_inst[module_idP].common_channels[CC_id].vrb_map;
  // NOTE(review): integer division truncates (e.g. 25 RB / 13 RBG -> 1, while
  // the 36.213 RBG size P would be 2), so the vrb_map scan below can miss the
  // upper PRB(s) of each RBG -- confirm this is intended before changing.
  int RBGsize = PHY_vars_eNB_g[module_idP][CC_id]->frame_parms.N_RB_DL/N_RBG;
#ifdef SF05_LIMIT
  //int subframe05_limit=0;
  int sf05_upper=-1,sf05_lower=-1;
#endif
  LTE_eNB_UE_stats *eNB_UE_stats = mac_xface->get_eNB_UE_stats(module_idP,CC_id,rnti);

  // no PHY context for this RNTI: nothing to reset
  // (fix: the original performed this identical NULL check twice)
  if (eNB_UE_stats == NULL) return;

  // initialize harq_pid and round from the PHY HARQ state
  mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,
				    frameP,subframeP,
				    &ue_sched_ctl->harq_pid[CC_id],
				    &ue_sched_ctl->round[CC_id],
				    openair_harq_DL);

  if (ue_sched_ctl->ta_timer == 0) {

    // WE SHOULD PROTECT the eNB_UE_stats with a mutex here ...

    ue_sched_ctl->ta_timer = 20;  // wait 20 subframes before taking TA measurement from PHY

    // scale the PHY timing-advance measurement according to the DL bandwidth
    switch (PHY_vars_eNB_g[module_idP][CC_id]->frame_parms.N_RB_DL) {
    case 6:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update;
      break;

    case 15:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/2;
      break;

    case 25:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/4;
      break;

    case 50:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/8;
      break;

    case 75:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/12;
      break;

    case 100:
      // 3/4 sampling uses the 75-RB scaling factor
      if (PHY_vars_eNB_g[module_idP][CC_id]->frame_parms.threequarter_fs == 0)
	ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/16;
      else
	ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/12;
      break;
    }
    // clear the update in case PHY does not have a new measurement after timer expiry
    eNB_UE_stats->timing_advance_update =  0;
  }
  else {
    ue_sched_ctl->ta_timer--;
    ue_sched_ctl->ta_update =0; // don't trigger a timing advance command
  }

  if (UE_id==0) {
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_TIMING_ADVANCE,ue_sched_ctl->ta_update);
  }

  // clear the per-UE allocation bookkeeping for this CC
  nb_rbs_required[CC_id][UE_id]=0;
  ue_sched_ctl->pre_nb_available_rbs[CC_id] = 0;
  ue_sched_ctl->dl_pow_off[CC_id] = 2;
  nb_rbs_required_remaining[CC_id][UE_id] = 0;

#ifdef SF05_LIMIT
  // RBG window (per bandwidth) to avoid allocating PRBs around DC in SF 0/5
  switch (N_RBG) {
  case 6:
    sf05_lower=0;
    sf05_upper=5;
    break;
  case 8:
    sf05_lower=2;
    sf05_upper=5;
    break;
  case 13:
    sf05_lower=4;
    sf05_upper=7;
    break;
  case 17:
    sf05_lower=7;
    sf05_upper=9;
    break;
  case 25:
    sf05_lower=11;
    sf05_upper=13;
    break;
  }
#endif
  // Initialize Subbands according to VRB map
  for (i=0; i<N_RBG; i++) {
    ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 0;
    rballoc_sub[CC_id][i] = 0;
#ifdef SF05_LIMIT
    // for avoiding 6+ PRBs around DC in subframe 0-5 (avoid excessive errors)

    if ((subframeP==0 || subframeP==5) &&
	(i>=sf05_lower && i<=sf05_upper))
      rballoc_sub[CC_id][i]=1;
#endif
    // for SI-RNTI,RA-RNTI and P-RNTI allocations
    for (j=0;j<RBGsize;j++) {
      if (vrb_map[j+(i*RBGsize)]!=0)  {
	rballoc_sub[CC_id][i] = 1;
	LOG_D(MAC,"Frame %d, subframe %d : vrb %d allocated\n",frameP,subframeP,j+(i*RBGsize));
	break;
      }
    }
    LOG_D(MAC,"Frame %d Subframe %d CC_id %d RBG %i : rb_alloc %d\n",frameP,subframeP,CC_id,i,rballoc_sub[CC_id][i]);
    MIMO_mode_indicator[CC_id][i] = 2;  // 2 = RBG free / SU-MIMO candidate
  }
}


/** Greedy per-RBG allocation for one UE on one CC.
 *
 *  Walks every resource-block group and grants it to the UE when it is free
 *  (both globally and for this UE), the UE still has outstanding demand, and
 *  the UE is not already paired for TM5 MU-MIMO (dl_pow_off == 0).
 *
 *  The last RBG of 25/50-PRB bandwidths is one PRB short, so it is accounted
 *  as (min_rb_unit - 1) PRBs and granted regardless of the remaining demand;
 *  every other RBG is granted only if at least min_rb_unit PRBs remain and is
 *  accounted as min_rb_unit PRBs.
 *
 *  Side effects per granted RBG: sets rballoc_sub / rballoc_sub_UE, marks
 *  MIMO_mode_indicator = 1 (SU-MIMO), decrements
 *  nb_rbs_required_remaining and increments pre_nb_available_rbs; for TM5
 *  the UE's dl_pow_off is forced to 1.
 */
void dlsch_scheduler_pre_processor_allocate (module_id_t   Mod_id,
    int           UE_id,
    uint8_t       CC_id,
    int           N_RBG,
    int           transmission_mode,
    int           min_rb_unit,
    uint8_t       N_RB_DL,
    uint16_t      nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
    uint16_t      nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
    unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
    unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX])
{
  int rbg;
  int last_short_rbg;
  int granted_rbs;
  UE_list_t *ue_list = &eNB_mac_inst[Mod_id].UE_list;
  UE_sched_ctrl *sched_ctl = &ue_list->UE_sched_ctrl[UE_id];

  for (rbg = 0; rbg < N_RBG; rbg++) {

    /* RBG already taken, either globally or by this UE */
    if ((rballoc_sub[CC_id][rbg] != 0) ||
        (sched_ctl->rballoc_sub_UE[CC_id][rbg] != 0))
      continue;

    /* demand already satisfied for this UE */
    if ((nb_rbs_required_remaining[CC_id][UE_id] <= 0) ||
        (sched_ctl->pre_nb_available_rbs[CC_id] >= nb_rbs_required[CC_id][UE_id]))
      continue;

    /* UE is scheduled for TM5 MU-MIMO: leave its RBGs untouched */
    if (sched_ctl->dl_pow_off[CC_id] == 0)
      continue;

    /* last RBG of 25/50-PRB bandwidths carries one PRB less */
    last_short_rbg = (rbg == N_RBG-1) && ((N_RB_DL == 25) || (N_RB_DL == 50));
    granted_rbs    = last_short_rbg ? (min_rb_unit - 1) : min_rb_unit;

    /* a regular RBG is only granted when a full allocation unit remains */
    if (!last_short_rbg &&
        (nb_rbs_required_remaining[CC_id][UE_id] < min_rb_unit))
      continue;

    rballoc_sub[CC_id][rbg] = 1;
    sched_ctl->rballoc_sub_UE[CC_id][rbg] = 1;
    MIMO_mode_indicator[CC_id][rbg] = 1;

    if (transmission_mode == 5) {
      sched_ctl->dl_pow_off[CC_id] = 1;
    }

    nb_rbs_required_remaining[CC_id][UE_id] -= granted_rbs;
    sched_ctl->pre_nb_available_rbs[CC_id]  += granted_rbs;
  }
}


/// ULSCH PRE_PROCESSOR

void ulsch_scheduler_pre_processor(module_id_t module_idP,
1082 1083
                                   int frameP,
                                   sub_frame_t subframeP,
1084
                                   uint16_t *first_rb)
1085
{
1086 1087 1088 1089 1090 1091 1092 1093

  int16_t            i;
  uint16_t           UE_id,n,r;
  uint8_t            CC_id, round, harq_pid;
  uint16_t           nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],total_allocated_rbs[MAX_NUM_CCs],average_rbs_per_user[MAX_NUM_CCs];
  int16_t            total_remaining_rbs[MAX_NUM_CCs];
  uint16_t           max_num_ue_to_be_scheduled=0,total_ue_count=0;
  rnti_t             rnti= -1;
1094
  UE_list_t          *UE_list = &eNB_mac_inst[module_idP].UE_list;
1095 1096
  UE_TEMPLATE        *UE_template = 0;
  LTE_DL_FRAME_PARMS   *frame_parms = 0;
1097

1098

1099
  //LOG_I(MAC,"assign max mcs min rb\n");
1100 1101
  // maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
  assign_max_mcs_min_rb(module_idP,frameP, subframeP, first_rb);
1102

1103
  //LOG_I(MAC,"sort ue \n");
1104
  // sort ues
1105 1106
  sort_ue_ul (module_idP,frameP, subframeP);

1107

1108 1109
  // we need to distribute RBs among UEs
  // step1:  reset the vars
1110
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
1111 1112 1113
    total_allocated_rbs[CC_id]=0;
    total_remaining_rbs[CC_id]=0;
    average_rbs_per_user[CC_id]=0;
1114 1115

    for (i=UE_list->head_ul; i>=0; i=UE_list->next_ul[i]) {
1116 1117 1118 1119
      nb_allocated_rbs[CC_id][i]=0;
    }
  }

1120
  //LOG_I(MAC,"step2 \n");
1121 1122 1123
  // step 2: calculate the average rb per UE
  total_ue_count =0;
  max_num_ue_to_be_scheduled=0;
1124 1125 1126 1127 1128

  for (i=UE_list->head_ul; i>=0; i=UE_list->next_ul[i]) {

    rnti = UE_RNTI(module_idP,i);

1129
    if (rnti==NOT_A_RNTI)
1130 1131
      continue;

1132 1133 1134
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;

1135 1136 1137
    if (!phy_stats_exist(module_idP, rnti))
      continue;

1138
    UE_id = i;
1139 1140

    for (n=0; n<UE_list->numactiveULCCs[UE_id]; n++) {
1141 1142 1143 1144
      // This is the actual CC_id in the list
      CC_id = UE_list->ordered_ULCCids[n][UE_id];
      UE_template = &UE_list->UE_template[CC_id][UE_id];
      average_rbs_per_user[CC_id]=0;
1145 1146
      frame_parms = mac_xface->get_lte_frame_parms(module_idP,CC_id);

1147
      if (UE_template->pre_allocated_nb_rb_ul > 0) {
1148
        total_ue_count+=1;
1149
      }
1150 1151
      /*
      if((mac_xface->get_nCCE_max(module_idP,CC_id,3,subframeP) - nCCE_to_be_used[CC_id])  > (1<<aggregation)) {
1152 1153
        nCCE_to_be_used[CC_id] = nCCE_to_be_used[CC_id] + (1<<aggregation);
        max_num_ue_to_be_scheduled+=1;
1154 1155 1156
	}*/

      max_num_ue_to_be_scheduled+=1;
1157

1158
      if (total_ue_count == 0) {
1159
        average_rbs_per_user[CC_id] = 0;
1160
      } else if (total_ue_count == 1 ) { // increase the available RBs, special case,
1161
        average_rbs_per_user[CC_id] = frame_parms->N_RB_UL-first_rb[CC_id]+1;
1162
      } else if( (total_ue_count <= (frame_parms->N_RB_DL-first_rb[CC_id])) &&
1163
                 (total_ue_count <= max_num_ue_to_be_scheduled)) {
1164
        average_rbs_per_user[CC_id] = (uint16_t) floor((frame_parms->N_RB_UL-first_rb[CC_id])/total_ue_count);
1165
      } else if (max_num_ue_to_be_scheduled > 0 ) {
1166
        average_rbs_per_user[CC_id] = (uint16_t) floor((frame_parms->N_RB_UL-first_rb[CC_id])/max_num_ue_to_be_scheduled);
1167
      } else {
1168 1169 1170
        average_rbs_per_user[CC_id]=1;
        LOG_W(MAC,"[eNB %d] frame %d subframe %d: UE %d CC %d: can't get average rb per user (should not be here)\n",
              module_idP,frameP,subframeP,UE_id,CC_id);
1171 1172 1173
      }
    }
  }