pre_processor.c 55.4 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
/*
 * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The OpenAirInterface Software Alliance licenses this file to You under
 * the OAI Public License, Version 1.0  (the "License"); you may not use this file
 * except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.openairinterface.org/?page_id=698
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *-------------------------------------------------------------------------------
 * For more information about the OpenAirInterface (OAI) Software Alliance:
 *      contact@openairinterface.org
 */
21 22

/*! \file pre_processor.c
23
 * \brief eNB scheduler preprocessing function prior to scheduling
24
 * \author Navid Nikaein and Ankit Bhamri
nikaeinn's avatar
nikaeinn committed
25
 * \date 2013 - 2014
26
 * \email navid.nikaein@eurecom.fr
nikaeinn's avatar
nikaeinn committed
27
 * \version 1.0
28 29 30 31
 * @ingroup _mac

 */

32 33 34
#define _GNU_SOURCE
#include <stdlib.h>

35
#include "assertions.h"
36 37 38 39 40 41 42
#include "PHY/defs.h"
#include "PHY/extern.h"

#include "SCHED/defs.h"
#include "SCHED/extern.h"

#include "LAYER2/MAC/defs.h"
43
#include "LAYER2/MAC/proto.h"
44 45
#include "LAYER2/MAC/extern.h"
#include "UTIL/LOG/log.h"
46
#include "UTIL/LOG/vcd_signal_dumper.h"
47 48 49 50 51
#include "UTIL/OPT/opt.h"
#include "OCG.h"
#include "OCG_extern.h"
#include "RRC/LITE/extern.h"
#include "RRC/L2_INTERFACE/openair_rrc_L2_interface.h"
52
#include "rlc.h"
53 54


gauthier's avatar
gauthier committed
55

56 57 58 59 60 61 62 63 64 65
#define DEBUG_eNB_SCHEDULER 1
#define DEBUG_HEADER_PARSING 1
//#define DEBUG_PACKET_TRACE 1

//#define ICIC 0

/*
  #ifndef USER_MODE
  #define msg debug_msg
  #endif
66
*/
67

68
/* this function checks that get_eNB_UE_stats returns
Cedric Roux's avatar
Cedric Roux committed
69
 * a non-NULL pointer for all the active CCs of an UE
70
 */
71
/*
72
int phy_stats_exist(module_id_t Mod_id, int rnti)
73 74
{
  int CC_id;
Cedric Roux's avatar
Cedric Roux committed
75 76
  int i;
  int UE_id          = find_UE_id(Mod_id, rnti);
77
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
Cedric Roux's avatar
Cedric Roux committed
78 79 80 81 82 83 84 85 86 87 88 89
  if (UE_id == -1) {
    LOG_W(MAC, "[eNB %d] UE %x not found, should be there (in phy_stats_exist)\n",
	  Mod_id, rnti);
    return 0;
  }
  if (UE_list->numactiveCCs[UE_id] == 0) {
    LOG_W(MAC, "[eNB %d] UE %x has no active CC (in phy_stats_exist)\n",
	  Mod_id, rnti);
    return 0;
  }
  for (i = 0; i < UE_list->numactiveCCs[UE_id]; i++) {
    CC_id = UE_list->ordered_CCids[i][UE_id];
90 91
    if (mac_xface->get_eNB_UE_stats(Mod_id, CC_id, rnti) == NULL)
      return 0;
92
  }
93 94
  return 1;
}
95
*/
96

97
// This function stores the downlink buffer for all the logical channels
gauthier's avatar
gauthier committed
98 99
void store_dlsch_buffer (module_id_t Mod_id,
                         frame_t     frameP,
100 101
                         sub_frame_t subframeP)
{
Cedric Roux's avatar
Cedric Roux committed
102

103
  int                   UE_id,i;
gauthier's avatar
gauthier committed
104
  rnti_t                rnti;
105
  mac_rlc_status_resp_t rlc_status;
106
  UE_list_t             *UE_list = &RC.mac[Mod_id]->UE_list;
107 108
  UE_TEMPLATE           *UE_template;

109 110
  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    if (UE_list->active[UE_id] != TRUE) continue;
111 112

    UE_template = &UE_list->UE_template[UE_PCCID(Mod_id,UE_id)][UE_id];
113 114

    // clear logical channel interface variables
115 116
    UE_template->dl_buffer_total = 0;
    UE_template->dl_pdus_total = 0;
117 118

    for(i=0; i< MAX_NUM_LCID; i++) {
119 120 121 122 123
      UE_template->dl_buffer_info[i]=0;
      UE_template->dl_pdus_in_buffer[i]=0;
      UE_template->dl_buffer_head_sdu_creation_time[i]=0;
      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i]=0;
    }
124

125
    rnti = UE_RNTI(Mod_id,UE_id);
126 127 128

    for(i=0; i< MAX_NUM_LCID; i++) { // loop over all the logical channels

129
      rlc_status = mac_rlc_status_ind(Mod_id,rnti, Mod_id,frameP,subframeP,ENB_FLAG_YES,MBMS_FLAG_NO,i,0 );
130 131 132
      UE_template->dl_buffer_info[i] = rlc_status.bytes_in_buffer; //storing the dlsch buffer for each logical channel
      UE_template->dl_pdus_in_buffer[i] = rlc_status.pdus_in_buffer;
      UE_template->dl_buffer_head_sdu_creation_time[i] = rlc_status.head_sdu_creation_time ;
133 134
      UE_template->dl_buffer_head_sdu_creation_time_max = cmax(UE_template->dl_buffer_head_sdu_creation_time_max,
          rlc_status.head_sdu_creation_time );
135 136
      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i] = rlc_status.head_sdu_remaining_size_to_send;
      UE_template->dl_buffer_head_sdu_is_segmented[i] = rlc_status.head_sdu_is_segmented;
137 138
      UE_template->dl_buffer_total += UE_template->dl_buffer_info[i];//storing the total dlsch buffer
      UE_template->dl_pdus_total   += UE_template->dl_pdus_in_buffer[i];
139

Cedric Roux's avatar
Cedric Roux committed
140
#ifdef DEBUG_eNB_SCHEDULER
141

142 143 144 145
      /* note for dl_buffer_head_sdu_remaining_size_to_send[i] :
       * 0 if head SDU has not been segmented (yet), else remaining size not already segmented and sent
       */
      if (UE_template->dl_buffer_info[i]>0)
146 147
        LOG_D(MAC,
              "[eNB %d] Frame %d Subframe %d : RLC status for UE %d in LCID%d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmeneted %d \n",
148 149 150 151 152 153 154
              Mod_id, frameP, subframeP, UE_id,
              i, UE_template->dl_pdus_in_buffer[i],UE_template->dl_buffer_info[i],
              UE_template->dl_buffer_head_sdu_creation_time[i],
              UE_template->dl_buffer_head_sdu_remaining_size_to_send[i],
              UE_template->dl_buffer_head_sdu_is_segmented[i]
             );

Cedric Roux's avatar
Cedric Roux committed
155
#endif
156

157
    }
158

159
    //#ifdef DEBUG_eNB_SCHEDULER
160 161
    if ( UE_template->dl_buffer_total>0)
      LOG_D(MAC,"[eNB %d] Frame %d Subframe %d : RLC status for UE %d : total DL buffer size %d and total number of pdu %d \n",
162 163 164 165 166 167
            Mod_id, frameP, subframeP, UE_id,
            UE_template->dl_buffer_total,
            UE_template->dl_pdus_total
           );

    //#endif
168 169 170
  }
}

171

172
// This function returns the estimated number of RBs required by each UE for downlink scheduling
173
void assign_rbs_required (module_id_t Mod_id,
174 175 176 177 178
                          frame_t     frameP,
                          sub_frame_t subframe,
                          uint16_t    nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
                          int         min_rb_unit[MAX_NUM_CCs])
{
179

180
  uint16_t         TBS = 0;
181

182
  int              UE_id,n,i,j,CC_id,pCCid,tmp;
183
  UE_list_t        *UE_list = &RC.mac[Mod_id]->UE_list;
184
  eNB_UE_STATS     *eNB_UE_stats,*eNB_UE_stats_i,*eNB_UE_stats_j;
knopp's avatar
knopp committed
185
  int N_RB_DL;
186

knopp's avatar
knopp committed
187
  // clear rb allocations across all CC_id
188 189 190
  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    if (UE_list->active[UE_id] != TRUE) continue;

191
    pCCid = UE_PCCID(Mod_id,UE_id);
192

193
    //update CQI information across component carriers
194
    for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {
gauthier's avatar
gauthier committed
195

196
      CC_id = UE_list->ordered_CCids[n][UE_id];
197
      eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
198

Cedric Roux's avatar
Cedric Roux committed
199
      eNB_UE_stats->dlsch_mcs1=cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]];
200

201
    }
202

203
    // provide the list of CCs sorted according to MCS
204
    for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
205
      eNB_UE_stats_i = &UE_list->eNB_UE_stats[UE_list->ordered_CCids[i][UE_id]][UE_id];
206
      for (j=i+1; j<UE_list->numactiveCCs[UE_id]; j++) {
207
        DevAssert( j < MAX_NUM_CCs );
208 209 210
	eNB_UE_stats_j = &UE_list->eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]][UE_id];
        if (eNB_UE_stats_j->dlsch_mcs1 >
            eNB_UE_stats_i->dlsch_mcs1) {
211 212 213 214
          tmp = UE_list->ordered_CCids[i][UE_id];
          UE_list->ordered_CCids[i][UE_id] = UE_list->ordered_CCids[j][UE_id];
          UE_list->ordered_CCids[j][UE_id] = tmp;
        }
gauthier's avatar
gauthier committed
215
      }
216
    }
217 218

    if (UE_list->UE_template[pCCid][UE_id].dl_buffer_total> 0) {
219
      LOG_D(MAC,"[preprocessor] assign RB for UE %d\n",UE_id);
220 221 222

      for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
        CC_id = UE_list->ordered_CCids[i][UE_id];
223
	eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
224

225
        if (eNB_UE_stats->dlsch_mcs1==0) {
226 227 228 229
          nb_rbs_required[CC_id][UE_id] = 4;  // don't let the TBS get too small
        } else {
          nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id];
        }
230

231
        TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
232 233 234

        LOG_D(MAC,"[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n",
              UE_id, CC_id, UE_list->UE_template[pCCid][UE_id].dl_buffer_total,
235
              nb_rbs_required[CC_id][UE_id],eNB_UE_stats->dlsch_mcs1,TBS);
236

knopp's avatar
knopp committed
237 238
	N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);

239 240 241 242
        /* calculating required number of RBs for each UE */
        while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total)  {
          nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id];

knopp's avatar
knopp committed
243
          if (nb_rbs_required[CC_id][UE_id] > N_RB_DL) {
244
            TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,N_RB_DL);
knopp's avatar
knopp committed
245
            nb_rbs_required[CC_id][UE_id] = N_RB_DL;
246 247 248
            break;
          }

249
          TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
250 251 252
        } // end of while

        LOG_D(MAC,"[eNB %d] Frame %d: UE %d on CC %d: RB unit %d,  nb_required RB %d (TBS %d, mcs %d)\n",
253
              Mod_id, frameP,UE_id, CC_id,  min_rb_unit[CC_id], nb_rbs_required[CC_id][UE_id], TBS, eNB_UE_stats->dlsch_mcs1);
254 255 256 257
      }
    }
  }
}
gauthier's avatar
gauthier committed
258 259


260
// This function scans all CC_ids for a particular UE to find the maximum round index of its HARQ processes
gauthier's avatar
gauthier committed
261

262 263
int maxround(module_id_t Mod_id,uint16_t rnti,int frame,sub_frame_t subframe,uint8_t ul_flag )
{
gauthier's avatar
gauthier committed
264

265
  uint8_t round,round_max=0,UE_id;
266
  int CC_id,harq_pid;
267
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
268
  COMMON_channels_t *cc;
gauthier's avatar
gauthier committed
269

270 271
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {

272 273
    cc = &RC.mac[Mod_id]->common_channels[CC_id];

274
    UE_id = find_UE_id(Mod_id,rnti);
275 276 277 278
    if (cc->tdd_Config) harq_pid = ((frame*10)+subframe)%10;
    else harq_pid = ((frame*10)+subframe)&7;

    round    = UE_list->UE_sched_ctrl[UE_id].round[CC_id][harq_pid];
279
    if (round > round_max) {
280
      round_max = round;
281
    }
282 283
  }

284
  return round_max;
285
}
gauthier's avatar
gauthier committed
286

287
// This function scans all CC_ids for a particular UE to find the maximum DL CQI
288
// it returns -1 if the UE is not found in PHY layer (get_eNB_UE_stats gives NULL)
289 290
int maxcqi(module_id_t Mod_id,int32_t UE_id)
{
291
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
292
  int CC_id,n;
293
  int CQI = 0;
gauthier's avatar
gauthier committed
294

295
  for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {
296
    CC_id = UE_list->ordered_CCids[n][UE_id];
297

Cedric Roux's avatar
Cedric Roux committed
298 299
    if (UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id] > CQI) {
      CQI = UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id];
300
    }
301
  }
gauthier's avatar
gauthier committed
302

Cedric Roux's avatar
Cedric Roux committed
303
  return CQI;
304
}
gauthier's avatar
gauthier committed
305

306 307 308 309 310
struct sort_ue_dl_params {
  int Mod_idP;
  int frameP;
  int subframeP;
};
gauthier's avatar
gauthier committed
311

312 313 314
static int ue_dl_compare(const void *_a, const void *_b, void *_params)
{
  struct sort_ue_dl_params *params = _params;
315
  UE_list_t *UE_list = &RC.mac[params->Mod_idP]->UE_list;
316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369

  int UE_id1 = *(const int *)_a;
  int UE_id2 = *(const int *)_b;

  int rnti1  = UE_RNTI(params->Mod_idP, UE_id1);
  int pCC_id1 = UE_PCCID(params->Mod_idP, UE_id1);
  int round1 = maxround(params->Mod_idP, rnti1, params->frameP, params->subframeP, 1);

  int rnti2  = UE_RNTI(params->Mod_idP, UE_id2);
  int pCC_id2 = UE_PCCID(params->Mod_idP, UE_id2);
  int round2 = maxround(params->Mod_idP, rnti2, params->frameP, params->subframeP, 1);

  int cqi1 = maxcqi(params->Mod_idP, UE_id1);
  int cqi2 = maxcqi(params->Mod_idP, UE_id2);

  if (round1 > round2) return -1;
  if (round1 < round2) return 1;

  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2] >
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])
    return -1;
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2] <
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])
    return 1;

  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max >
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max)
    return -1;
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max)
    return 1;

  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total >
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total)
    return -1;
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total)
    return 1;

  if (cqi1 > cqi2) return -1;
  if (cqi1 < cqi2) return 1;

  return 0;
#if 0
  /* The above order derives from the following.  */
      if(round2 > round1) { // Check first if one of the UEs has an active HARQ process which needs service and swap order
        swap_UEs(UE_list,UE_id1,UE_id2,0);
      } else if (round2 == round1) {
        // RK->NN : I guess this is for fairness in the scheduling. This doesn't make sense unless all UEs have the same configuration of logical channels.  This should be done on the sum of all information that has to be sent.  And still it wouldn't ensure fairness.  It should be based on throughput seen by each UE or maybe using the head_sdu_creation_time, i.e. swap UEs if one is waiting longer for service.
        //  for(j=0;j<MAX_NUM_LCID;j++){
        //    if (eNB_mac_inst[Mod_id][pCC_id1].UE_template[UE_id1].dl_buffer_info[j] <
        //      eNB_mac_inst[Mod_id][pCC_id2].UE_template[UE_id2].dl_buffer_info[j]){

        // first check the buffer status for SRB1 and SRB2
gauthier's avatar
gauthier committed
370

371 372 373 374 375 376 377 378 379 380 381 382 383 384 385
        if ( (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2]) <
             (UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
                   UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
                   UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (cqi1 < cqi2) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        }
      }
#endif
}
gauthier's avatar
gauthier committed
386

387
// This fuction sorts the UE in order their dlsch buffer and CQI
388
void sort_UEs (module_id_t Mod_idP,
389 390 391
               int         frameP,
               sub_frame_t subframeP)
{
392 393 394 395 396 397
  int               i;
  int               list[NUMBER_OF_UE_MAX];
  int               list_size = 0;
  int               rnti;
  struct sort_ue_dl_params params = { Mod_idP, frameP, subframeP };

398
  UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list;
399 400

  for (i = 0; i < NUMBER_OF_UE_MAX; i++) {
401 402 403 404 405

    if (UE_list->active[i]==FALSE) continue;
    if ((rnti = UE_RNTI(Mod_idP, i)) == NOT_A_RNTI) continue;
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue;

406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421
    list[list_size] = i;
    list_size++;
  }

  qsort_r(list, list_size, sizeof(int), ue_dl_compare, &params);

  if (list_size) {
    for (i = 0; i < list_size-1; i++)
      UE_list->next[list[i]] = list[i+1];
    UE_list->next[list[list_size-1]] = -1;
    UE_list->head = list[0];
  } else {
    UE_list->head = -1;
  }

#if 0
gauthier's avatar
gauthier committed
422 423


424 425 426
  int               UE_id1,UE_id2;
  int               pCC_id1,pCC_id2;
  int               cqi1,cqi2,round1,round2;
427
  int               i=0,ii=0;//,j=0;
428
  rnti_t            rnti1,rnti2;
gauthier's avatar
gauthier committed
429

430
  UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list;
gauthier's avatar
gauthier committed
431

432
  for (i=UE_list->head; i>=0; i=UE_list->next[i]) {
gauthier's avatar
gauthier committed
433

434
    for(ii=UE_list->next[i]; ii>=0; ii=UE_list->next[ii]) {
gauthier's avatar
gauthier committed
435

436 437
      UE_id1  = i;
      rnti1 = UE_RNTI(Mod_idP,UE_id1);
438 439
      if(rnti1 == NOT_A_RNTI)
	continue;
440 441
      if (UE_list->UE_sched_ctrl[UE_id1].ul_out_of_sync == 1)
	continue;
442 443 444
      pCC_id1 = UE_PCCID(Mod_idP,UE_id1);
      cqi1    = maxcqi(Mod_idP,UE_id1); //
      round1  = maxround(Mod_idP,rnti1,frameP,subframeP,0);
445

446 447
      UE_id2 = ii;
      rnti2 = UE_RNTI(Mod_idP,UE_id2);
448 449
      if(rnti2 == NOT_A_RNTI)
        continue;
450 451
      if (UE_list->UE_sched_ctrl[UE_id2].ul_out_of_sync == 1)
	continue;
452
      cqi2    = maxcqi(Mod_idP,UE_id2);
453
      round2  = maxround(Mod_idP,rnti2,frameP,subframeP,0);  //mac_xface->get_ue_active_harq_pid(Mod_id,rnti2,subframe,&harq_pid2,&round2,0);
454
      pCC_id2 = UE_PCCID(Mod_idP,UE_id2);
455

456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477
      if(round2 > round1) { // Check first if one of the UEs has an active HARQ process which needs service and swap order
        swap_UEs(UE_list,UE_id1,UE_id2,0);
      } else if (round2 == round1) {
        // RK->NN : I guess this is for fairness in the scheduling. This doesn't make sense unless all UEs have the same configuration of logical channels.  This should be done on the sum of all information that has to be sent.  And still it wouldn't ensure fairness.  It should be based on throughput seen by each UE or maybe using the head_sdu_creation_time, i.e. swap UEs if one is waiting longer for service.
        //  for(j=0;j<MAX_NUM_LCID;j++){
        //    if (eNB_mac_inst[Mod_id][pCC_id1].UE_template[UE_id1].dl_buffer_info[j] <
        //      eNB_mac_inst[Mod_id][pCC_id2].UE_template[UE_id2].dl_buffer_info[j]){

        // first check the buffer status for SRB1 and SRB2

        if ( (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2]) <
             (UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
                   UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
                   UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (cqi1 < cqi2) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        }
478 479
      }
    }
480
  }
481
#endif
482 483
}

gauthier's avatar
gauthier committed
484

485 486


487
// This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done
488
void dlsch_scheduler_pre_processor (module_id_t   Mod_id,
489 490 491 492 493
                                    frame_t       frameP,
                                    sub_frame_t   subframeP,
                                    int           N_RBG[MAX_NUM_CCs],
                                    int           *mbsfn_flag)
{
494

495
  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],harq_pid=0,round=0,total_ue_count;
496
  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
497
  int                     UE_id, i; 
498
  uint16_t                ii,j;
499 500 501
  uint16_t                nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t                nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t                nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
502
  uint16_t                average_rbs_per_user[MAX_NUM_CCs] = {0};
503
  rnti_t             rnti;
504
  int                min_rb_unit[MAX_NUM_CCs];
505
  uint16_t r1=0;
506
  uint8_t CC_id;
507
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
508

knopp's avatar
knopp committed
509
  int N_RB_DL;
510
  int transmission_mode = 0;
511 512
  UE_sched_ctrl *ue_sched_ctl;
  //  int rrc_status           = RRC_IDLE;
513
  COMMON_channels_t *cc;
514 515

#ifdef TM5
516
  int harq_pid1=0;
517 518 519 520 521 522 523 524
  int round1=0,round2=0;
  int UE_id2;
  uint16_t                i1,i2,i3;
  rnti_t             rnti1,rnti2;
  LTE_eNB_UE_stats  *eNB_UE_stats1 = NULL;
  LTE_eNB_UE_stats  *eNB_UE_stats2 = NULL;
  UE_sched_ctrl *ue_sched_ctl1,*ue_sched_ctl2;
#endif
525 526

  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
527 528 529

    if (mbsfn_flag[CC_id]>0)  // If this CC is allocated for MBSFN skip it here
      continue;
530 531


532

533
    min_rb_unit[CC_id]=get_min_rb_unit(Mod_id,CC_id);
534

535 536 537
    for (i = 0; i < NUMBER_OF_UE_MAX; i++) {
      if (UE_list->active[i] != TRUE) continue;

538
      UE_id = i;
539
      // Initialize scheduling information for all active UEs
540 541
      

542

543
      dlsch_scheduler_pre_processor_reset(Mod_id,
544 545
        UE_id,
        CC_id,
546 547
        frameP,
        subframeP,
548 549 550 551 552
        N_RBG[CC_id],
        nb_rbs_required,
        nb_rbs_required_remaining,
        rballoc_sub,
        MIMO_mode_indicator);
553

554
    }
555
  }
556 557


558
  // Store the DLSCH buffer for each logical channel
gauthier's avatar
gauthier committed
559
  store_dlsch_buffer (Mod_id,frameP,subframeP);
560

561 562


563
  // Calculate the number of RBs required by each UE on the basis of logical channel's buffer
564
  assign_rbs_required (Mod_id,frameP,subframeP,nb_rbs_required,min_rb_unit);
565

566 567


568
  // Sorts the user on the basis of dlsch logical channel buffer and CQI
569 570 571
  sort_UEs (Mod_id,frameP,subframeP);


572

573
  total_ue_count =0;
574

575
  // loop over all active UEs
576
  for (i=UE_list->head; i>=0; i=UE_list->next[i]) {
577
    rnti = UE_RNTI(Mod_id,i);
578

579
    if(rnti == NOT_A_RNTI)
580
      continue;
581 582
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;
583
    UE_id = i;
584

585
    for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
586
      CC_id = UE_list->ordered_CCids[ii][UE_id];
587
      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
588 589 590 591
      cc=&RC.mac[Mod_id]->common_channels[ii];
      if (cc->tdd_Config) harq_pid = ((frameP*10)+subframeP)%10;
      else harq_pid = ((frameP*10)+subframeP)&7;
      round    = ue_sched_ctl->round[CC_id][harq_pid];
592

593 594
      average_rbs_per_user[CC_id]=0;

595

596
      if(round != 8) {
597
        nb_rbs_required[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
598
      }
599

600 601
      //nb_rbs_required_remaining[UE_id] = nb_rbs_required[UE_id];
      if (nb_rbs_required[CC_id][UE_id] > 0) {
602
        total_ue_count = total_ue_count + 1;
603
      }
604 605


606
      // hypothetical assignment
607 608 609 610 611 612 613 614 615
      /*
       * If schedule is enabled and if the priority of the UEs is modified
       * The average rbs per logical channel per user will depend on the level of
       * priority. Concerning the hypothetical assignement, we should assign more
       * rbs to prioritized users. Maybe, we can do a mapping between the
       * average rbs per user and the level of priority or multiply the average rbs
       * per user by a coefficient which represents the degree of priority.
       */

knopp's avatar
knopp committed
616 617
      N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);

618
      if (total_ue_count == 0) {
619
        average_rbs_per_user[CC_id] = 0;
knopp's avatar
knopp committed
620 621
      } else if( (min_rb_unit[CC_id] * total_ue_count) <= (N_RB_DL) ) {
        average_rbs_per_user[CC_id] = (uint16_t) floor(N_RB_DL/total_ue_count);
622
      } else {
623
        average_rbs_per_user[CC_id] = min_rb_unit[CC_id]; // consider the total number of use that can be scheduled UE
624
      }
625 626
    }
  }
627

628 629
  // note: nb_rbs_required is assigned according to total_buffer_dl
  // extend nb_rbs_required to capture per LCID RB required
630
  for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
631
    rnti = UE_RNTI(Mod_id,i);
632

633 634 635 636 637
    if(rnti == NOT_A_RNTI)
      continue;
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;

638
    for (ii=0; ii<UE_num_active_CC(UE_list,i); ii++) {
639
      CC_id = UE_list->ordered_CCids[ii][i];
Cedric Roux's avatar
Cedric Roux committed
640
      ue_sched_ctl = &UE_list->UE_sched_ctrl[i];
641
      round    = ue_sched_ctl->round[CC_id][harq_pid];
642

Cedric Roux's avatar
Cedric Roux committed
643 644 645
      // control channel or retransmission
      /* TODO: do we have to check for retransmission? */
      if (mac_eNB_get_rrc_status(Mod_id,rnti) < RRC_RECONFIGURED || round > 0) {
646
        nb_rbs_required_remaining_1[CC_id][i] = nb_rbs_required[CC_id][i];
647
      } else {
648 649
        nb_rbs_required_remaining_1[CC_id][i] = cmin(average_rbs_per_user[CC_id],nb_rbs_required[CC_id][i]);

650
      }
651
    }
652
  }
gauthier's avatar
gauthier committed
653

654
  //Allocation to UEs is done in 2 rounds,
655 656
  // 1st stage: average number of RBs allocated to each UE
  // 2nd stage: remaining RBs are allocated to high priority UEs
657 658 659 660 661 662
  for(r1=0; r1<2; r1++) {

    for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
      for (ii=0; ii<UE_num_active_CC(UE_list,i); ii++) {
        CC_id = UE_list->ordered_CCids[ii][i];

663
        if(r1 == 0) {
664
          nb_rbs_required_remaining[CC_id][i] = nb_rbs_required_remaining_1[CC_id][i];
665
        } else { // rb required based only on the buffer - rb allloctaed in the 1st round + extra reaming rb form the 1st round
666
          nb_rbs_required_remaining[CC_id][i] = nb_rbs_required[CC_id][i]-nb_rbs_required_remaining_1[CC_id][i]+nb_rbs_required_remaining[CC_id][i];
Cedric Roux's avatar
Cedric Roux committed
667
if (nb_rbs_required_remaining[CC_id][i]<0) abort();
668
        }
669 670 671 672 673 674 675

        if (nb_rbs_required[CC_id][i]> 0 )
          LOG_D(MAC,"round %d : nb_rbs_required_remaining[%d][%d]= %d (remaining_1 %d, required %d,  pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n",
                r1, CC_id, i,
                nb_rbs_required_remaining[CC_id][i],
                nb_rbs_required_remaining_1[CC_id][i],
                nb_rbs_required[CC_id][i],
676
                UE_list->UE_sched_ctrl[i].pre_nb_available_rbs[CC_id],
677 678 679
                N_RBG[CC_id],
                min_rb_unit[CC_id]);

680
      }
681
    }
682

683
    if (total_ue_count > 0 ) {
684 685 686 687 688
      for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
        UE_id = i;

        for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
          CC_id = UE_list->ordered_CCids[ii][UE_id];
689
	  ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
690
	  round    = ue_sched_ctl->round[CC_id][harq_pid];
691 692 693 694

          rnti = UE_RNTI(Mod_id,UE_id);

          // LOG_D(MAC,"UE %d rnti 0x\n", UE_id, rnti );
695
          if(rnti == NOT_A_RNTI)
696
            continue;
697 698
	  if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
	    continue;
699

700
          transmission_mode = get_tmode(Mod_id,CC_id,UE_id);
701 702
	  //          mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0);
          //rrc_status = mac_eNB_get_rrc_status(Mod_id,rnti);
703 704 705 706 707 708 709 710 711 712 713 714
          /* 1st allocate for the retx */

          // retransmission in data channels
          // control channel in the 1st transmission
          // data channel for all TM
          LOG_T(MAC,"calling dlsch_scheduler_pre_processor_allocate .. \n ");
          dlsch_scheduler_pre_processor_allocate (Mod_id,
                                                  UE_id,
                                                  CC_id,
                                                  N_RBG[CC_id],
                                                  transmission_mode,
                                                  min_rb_unit[CC_id],
715
                                                  to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth),
716 717 718 719 720
                                                  nb_rbs_required,
                                                  nb_rbs_required_remaining,
                                                  rballoc_sub,
                                                  MIMO_mode_indicator);

721
#ifdef TM5
722 723 724 725

          // data chanel TM5: to be revisted
          if ((round == 0 )  &&
              (transmission_mode == 5)  &&
726
              (ue_sched_ctl->dl_pow_off[CC_id] != 1)) {
727 728 729

            for(j=0; j<N_RBG[CC_id]; j+=2) {

730 731
              if( (((j == (N_RBG[CC_id]-1))&& (rballoc_sub[CC_id][j] == 0) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 0))  ||
                   ((j < (N_RBG[CC_id]-1)) && (rballoc_sub[CC_id][j+1] == 0) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] == 0)) ) &&
732 733 734 735 736 737
                  (nb_rbs_required_remaining[CC_id][UE_id]>0)) {

                for (ii = UE_list->next[i+1]; ii >=0; ii=UE_list->next[ii]) {

                  UE_id2 = ii;
                  rnti2 = UE_RNTI(Mod_id,UE_id2);
738 739
		  ue_sched_ctl2 = &UE_list->UE_sched_ctrl[UE_id2];
		  round2    = ue_sched_ctl2->round[CC_id];
740
                  if(rnti2 == NOT_A_RNTI)
741
                    continue;
742 743
		  if (UE_list->UE_sched_ctrl[UE_id2].ul_out_of_sync == 1)
		    continue;
744

745
                  eNB_UE_stats2 = UE_list->eNB_UE_stats[CC_id][UE_id2];
746
                  //mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti2,frameP,subframeP,&harq_pid2,&round2,0);
747

748
                  if ((mac_eNB_get_rrc_status(Mod_id,rnti2) >= RRC_RECONFIGURED) &&
749
                      (round2==0) &&
750
                      (get_tmode(Mod_id,CC_id,UE_id2)==5) &&
751
                      (ue_sched_ctl->dl_pow_off[CC_id] != 1)) {
752

753 754
                    if( (((j == (N_RBG[CC_id]-1)) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 0)) ||
                         ((j < (N_RBG[CC_id]-1)) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] == 0))  ) &&
755 756 757 758 759
                        (nb_rbs_required_remaining[CC_id][UE_id2]>0)) {

                      if((((eNB_UE_stats2->DL_pmi_single^eNB_UE_stats1->DL_pmi_single)<<(14-j))&0xc000)== 0x4000) { //MU-MIMO only for 25 RBs configuration

                        rballoc_sub[CC_id][j] = 1;
760 761
                        ue_sched_ctl->rballoc_sub_UE[CC_id][j] = 1;
                        ue_sched_ctl2->rballoc_sub_UE[CC_id][j] = 1;
762 763 764 765
                        MIMO_mode_indicator[CC_id][j] = 0;

                        if (j< N_RBG[CC_id]-1) {
                          rballoc_sub[CC_id][j+1] = 1;
766 767
                          ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] = 1;
                          ue_sched_ctl2->rballoc_sub_UE[CC_id][j+1] = 1;
768 769 770
                          MIMO_mode_indicator[CC_id][j+1] = 0;
                        }

771 772
                        ue_sched_ctl->dl_pow_off[CC_id] = 0;
                        ue_sched_ctl2->dl_pow_off[CC_id] = 0;
773 774 775


                        if ((j == N_RBG[CC_id]-1) &&
                            ((N_RB_DL == 25) ||
                             (N_RB_DL == 50))) {
778
			  
779
                          nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit[CC_id]+1;
780
                          ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit[CC_id]-1;
781
                          nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - min_rb_unit[CC_id]+1;
782
                          ue_sched_ctl2->pre_nb_available_rbs[CC_id] = ue_sched_ctl2->pre_nb_available_rbs[CC_id] + min_rb_unit[CC_id]-1;
783
                        } else {
784 785
                          
			  nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - 4;
786
                          ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + 4;
787
                          nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - 4;
788
                          ue_sched_ctl2->pre_nb_available_rbs[CC_id] = ue_sched_ctl2->pre_nb_available_rbs[CC_id] + 4;
789 790 791 792 793 794 795 796 797 798 799 800 801
                        }

                        break;
                      }
                    }
                  }
                }
              }
            }
          }

#endif
        }
802
      }
803
    } // total_ue_count
804
  } // end of for for r1 and r2
805 806 807

#ifdef TM5

808
  // This has to be revisited!!!!
809
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
810 811 812
    i1=0;
    i2=0;
    i3=0;
813 814

    for (j=0; j<N_RBG[CC_id]; j++) {
815
      if(MIMO_mode_indicator[CC_id][j] == 2) {
816
        i1 = i1+1;
817
      } else if(MIMO_mode_indicator[CC_id][j] == 1) {
818
        i2 = i2+1;
819
      } else if(MIMO_mode_indicator[CC_id][j] == 0) {
820
        i3 = i3+1;
821
      }
822
    }
823

824
    if((i1 < N_RBG[CC_id]) && (i2>0) && (i3==0)) {
825
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions + 1;
826
    }
827

828
    if(i3 == N_RBG[CC_id] && i1==0 && i2==0) {
829
      PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions + 1;
830
    }
831

832
    if((i1 < N_RBG[CC_id]) && (i3 > 0)) {
833
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions + 1;
834
    }
835

836
    PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions + 1;
837

838 839
  }

840 841 842
#endif

  for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
843
    UE_id = i;
844
    ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
845 846

    for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
847
      CC_id = UE_list->ordered_CCids[ii][UE_id];
      //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id];
849

850
      if (ue_sched_ctl->pre_nb_available_rbs[CC_id] > 0 ) {
851
        LOG_D(MAC,"******************DL Scheduling Information for UE%d ************************\n",UE_id);
852
        LOG_D(MAC,"dl power offset UE%d = %d \n",UE_id,ue_sched_ctl->dl_pow_off[CC_id]);
853 854 855 856
        LOG_D(MAC,"***********RB Alloc for every subband for UE%d ***********\n",UE_id);

        for(j=0; j<N_RBG[CC_id]; j++) {
          //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].rballoc_sub[i] = rballoc_sub_UE[CC_id][UE_id][i];
857
          LOG_D(MAC,"RB Alloc for UE%d and Subband%d = %d\n",UE_id,j,ue_sched_ctl->rballoc_sub_UE[CC_id][j]);
858 859 860
        }

        //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id];
861
        LOG_D(MAC,"Total RBs allocated for UE%d = %d\n",UE_id,ue_sched_ctl->pre_nb_available_rbs[CC_id]);
      }
863
    }
864 865 866
  }
}

#define SF0_LIMIT 1

void dlsch_scheduler_pre_processor_reset (int module_idP,
870 871 872 873 874 875 876 877 878 879
					  int UE_id,
					  uint8_t  CC_id,
					  int frameP,
					  int subframeP,					  
					  int N_RBG,
					  uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
					  uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
					  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
					  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX])
  
880
{
881
  int i,j;
882
  UE_list_t *UE_list=&RC.mac[module_idP]->UE_list;
883 884
  UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
  rnti_t rnti = UE_RNTI(module_idP,UE_id);
885

886
  uint8_t *vrb_map = RC.mac[module_idP]->common_channels[CC_id].vrb_map;
  int N_RB_DL = to_prb(RC.mac[module_idP]->common_channels[CC_id].mib->message.dl_Bandwidth);
888
  int RBGsize = N_RB_DL/N_RBG,RBGsize_last;
#ifdef SF0_LIMIT
  int sf0_upper=-1,sf0_lower=-1;
891
#endif
892 893


894
  //LOG_D(MAC,"Running preprocessor for UE %d (%x)\n",UE_id,rnti);
895
  // initialize harq_pid and round
896

  if (ue_sched_ctl->ta_timer) ue_sched_ctl->ta_timer--;

899 900 901
  /*
  eNB_UE_stats *eNB_UE_stats;

902 903 904
  if (eNB_UE_stats == NULL)
    return;

905
  
906 907 908 909
  mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,
				    frameP,subframeP,
				    &ue_sched_ctl->harq_pid[CC_id],
				    &ue_sched_ctl->round[CC_id],
910
				    openair_harq_DL);
911 912
  

913
  if (ue_sched_ctl->ta_timer == 0) {
914

915 916 917
    // WE SHOULD PROTECT the eNB_UE_stats with a mutex here ...

    ue_sched_ctl->ta_timer = 20;  // wait 20 subframes before taking TA measurement from PHY
    switch (N_RB_DL) {
919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939
    case 6:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update;
      break;
      
    case 15:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/2;
      break;
      
    case 25:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/4;
      break;
      
    case 50:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/8;
      break;
      
    case 75:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/12;
      break;
      
    case 100:
940
	ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/16;
941 942 943 944 945 946 947 948 949
      break;
    }
    // clear the update in case PHY does not have a new measurement after timer expiry
    eNB_UE_stats->timing_advance_update =  0;
  }
  else {
    ue_sched_ctl->ta_timer--;
    ue_sched_ctl->ta_update =0; // don't trigger a timing advance command
  }
950 951