pre_processor.c 55.4 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
/*
 * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The OpenAirInterface Software Alliance licenses this file to You under
 * the OAI Public License, Version 1.0  (the "License"); you may not use this file
 * except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.openairinterface.org/?page_id=698
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *-------------------------------------------------------------------------------
 * For more information about the OpenAirInterface (OAI) Software Alliance:
 *      contact@openairinterface.org
 */
21 22

/*! \file pre_processor.c
23
 * \brief eNB scheduler preprocessing fuction prior to scheduling
24
 * \author Navid Nikaein and Ankit Bhamri
25
 * \date 2013 - 2014
26
 * \email navid.nikaein@eurecom.fr
27
 * \version 1.0
28 29 30 31
 * @ingroup _mac

 */

32 33 34
#define _GNU_SOURCE
#include <stdlib.h>

35
#include "assertions.h"
36 37 38 39 40 41 42
#include "PHY/defs.h"
#include "PHY/extern.h"

#include "SCHED/defs.h"
#include "SCHED/extern.h"

#include "LAYER2/MAC/defs.h"
43
#include "LAYER2/MAC/proto.h"
44 45
#include "LAYER2/MAC/extern.h"
#include "UTIL/LOG/log.h"
46
#include "UTIL/LOG/vcd_signal_dumper.h"
47 48 49 50 51
#include "UTIL/OPT/opt.h"
#include "OCG.h"
#include "OCG_extern.h"
#include "RRC/LITE/extern.h"
#include "RRC/L2_INTERFACE/openair_rrc_L2_interface.h"
52
#include "rlc.h"
53 54


55

56 57 58 59 60 61 62 63 64 65
#define DEBUG_eNB_SCHEDULER 1
#define DEBUG_HEADER_PARSING 1
//#define DEBUG_PACKET_TRACE 1

//#define ICIC 0

/*
  #ifndef USER_MODE
  #define msg debug_msg
  #endif
knopp's avatar
knopp committed
66
*/
67

68
/* this function checks that get_eNB_UE_stats returns
Cedric Roux's avatar
Cedric Roux committed
69
 * a non-NULL pointer for all the active CCs of an UE
70
 */
71
/*
72
int phy_stats_exist(module_id_t Mod_id, int rnti)
73 74
{
  int CC_id;
Cedric Roux's avatar
Cedric Roux committed
75 76
  int i;
  int UE_id          = find_UE_id(Mod_id, rnti);
77
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
Cedric Roux's avatar
Cedric Roux committed
78 79 80 81 82 83 84 85 86 87 88 89
  if (UE_id == -1) {
    LOG_W(MAC, "[eNB %d] UE %x not found, should be there (in phy_stats_exist)\n",
	  Mod_id, rnti);
    return 0;
  }
  if (UE_list->numactiveCCs[UE_id] == 0) {
    LOG_W(MAC, "[eNB %d] UE %x has no active CC (in phy_stats_exist)\n",
	  Mod_id, rnti);
    return 0;
  }
  for (i = 0; i < UE_list->numactiveCCs[UE_id]; i++) {
    CC_id = UE_list->ordered_CCids[i][UE_id];
90 91
    if (mac_xface->get_eNB_UE_stats(Mod_id, CC_id, rnti) == NULL)
      return 0;
92
  }
93 94
  return 1;
}
95
*/
96

97
// This function stores the downlink buffer for all the logical channels
98 99
void store_dlsch_buffer (module_id_t Mod_id,
                         frame_t     frameP,
100 101
                         sub_frame_t subframeP)
{
102

knopp's avatar
knopp committed
103
  int                   UE_id,i;
104
  rnti_t                rnti;
105
  mac_rlc_status_resp_t rlc_status;
106
  UE_list_t             *UE_list = &RC.mac[Mod_id]->UE_list;
knopp's avatar
knopp committed
107 108
  UE_TEMPLATE           *UE_template;

109 110
  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    if (UE_list->active[UE_id] != TRUE) continue;
knopp's avatar
knopp committed
111 112

    UE_template = &UE_list->UE_template[UE_PCCID(Mod_id,UE_id)][UE_id];
113 114

    // clear logical channel interface variables
knopp's avatar
knopp committed
115 116
    UE_template->dl_buffer_total = 0;
    UE_template->dl_pdus_total = 0;
117 118

    for(i=0; i< MAX_NUM_LCID; i++) {
knopp's avatar
knopp committed
119 120 121 122 123
      UE_template->dl_buffer_info[i]=0;
      UE_template->dl_pdus_in_buffer[i]=0;
      UE_template->dl_buffer_head_sdu_creation_time[i]=0;
      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i]=0;
    }
124

knopp's avatar
knopp committed
125
    rnti = UE_RNTI(Mod_id,UE_id);
126 127 128

    for(i=0; i< MAX_NUM_LCID; i++) { // loop over all the logical channels

129
      rlc_status = mac_rlc_status_ind(Mod_id,rnti, Mod_id,frameP,subframeP,ENB_FLAG_YES,MBMS_FLAG_NO,i,0 );
knopp's avatar
knopp committed
130 131 132
      UE_template->dl_buffer_info[i] = rlc_status.bytes_in_buffer; //storing the dlsch buffer for each logical channel
      UE_template->dl_pdus_in_buffer[i] = rlc_status.pdus_in_buffer;
      UE_template->dl_buffer_head_sdu_creation_time[i] = rlc_status.head_sdu_creation_time ;
133 134
      UE_template->dl_buffer_head_sdu_creation_time_max = cmax(UE_template->dl_buffer_head_sdu_creation_time_max,
          rlc_status.head_sdu_creation_time );
knopp's avatar
knopp committed
135 136
      UE_template->dl_buffer_head_sdu_remaining_size_to_send[i] = rlc_status.head_sdu_remaining_size_to_send;
      UE_template->dl_buffer_head_sdu_is_segmented[i] = rlc_status.head_sdu_is_segmented;
137 138
      UE_template->dl_buffer_total += UE_template->dl_buffer_info[i];//storing the total dlsch buffer
      UE_template->dl_pdus_total   += UE_template->dl_pdus_in_buffer[i];
139

140
#ifdef DEBUG_eNB_SCHEDULER
141

knopp's avatar
knopp committed
142 143 144 145
      /* note for dl_buffer_head_sdu_remaining_size_to_send[i] :
       * 0 if head SDU has not been segmented (yet), else remaining size not already segmented and sent
       */
      if (UE_template->dl_buffer_info[i]>0)
146 147
        LOG_D(MAC,
              "[eNB %d] Frame %d Subframe %d : RLC status for UE %d in LCID%d: total of %d pdus and size %d, head sdu queuing time %d, remaining size %d, is segmeneted %d \n",
148 149 150 151 152 153 154
              Mod_id, frameP, subframeP, UE_id,
              i, UE_template->dl_pdus_in_buffer[i],UE_template->dl_buffer_info[i],
              UE_template->dl_buffer_head_sdu_creation_time[i],
              UE_template->dl_buffer_head_sdu_remaining_size_to_send[i],
              UE_template->dl_buffer_head_sdu_is_segmented[i]
             );

155
#endif
156

knopp's avatar
knopp committed
157
    }
158

159
    //#ifdef DEBUG_eNB_SCHEDULER
knopp's avatar
knopp committed
160 161
    if ( UE_template->dl_buffer_total>0)
      LOG_D(MAC,"[eNB %d] Frame %d Subframe %d : RLC status for UE %d : total DL buffer size %d and total number of pdu %d \n",
162 163 164 165 166 167
            Mod_id, frameP, subframeP, UE_id,
            UE_template->dl_buffer_total,
            UE_template->dl_pdus_total
           );

    //#endif
168 169 170
  }
}

171

172
// This function returns the estimated number of RBs required by each UE for downlink scheduling
knopp's avatar
knopp committed
173
void assign_rbs_required (module_id_t Mod_id,
174 175 176 177 178
                          frame_t     frameP,
                          sub_frame_t subframe,
                          uint16_t    nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
                          int         min_rb_unit[MAX_NUM_CCs])
{
179

knopp's avatar
knopp committed
180
  uint16_t         TBS = 0;
181

knopp's avatar
knopp committed
182
  int              UE_id,n,i,j,CC_id,pCCid,tmp;
183
  UE_list_t        *UE_list = &RC.mac[Mod_id]->UE_list;
184
  eNB_UE_STATS     *eNB_UE_stats,*eNB_UE_stats_i,*eNB_UE_stats_j;
185
  int N_RB_DL;
186

187
  // clear rb allocations across all CC_id
188 189 190
  for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; UE_id++) {
    if (UE_list->active[UE_id] != TRUE) continue;

knopp's avatar
knopp committed
191
    pCCid = UE_PCCID(Mod_id,UE_id);
192

knopp's avatar
knopp committed
193
    //update CQI information across component carriers
194
    for (n=0; n<UE_list->numactiveCCs[UE_id]; n++) {
195

knopp's avatar
knopp committed
196
      CC_id = UE_list->ordered_CCids[n][UE_id];
197
      eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
198

Cedric Roux's avatar
Cedric Roux committed
199
      eNB_UE_stats->dlsch_mcs1=cqi_to_mcs[UE_list->UE_sched_ctrl[UE_id].dl_cqi[CC_id]];
200

knopp's avatar
knopp committed
201
    }
202

knopp's avatar
knopp committed
203
    // provide the list of CCs sorted according to MCS
204
    for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
205
      eNB_UE_stats_i = &UE_list->eNB_UE_stats[UE_list->ordered_CCids[i][UE_id]][UE_id];
206
      for (j=i+1; j<UE_list->numactiveCCs[UE_id]; j++) {
207
        DevAssert( j < MAX_NUM_CCs );
208 209 210
	eNB_UE_stats_j = &UE_list->eNB_UE_stats[UE_list->ordered_CCids[j][UE_id]][UE_id];
        if (eNB_UE_stats_j->dlsch_mcs1 >
            eNB_UE_stats_i->dlsch_mcs1) {
211 212 213 214
          tmp = UE_list->ordered_CCids[i][UE_id];
          UE_list->ordered_CCids[i][UE_id] = UE_list->ordered_CCids[j][UE_id];
          UE_list->ordered_CCids[j][UE_id] = tmp;
        }
215
      }
knopp's avatar
knopp committed
216
    }
217 218

    if (UE_list->UE_template[pCCid][UE_id].dl_buffer_total> 0) {
219
      LOG_D(MAC,"[preprocessor] assign RB for UE %d\n",UE_id);
220 221 222

      for (i=0; i<UE_list->numactiveCCs[UE_id]; i++) {
        CC_id = UE_list->ordered_CCids[i][UE_id];
223
	eNB_UE_stats = &UE_list->eNB_UE_stats[CC_id][UE_id];
224

225
        if (eNB_UE_stats->dlsch_mcs1==0) {
226 227 228 229
          nb_rbs_required[CC_id][UE_id] = 4;  // don't let the TBS get too small
        } else {
          nb_rbs_required[CC_id][UE_id] = min_rb_unit[CC_id];
        }
230

231
        TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
232 233 234

        LOG_D(MAC,"[preprocessor] start RB assignement for UE %d CC_id %d dl buffer %d (RB unit %d, MCS %d, TBS %d) \n",
              UE_id, CC_id, UE_list->UE_template[pCCid][UE_id].dl_buffer_total,
235
              nb_rbs_required[CC_id][UE_id],eNB_UE_stats->dlsch_mcs1,TBS);
236

237 238
	N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);

239 240 241 242
        /* calculating required number of RBs for each UE */
        while (TBS < UE_list->UE_template[pCCid][UE_id].dl_buffer_total)  {
          nb_rbs_required[CC_id][UE_id] += min_rb_unit[CC_id];

243
          if (nb_rbs_required[CC_id][UE_id] > N_RB_DL) {
244
            TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,N_RB_DL);
245
            nb_rbs_required[CC_id][UE_id] = N_RB_DL;
246 247 248
            break;
          }

249
          TBS = get_TBS_DL(eNB_UE_stats->dlsch_mcs1,nb_rbs_required[CC_id][UE_id]);
250 251 252
        } // end of while

        LOG_D(MAC,"[eNB %d] Frame %d: UE %d on CC %d: RB unit %d,  nb_required RB %d (TBS %d, mcs %d)\n",
253
              Mod_id, frameP,UE_id, CC_id,  min_rb_unit[CC_id], nb_rbs_required[CC_id][UE_id], TBS, eNB_UE_stats->dlsch_mcs1);
knopp's avatar
knopp committed
254 255 256 257
      }
    }
  }
}
258 259


knopp's avatar
knopp committed
260
// This function scans all CC_ids for a particular UE to find the maximum round index of its HARQ processes
261

262 263
// Scan all CC_ids of the UE identified by rnti and return the maximum HARQ
// round among its processes for the HARQ pid of (frame, subframe).
// Returns 0 when the rnti is unknown (no active HARQ process).
// NOTE(review): ul_flag is currently unused; kept for interface compatibility.
int maxround(module_id_t Mod_id,uint16_t rnti,int frame,sub_frame_t subframe,uint8_t ul_flag )
{
  uint8_t round,round_max=0;
  int UE_id;
  int CC_id,harq_pid;
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
  COMMON_channels_t *cc;

  /* BUG FIX: the UE index does not depend on the CC, so look it up once
   * instead of once per CC; and guard the -1 "not found" result, which the
   * old code stored into a uint8_t (becoming 255) and then used to index
   * UE_sched_ctrl out of bounds. */
  UE_id = find_UE_id(Mod_id,rnti);
  if (UE_id == -1)
    return 0;

  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
    cc = &RC.mac[Mod_id]->common_channels[CC_id];

    /* HARQ pid derivation differs between TDD and FDD */
    if (cc->tdd_Config) harq_pid = ((frame*10)+subframe)%10;
    else harq_pid = ((frame*10)+subframe)&7;

    round = UE_list->UE_sched_ctrl[UE_id].round[CC_id][harq_pid];

    if (round > round_max) {
      round_max = round;
    }
  }

  return round_max;
}
286

knopp's avatar
knopp committed
287
// This function scans all CC_ids for a particular UE to find the maximum DL CQI
288
// (the stale note about returning -1 referred to the old get_eNB_UE_stats path;
// the current implementation returns 0 when the UE has no active CC)
289 290
// Return the maximum wideband DL CQI reported by the UE across all of its
// active component carriers (0 when it has no active CC).
int maxcqi(module_id_t Mod_id,int32_t UE_id)
{
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
  int best_cqi = 0;
  int idx;

  for (idx = 0; idx < UE_list->numactiveCCs[UE_id]; idx++) {
    int cc  = UE_list->ordered_CCids[idx][UE_id];
    int cqi = UE_list->UE_sched_ctrl[UE_id].dl_cqi[cc];

    if (cqi > best_cqi)
      best_cqi = cqi;
  }

  return best_cqi;
}
305

306 307 308 309 310
/* Context handed to ue_dl_compare() through qsort_r's opaque parameter:
 * the comparator needs the module/frame/subframe to query HARQ state. */
struct sort_ue_dl_params {
  int Mod_idP;    // eNB module instance
  int frameP;     // current frame
  int subframeP;  // current subframe
};
311

312 313 314
static int ue_dl_compare(const void *_a, const void *_b, void *_params)
{
  struct sort_ue_dl_params *params = _params;
315
  UE_list_t *UE_list = &RC.mac[params->Mod_idP]->UE_list;
316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369

  int UE_id1 = *(const int *)_a;
  int UE_id2 = *(const int *)_b;

  int rnti1  = UE_RNTI(params->Mod_idP, UE_id1);
  int pCC_id1 = UE_PCCID(params->Mod_idP, UE_id1);
  int round1 = maxround(params->Mod_idP, rnti1, params->frameP, params->subframeP, 1);

  int rnti2  = UE_RNTI(params->Mod_idP, UE_id2);
  int pCC_id2 = UE_PCCID(params->Mod_idP, UE_id2);
  int round2 = maxround(params->Mod_idP, rnti2, params->frameP, params->subframeP, 1);

  int cqi1 = maxcqi(params->Mod_idP, UE_id1);
  int cqi2 = maxcqi(params->Mod_idP, UE_id2);

  if (round1 > round2) return -1;
  if (round1 < round2) return 1;

  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2] >
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])
    return -1;
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2] <
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])
    return 1;

  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max >
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max)
    return -1;
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max)
    return 1;

  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total >
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total)
    return -1;
  if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
      UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total)
    return 1;

  if (cqi1 > cqi2) return -1;
  if (cqi1 < cqi2) return 1;

  return 0;
#if 0
  /* The above order derives from the following.  */
      if(round2 > round1) { // Check first if one of the UEs has an active HARQ process which needs service and swap order
        swap_UEs(UE_list,UE_id1,UE_id2,0);
      } else if (round2 == round1) {
        // RK->NN : I guess this is for fairness in the scheduling. This doesn't make sense unless all UEs have the same configuration of logical channels.  This should be done on the sum of all information that has to be sent.  And still it wouldn't ensure fairness.  It should be based on throughput seen by each UE or maybe using the head_sdu_creation_time, i.e. swap UEs if one is waiting longer for service.
        //  for(j=0;j<MAX_NUM_LCID;j++){
        //    if (eNB_mac_inst[Mod_id][pCC_id1].UE_template[UE_id1].dl_buffer_info[j] <
        //      eNB_mac_inst[Mod_id][pCC_id2].UE_template[UE_id2].dl_buffer_info[j]){

        // first check the buffer status for SRB1 and SRB2
370

371 372 373 374 375 376 377 378 379 380 381 382 383 384 385
        if ( (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[1] + UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_info[2]) <
             (UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[1] + UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_info[2])   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_head_sdu_creation_time_max <
                   UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_head_sdu_creation_time_max   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (UE_list->UE_template[pCC_id1][UE_id1].dl_buffer_total <
                   UE_list->UE_template[pCC_id2][UE_id2].dl_buffer_total   ) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        } else if (cqi1 < cqi2) {
          swap_UEs(UE_list,UE_id1,UE_id2,0);
        }
      }
#endif
}
386

knopp's avatar
knopp committed
387
// This fuction sorts the UE in order their dlsch buffer and CQI
knopp's avatar
knopp committed
388
// Sort the active UEs by DL scheduling priority (see ue_dl_compare for the
// ordering: HARQ round, SRB buffers, head-SDU age, total buffer, CQI) and
// rebuild the UE_list head/next linked list in that order.
// UEs that are inactive, have no RNTI, or are UL out-of-sync are excluded.
//
// (The previous #if 0 block — the superseded O(n^2) swap-based sort — was
// dead code and is removed; qsort_r with ue_dl_compare replaces it.)
void sort_UEs (module_id_t Mod_idP,
               int         frameP,
               sub_frame_t subframeP)
{
  int               i;
  int               list[NUMBER_OF_UE_MAX];
  int               list_size = 0;
  int               rnti;
  struct sort_ue_dl_params params = { Mod_idP, frameP, subframeP };

  UE_list_t *UE_list = &RC.mac[Mod_idP]->UE_list;

  /* collect the schedulable UEs */
  for (i = 0; i < NUMBER_OF_UE_MAX; i++) {
    if (UE_list->active[i]==FALSE) continue;
    if ((rnti = UE_RNTI(Mod_idP, i)) == NOT_A_RNTI) continue;
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1) continue;

    list[list_size] = i;
    list_size++;
  }

  qsort_r(list, list_size, sizeof(int), ue_dl_compare, &params);

  /* relink head/next in sorted order; -1 terminates the list */
  if (list_size) {
    for (i = 0; i < list_size-1; i++)
      UE_list->next[list[i]] = list[i+1];
    UE_list->next[list[list_size-1]] = -1;
    UE_list->head = list[0];
  } else {
    UE_list->head = -1;
  }
}

484

knopp's avatar
knopp committed
485 486


487
// This function assigns pre-available RBS to each UE in specified sub-bands before scheduling is done
knopp's avatar
knopp committed
488
void dlsch_scheduler_pre_processor (module_id_t   Mod_id,
489 490 491 492 493
                                    frame_t       frameP,
                                    sub_frame_t   subframeP,
                                    int           N_RBG[MAX_NUM_CCs],
                                    int           *mbsfn_flag)
{
knopp's avatar
knopp committed
494

495
  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],harq_pid=0,round=0,total_ue_count;
knopp's avatar
knopp committed
496
  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
497
  int                     UE_id, i; 
498
  uint16_t                ii,j;
knopp's avatar
knopp committed
499 500 501
  uint16_t                nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t                nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t                nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
502
  uint16_t                average_rbs_per_user[MAX_NUM_CCs] = {0};
503
  rnti_t             rnti;
knopp's avatar
knopp committed
504
  int                min_rb_unit[MAX_NUM_CCs];
505
  uint16_t r1=0;
506
  uint8_t CC_id;
507
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
508

509
  int N_RB_DL;
510
  int transmission_mode = 0;
511 512
  UE_sched_ctrl *ue_sched_ctl;
  //  int rrc_status           = RRC_IDLE;
513
  COMMON_channels_t *cc;
514 515

#ifdef TM5
516
  int harq_pid1=0;
517 518 519 520 521 522 523 524
  int round1=0,round2=0;
  int UE_id2;
  uint16_t                i1,i2,i3;
  rnti_t             rnti1,rnti2;
  LTE_eNB_UE_stats  *eNB_UE_stats1 = NULL;
  LTE_eNB_UE_stats  *eNB_UE_stats2 = NULL;
  UE_sched_ctrl *ue_sched_ctl1,*ue_sched_ctl2;
#endif
525 526

  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
knopp's avatar
knopp committed
527 528 529

    if (mbsfn_flag[CC_id]>0)  // If this CC is allocated for MBSFN skip it here
      continue;
530 531


knopp's avatar
knopp committed
532

533
    min_rb_unit[CC_id]=get_min_rb_unit(Mod_id,CC_id);
534

535 536 537
    for (i = 0; i < NUMBER_OF_UE_MAX; i++) {
      if (UE_list->active[i] != TRUE) continue;

knopp's avatar
knopp committed
538
      UE_id = i;
539
      // Initialize scheduling information for all active UEs
540 541
      

542

543
      dlsch_scheduler_pre_processor_reset(Mod_id,
544 545
        UE_id,
        CC_id,
546 547
        frameP,
        subframeP,
548 549 550 551 552
        N_RBG[CC_id],
        nb_rbs_required,
        nb_rbs_required_remaining,
        rballoc_sub,
        MIMO_mode_indicator);
553

knopp's avatar
knopp committed
554
    }
555
  }
knopp's avatar
knopp committed
556 557


558
  // Store the DLSCH buffer for each logical channel
559
  store_dlsch_buffer (Mod_id,frameP,subframeP);
560

knopp's avatar
knopp committed
561 562


563
  // Calculate the number of RBs required by each UE on the basis of logical channel's buffer
knopp's avatar
knopp committed
564
  assign_rbs_required (Mod_id,frameP,subframeP,nb_rbs_required,min_rb_unit);
565

knopp's avatar
knopp committed
566 567


568
  // Sorts the user on the basis of dlsch logical channel buffer and CQI
knopp's avatar
knopp committed
569 570 571
  sort_UEs (Mod_id,frameP,subframeP);


knopp's avatar
knopp committed
572

573
  total_ue_count =0;
574

knopp's avatar
knopp committed
575
  // loop over all active UEs
576
  for (i=UE_list->head; i>=0; i=UE_list->next[i]) {
577
    rnti = UE_RNTI(Mod_id,i);
578

579
    if(rnti == NOT_A_RNTI)
knopp's avatar
knopp committed
580
      continue;
581 582
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;
583
    UE_id = i;
584

585
    for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
knopp's avatar
knopp committed
586
      CC_id = UE_list->ordered_CCids[ii][UE_id];
587
      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
588 589 590 591
      cc=&RC.mac[Mod_id]->common_channels[ii];
      if (cc->tdd_Config) harq_pid = ((frameP*10)+subframeP)%10;
      else harq_pid = ((frameP*10)+subframeP)&7;
      round    = ue_sched_ctl->round[CC_id][harq_pid];
592

knopp's avatar
knopp committed
593 594
      average_rbs_per_user[CC_id]=0;

595

596
      if(round != 8) {
597
        nb_rbs_required[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
598
      }
599

knopp's avatar
knopp committed
600 601
      //nb_rbs_required_remaining[UE_id] = nb_rbs_required[UE_id];
      if (nb_rbs_required[CC_id][UE_id] > 0) {
602
        total_ue_count = total_ue_count + 1;
knopp's avatar
knopp committed
603
      }
604 605


606
      // hypothetical assignment
607 608 609 610 611 612 613 614 615
      /*
       * If schedule is enabled and if the priority of the UEs is modified
       * The average rbs per logical channel per user will depend on the level of
       * priority. Concerning the hypothetical assignement, we should assign more
       * rbs to prioritized users. Maybe, we can do a mapping between the
       * average rbs per user and the level of priority or multiply the average rbs
       * per user by a coefficient which represents the degree of priority.
       */

616 617
      N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth);

618
      if (total_ue_count == 0) {
619
        average_rbs_per_user[CC_id] = 0;
620 621
      } else if( (min_rb_unit[CC_id] * total_ue_count) <= (N_RB_DL) ) {
        average_rbs_per_user[CC_id] = (uint16_t) floor(N_RB_DL/total_ue_count);
622
      } else {
623
        average_rbs_per_user[CC_id] = min_rb_unit[CC_id]; // consider the total number of use that can be scheduled UE
624
      }
knopp's avatar
knopp committed
625 626
    }
  }
627

628 629
  // note: nb_rbs_required is assigned according to total_buffer_dl
  // extend nb_rbs_required to capture per LCID RB required
630
  for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
631
    rnti = UE_RNTI(Mod_id,i);
632

633 634 635 636 637
    if(rnti == NOT_A_RNTI)
      continue;
    if (UE_list->UE_sched_ctrl[i].ul_out_of_sync == 1)
      continue;

638
    for (ii=0; ii<UE_num_active_CC(UE_list,i); ii++) {
639
      CC_id = UE_list->ordered_CCids[ii][i];
Cedric Roux's avatar
Cedric Roux committed
640
      ue_sched_ctl = &UE_list->UE_sched_ctrl[i];
641
      round    = ue_sched_ctl->round[CC_id][harq_pid];
642

Cedric Roux's avatar
Cedric Roux committed
643 644 645
      // control channel or retransmission
      /* TODO: do we have to check for retransmission? */
      if (mac_eNB_get_rrc_status(Mod_id,rnti) < RRC_RECONFIGURED || round > 0) {
646
        nb_rbs_required_remaining_1[CC_id][i] = nb_rbs_required[CC_id][i];
647
      } else {
648 649
        nb_rbs_required_remaining_1[CC_id][i] = cmin(average_rbs_per_user[CC_id],nb_rbs_required[CC_id][i]);

650
      }
knopp's avatar
knopp committed
651
    }
652
  }
653

654
  //Allocation to UEs is done in 2 rounds,
655 656
  // 1st stage: average number of RBs allocated to each UE
  // 2nd stage: remaining RBs are allocated to high priority UEs
657 658 659 660 661 662
  for(r1=0; r1<2; r1++) {

    for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
      for (ii=0; ii<UE_num_active_CC(UE_list,i); ii++) {
        CC_id = UE_list->ordered_CCids[ii][i];

663
        if(r1 == 0) {
664
          nb_rbs_required_remaining[CC_id][i] = nb_rbs_required_remaining_1[CC_id][i];
665
        } else { // rb required based only on the buffer - rb allloctaed in the 1st round + extra reaming rb form the 1st round
666
          nb_rbs_required_remaining[CC_id][i] = nb_rbs_required[CC_id][i]-nb_rbs_required_remaining_1[CC_id][i]+nb_rbs_required_remaining[CC_id][i];
Cedric Roux's avatar
Cedric Roux committed
667
if (nb_rbs_required_remaining[CC_id][i]<0) abort();
668
        }
669 670 671 672 673 674 675

        if (nb_rbs_required[CC_id][i]> 0 )
          LOG_D(MAC,"round %d : nb_rbs_required_remaining[%d][%d]= %d (remaining_1 %d, required %d,  pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n",
                r1, CC_id, i,
                nb_rbs_required_remaining[CC_id][i],
                nb_rbs_required_remaining_1[CC_id][i],
                nb_rbs_required[CC_id][i],
676
                UE_list->UE_sched_ctrl[i].pre_nb_available_rbs[CC_id],
677 678 679
                N_RBG[CC_id],
                min_rb_unit[CC_id]);

680
      }
knopp's avatar
knopp committed
681
    }
682

683
    if (total_ue_count > 0 ) {
684 685 686 687 688
      for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
        UE_id = i;

        for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
          CC_id = UE_list->ordered_CCids[ii][UE_id];
689
	  ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
690
	  round    = ue_sched_ctl->round[CC_id][harq_pid];
691 692 693 694

          rnti = UE_RNTI(Mod_id,UE_id);

          // LOG_D(MAC,"UE %d rnti 0x\n", UE_id, rnti );
695
          if(rnti == NOT_A_RNTI)
696
            continue;
697 698
	  if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
	    continue;
699

700
          transmission_mode = get_tmode(Mod_id,CC_id,UE_id);
701 702
	  //          mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0);
          //rrc_status = mac_eNB_get_rrc_status(Mod_id,rnti);
703 704 705 706 707 708 709 710 711 712 713 714
          /* 1st allocate for the retx */

          // retransmission in data channels
          // control channel in the 1st transmission
          // data channel for all TM
          LOG_T(MAC,"calling dlsch_scheduler_pre_processor_allocate .. \n ");
          dlsch_scheduler_pre_processor_allocate (Mod_id,
                                                  UE_id,
                                                  CC_id,
                                                  N_RBG[CC_id],
                                                  transmission_mode,
                                                  min_rb_unit[CC_id],
715
                                                  to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth),
716 717 718 719 720
                                                  nb_rbs_required,
                                                  nb_rbs_required_remaining,
                                                  rballoc_sub,
                                                  MIMO_mode_indicator);

721
#ifdef TM5
722 723 724 725

          // data chanel TM5: to be revisted
          if ((round == 0 )  &&
              (transmission_mode == 5)  &&
726
              (ue_sched_ctl->dl_pow_off[CC_id] != 1)) {
727 728 729

            for(j=0; j<N_RBG[CC_id]; j+=2) {

730 731
              if( (((j == (N_RBG[CC_id]-1))&& (rballoc_sub[CC_id][j] == 0) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 0))  ||
                   ((j < (N_RBG[CC_id]-1)) && (rballoc_sub[CC_id][j+1] == 0) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] == 0)) ) &&
732 733 734 735 736 737
                  (nb_rbs_required_remaining[CC_id][UE_id]>0)) {

                for (ii = UE_list->next[i+1]; ii >=0; ii=UE_list->next[ii]) {

                  UE_id2 = ii;
                  rnti2 = UE_RNTI(Mod_id,UE_id2);
738 739
		  ue_sched_ctl2 = &UE_list->UE_sched_ctrl[UE_id2];
		  round2    = ue_sched_ctl2->round[CC_id];
740
                  if(rnti2 == NOT_A_RNTI)
741
                    continue;
742 743
		  if (UE_list->UE_sched_ctrl[UE_id2].ul_out_of_sync == 1)
		    continue;
744

745
                  eNB_UE_stats2 = UE_list->eNB_UE_stats[CC_id][UE_id2];
746
                  //mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti2,frameP,subframeP,&harq_pid2,&round2,0);
747

748
                  if ((mac_eNB_get_rrc_status(Mod_id,rnti2) >= RRC_RECONFIGURED) &&
749
                      (round2==0) &&
750
                      (get_tmode(Mod_id,CC_id,UE_id2)==5) &&
751
                      (ue_sched_ctl->dl_pow_off[CC_id] != 1)) {
752

753 754
                    if( (((j == (N_RBG[CC_id]-1)) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 0)) ||
                         ((j < (N_RBG[CC_id]-1)) && (ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] == 0))  ) &&
755 756 757 758 759
                        (nb_rbs_required_remaining[CC_id][UE_id2]>0)) {

                      if((((eNB_UE_stats2->DL_pmi_single^eNB_UE_stats1->DL_pmi_single)<<(14-j))&0xc000)== 0x4000) { //MU-MIMO only for 25 RBs configuration

                        rballoc_sub[CC_id][j] = 1;
760 761
                        ue_sched_ctl->rballoc_sub_UE[CC_id][j] = 1;
                        ue_sched_ctl2->rballoc_sub_UE[CC_id][j] = 1;
762 763 764 765
                        MIMO_mode_indicator[CC_id][j] = 0;

                        if (j< N_RBG[CC_id]-1) {
                          rballoc_sub[CC_id][j+1] = 1;
766 767
                          ue_sched_ctl->rballoc_sub_UE[CC_id][j+1] = 1;
                          ue_sched_ctl2->rballoc_sub_UE[CC_id][j+1] = 1;
768 769 770
                          MIMO_mode_indicator[CC_id][j+1] = 0;
                        }

771 772
                        ue_sched_ctl->dl_pow_off[CC_id] = 0;
                        ue_sched_ctl2->dl_pow_off[CC_id] = 0;
773 774 775


                        if ((j == N_RBG[CC_id]-1) &&
776 777
                            ((N_RB_DL == 25) ||
                             (N_RB_DL == 50))) {
778
			  
779
                          nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit[CC_id]+1;
780
                          ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit[CC_id]-1;
781
                          nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - min_rb_unit[CC_id]+1;
782
                          ue_sched_ctl2->pre_nb_available_rbs[CC_id] = ue_sched_ctl2->pre_nb_available_rbs[CC_id] + min_rb_unit[CC_id]-1;
783
                        } else {
784 785
                          
			  nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining[CC_id][UE_id] - 4;
786
                          ue_sched_ctl->pre_nb_available_rbs[CC_id] = ue_sched_ctl->pre_nb_available_rbs[CC_id] + 4;
787
                          nb_rbs_required_remaining[CC_id][UE_id2] = nb_rbs_required_remaining[CC_id][UE_id2] - 4;
788
                          ue_sched_ctl2->pre_nb_available_rbs[CC_id] = ue_sched_ctl2->pre_nb_available_rbs[CC_id] + 4;
789 790 791 792 793 794 795 796 797 798 799 800 801
                        }

                        break;
                      }
                    }
                  }
                }
              }
            }
          }

#endif
        }
knopp's avatar
knopp committed
802
      }
803
    } // total_ue_count
804
  } // end of for for r1 and r2
805 806 807

#ifdef TM5

knopp's avatar
knopp committed
808
  // This has to be revisited!!!!
809
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
knopp's avatar
knopp committed
810 811 812
    i1=0;
    i2=0;
    i3=0;
813 814

    for (j=0; j<N_RBG[CC_id]; j++) {
815
      if(MIMO_mode_indicator[CC_id][j] == 2) {
816
        i1 = i1+1;
817
      } else if(MIMO_mode_indicator[CC_id][j] == 1) {
818
        i2 = i2+1;
819
      } else if(MIMO_mode_indicator[CC_id][j] == 0) {
820
        i3 = i3+1;
821
      }
knopp's avatar
knopp committed
822
    }
823

824
    if((i1 < N_RBG[CC_id]) && (i2>0) && (i3==0)) {
knopp's avatar
knopp committed
825
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions + 1;
826
    }
827

828
    if(i3 == N_RBG[CC_id] && i1==0 && i2==0) {
knopp's avatar
knopp committed
829
      PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions + 1;
830
    }
831

832
    if((i1 < N_RBG[CC_id]) && (i3 > 0)) {
knopp's avatar
knopp committed
833
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions + 1;
834
    }
835

knopp's avatar
knopp committed
836
    PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions = PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions + 1;
837

838 839
  }

840 841 842
#endif

  for(i=UE_list->head; i>=0; i=UE_list->next[i]) {
843
    UE_id = i;
844
    ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
845 846

    for (ii=0; ii<UE_num_active_CC(UE_list,UE_id); ii++) {
knopp's avatar
knopp committed
847
      CC_id = UE_list->ordered_CCids[ii][UE_id];
848
      //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id];
849

850
      if (ue_sched_ctl->pre_nb_available_rbs[CC_id] > 0 ) {
851
        LOG_D(MAC,"******************DL Scheduling Information for UE%d ************************\n",UE_id);
852
        LOG_D(MAC,"dl power offset UE%d = %d \n",UE_id,ue_sched_ctl->dl_pow_off[CC_id]);
853 854 855 856
        LOG_D(MAC,"***********RB Alloc for every subband for UE%d ***********\n",UE_id);

        for(j=0; j<N_RBG[CC_id]; j++) {
          //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].rballoc_sub[i] = rballoc_sub_UE[CC_id][UE_id][i];
857
          LOG_D(MAC,"RB Alloc for UE%d and Subband%d = %d\n",UE_id,j,ue_sched_ctl->rballoc_sub_UE[CC_id][j]);
858 859 860
        }

        //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id];
861
        LOG_D(MAC,"Total RBs allocated for UE%d = %d\n",UE_id,ue_sched_ctl->pre_nb_available_rbs[CC_id]);
862
      }
knopp's avatar
knopp committed
863
    }
864 865 866
  }
}

Cedric Roux's avatar
Cedric Roux committed
867
#define SF0_LIMIT 1
868

869
void dlsch_scheduler_pre_processor_reset (int module_idP,
870 871 872 873 874 875 876 877 878 879
					  int UE_id,
					  uint8_t  CC_id,
					  int frameP,
					  int subframeP,					  
					  int N_RBG,
					  uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
					  uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
					  unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
					  unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX])
  
880
{
881
  int i,j;
882
  UE_list_t *UE_list=&RC.mac[module_idP]->UE_list;
883 884
  UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
  rnti_t rnti = UE_RNTI(module_idP,UE_id);
885

886
  uint8_t *vrb_map = RC.mac[module_idP]->common_channels[CC_id].vrb_map;
887
  int N_RB_DL = to_prb(RC.mac[module_idP]->common_channels[CC_id].mib->message.dl_Bandwidth);
888
  int RBGsize = N_RB_DL/N_RBG,RBGsize_last;
Cedric Roux's avatar
Cedric Roux committed
889 890
#ifdef SF0_LIMIT
  int sf0_upper=-1,sf0_lower=-1;
891
#endif
892 893


894
  //LOG_D(MAC,"Running preprocessor for UE %d (%x)\n",UE_id,rnti);
895
  // initialize harq_pid and round
896

Cedric Roux's avatar
Cedric Roux committed
897 898
  if (ue_sched_ctl->ta_timer) ue_sched_ctl->ta_timer--;

899 900 901
  /*
  eNB_UE_stats *eNB_UE_stats;

902 903 904
  if (eNB_UE_stats == NULL)
    return;

905
  
906 907 908 909
  mac_xface->get_ue_active_harq_pid(module_idP,CC_id,rnti,
				    frameP,subframeP,
				    &ue_sched_ctl->harq_pid[CC_id],
				    &ue_sched_ctl->round[CC_id],
910
				    openair_harq_DL);
911 912
  

913
  if (ue_sched_ctl->ta_timer == 0) {
914

915 916 917
    // WE SHOULD PROTECT the eNB_UE_stats with a mutex here ...

    ue_sched_ctl->ta_timer = 20;  // wait 20 subframes before taking TA measurement from PHY
918
    switch (N_RB_DL) {
919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939
    case 6:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update;
      break;
      
    case 15:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/2;
      break;
      
    case 25:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/4;
      break;
      
    case 50:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/8;
      break;
      
    case 75:
      ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/12;
      break;
      
    case 100:
940
	ue_sched_ctl->ta_update = eNB_UE_stats->timing_advance_update/16;
941 942 943 944 945 946 947 948 949
      break;
    }
    // clear the update in case PHY does not have a new measurement after timer expiry
    eNB_UE_stats->timing_advance_update =  0;
  }
  else {
    ue_sched_ctl->ta_timer--;
    ue_sched_ctl->ta_update =0; // don't trigger a timing advance command
  }
950 951
  

952 953 954
  if (UE_id==0) {
    VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_UE0_TIMING_ADVANCE,ue_sched_ctl->ta_update);
  }
955 956
  */

957
  nb_rbs_required[CC_id][UE_id]=0;
958 959
  ue_sched_ctl->pre_nb_available_rbs[CC_id] = 0;
  ue_sched_ctl->dl_pow_off[CC_id] = 2;
960
  nb_rbs_required_remaining[CC_id][UE_id] = 0;
961 962
  
  switch (N_RB_DL) {
Cedric Roux's avatar
Cedric Roux committed
963 964 965 966 967 968
  case 6:   RBGsize = 1; RBGsize_last = 1; break;
  case 15:  RBGsize = 2; RBGsize_last = 1; break;
  case 25:  RBGsize = 2; RBGsize_last = 1; break;
  case 50:  RBGsize = 3; RBGsize_last = 2; break;
  case 75:  RBGsize = 4; RBGsize_last = 3; break;
  case 100: RBGsize = 4; RBGsize_last = 4; break;
969
  default: AssertFatal(1==0,"unsupported RBs (%d)\n", N_RB_DL);
Cedric Roux's avatar
Cedric Roux committed
970
  }
971
  
Cedric Roux's avatar
Cedric Roux committed
972
#ifdef SF0_LIMIT
973 974
  switch (N_RBG) {
  case 6:
Cedric Roux's avatar
Cedric Roux committed
975 976
    sf0_lower=0;
    sf0_upper=5;
977 978
    break;
  case 8:
Cedric Roux's avatar
Cedric Roux committed
979 980
    sf0_lower=2;
    sf0_upper=5;
981 982
    break;
  case 13:
Cedric Roux's avatar
Cedric Roux committed
983 984
    sf0_lower=4;
    sf0_upper=7;
985 986
    break;
  case 17:
Cedric Roux's avatar
Cedric Roux committed
987 988
    sf0_lower=7;
    sf0_upper=9;
989 990
    break;
  case 25:
Cedric Roux's avatar
Cedric Roux committed
991 992
    sf0_lower=11;
    sf0_upper=13;
993
    break;
994
  default: AssertFatal(1==0,"unsupported RBs (%d)\n", N_RB_DL);
995 996
  }
#endif
997
  // Initialize Subbands according to VRB map
998
  for (i=0; i<N_RBG; i++) {
Cedric Roux's avatar
Cedric Roux committed
999 1000
    int rb_size = i==N_RBG-1 ? RBGsize_last : RBGsize;

1001
    ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 0;
1002
    rballoc_sub[CC_id][i] = 0;
Cedric Roux's avatar
Cedric Roux committed
1003 1004 1005 1006 1007 1008 1009 1010
#ifdef SF0_LIMIT
    // for avoiding 6+ PRBs around DC in subframe 0 (avoid excessive errors)
    /* TODO: make it proper - allocate those RBs, do not "protect" them, but
     * compute number of available REs and limit MCS according to the
     * TBS table 36.213 7.1.7.2.1-1 (can be done after pre-processor)
     */
    if (subframeP==0 &&
	i >= sf0_lower && i <= sf0_upper)
1011 1012 1013
      rballoc_sub[CC_id][i]=1;
#endif
    // for SI-RNTI,RA-RNTI and P-RNTI allocations
Cedric Roux's avatar
Cedric Roux committed
1014 1015
    for (j = 0; j < rb_size; j++) {
      if (vrb_map[j+(i*RBGsize)] != 0)  {
1016
	rballoc_sub[CC_id][i] = 1;
1017
	//LOG_D(MAC,"Frame %d, subframe %d : vrb %d allocated\n",frameP,subframeP,j+(i*RBGsize));
1018 1019 1020
	break;
      }
    }
1021
    //LOG_D(MAC,"Frame %d Subframe %d CC_id %d RBG %i : rb_alloc %d\n",frameP,subframeP,CC_id,i,rballoc_sub[CC_id][i]);
1022
    MIMO_mode_indicator[CC_id][i] = 2;
1023 1024 1025 1026 1027
  }
}


/* Greedily assign free resource-block groups (RBGs) to one UE on one CC.
 *
 * Walks every RBG of the carrier and, for each group that is free in both
 * the common and the per-UE allocation bitmaps, grants it to the UE while
 * it still needs RBs (nb_rbs_required_remaining) and has not yet reached
 * its target (nb_rbs_required).  A UE whose dl_pow_off is 0 (already paired
 * for TM5 MU-MIMO) is not given further RBGs here.  On 25/50-PRB carriers
 * the last RBG holds one PRB fewer, so it is accounted with min_rb_unit-1.
 * Granting any RBG to a TM5 UE switches it to SU-MIMO power (dl_pow_off=1).
 */
void dlsch_scheduler_pre_processor_allocate (module_id_t   Mod_id,
    int           UE_id,
    uint8_t       CC_id,
    int           N_RBG,
    int           transmission_mode,
    int           min_rb_unit,
    uint8_t       N_RB_DL,
    uint16_t      nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
    uint16_t      nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
    unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
    unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX])
{
  UE_list_t     *ue_list = &RC.mac[Mod_id]->UE_list;
  UE_sched_ctrl *sched   = &ue_list->UE_sched_ctrl[UE_id];
  int rbg;

  for (rbg = 0; rbg < N_RBG; rbg++) {
    /* skip RBGs already taken globally or by this UE */
    if (rballoc_sub[CC_id][rbg] != 0)
      continue;
    if (sched->rballoc_sub_UE[CC_id][rbg] != 0)
      continue;
    /* stop granting once the UE's demand or target is satisfied */
    if (nb_rbs_required_remaining[CC_id][UE_id] == 0)
      continue;
    if (sched->pre_nb_available_rbs[CC_id] >= nb_rbs_required[CC_id][UE_id])
      continue;
    /* dl_pow_off == 0 means this UE is already scheduled for TM5 MU-MIMO */
    if (sched->dl_pow_off[CC_id] == 0)
      continue;

    /* the final RBG of a 25/50-PRB carrier carries one PRB less */
    int last_short = (rbg == N_RBG-1) && ((N_RB_DL == 25) || (N_RB_DL == 50));
    int rbg_rbs    = last_short ? (min_rb_unit - 1) : min_rb_unit;

    if (nb_rbs_required_remaining[CC_id][UE_id] < rbg_rbs)
      continue;

    /* grant this RBG to the UE */
    rballoc_sub[CC_id][rbg]           = 1;
    sched->rballoc_sub_UE[CC_id][rbg] = 1;
    MIMO_mode_indicator[CC_id][rbg]   = 1;
    if (transmission_mode == 5) {
      sched->dl_pow_off[CC_id] = 1;
    }
    nb_rbs_required_remaining[CC_id][UE_id] -= rbg_rbs;
    sched->pre_nb_available_rbs[CC_id]      += rbg_rbs;
  }
}


1083
/// ULSCH PRE_PROCESSOR
1084

1085

1086
void ulsch_scheduler_pre_processor(module_id_t module_idP,
1087 1088
                                   int frameP,
                                   sub_frame_t subframeP,
1089
                                   uint16_t *first_rb)
1090
{
1091 1092 1093

  int16_t            i;
  uint16_t           UE_id,n,r;
1094
  uint8_t            CC_id, harq_pid;
1095 1096
  uint16_t           nb_allocated_rbs[MAX_NUM_CCs][NUMBER_OF_UE_MAX],total_allocated_rbs[MAX_NUM_CCs],average_rbs_per_user[MAX_NUM_CCs];
  int16_t            total_remaining_rbs[MAX_NUM_CCs];
1097 1098 1099 1100 1101
  uint16_t           max_num_ue_to_be_scheduled = 0;
  uint16_t           total_ue_count             = 0;
  rnti_t             rnti                       = -1;
  UE_list_t          *UE_list                   = &RC.mac[module_idP]->UE_list;
  UE_TEMPLATE        *UE_template               = 0;
1102 1103
  int                N_RB_DL;
  int                N_RB_UL;
1104
  //LOG_D(MAC,"In ulsch_preprocessor: assign max mcs min rb\n");
1105 1106
  // maximize MCS and then allocate required RB according to the buffer occupancy with the limit of max available UL RB
  assign_max_mcs_min_rb(module_idP,frameP, subframeP, first_rb);
1107

1108
  //LOG_D(MAC,"In ulsch_preprocessor: sort ue \n");
1109
  // sort ues
1110 1111
  sort_ue_ul (module_idP,frameP, subframeP);

1112

1113 1114
  // we need to distribute RBs among UEs
  // step1:  reset the vars
1115
  for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
1116 1117 1118 1119 1120
    N_RB_DL                     = to_prb(RC.mac[module_idP]->common_channels[CC_id].mib->message.dl_Bandwidth);
    N_RB_UL                     = to_prb(RC.mac[module_idP]->common_channels[CC_id].ul_Bandwidth);
    total_allocated_rbs[CC_id]  = 0;
    total_remaining_rbs[CC_id]  = 0;
    average_rbs_per_user[CC_id] = 0;
gauthier's avatar