if ((ce_ptr-mac_header_control_elements) > 0) {
memcpy((void*)mac_header_ptr,mac_header_control_elements,ce_ptr-mac_header_control_elements);
mac_header_ptr+=(unsigned char)(ce_ptr-mac_header_control_elements);
}
#ifdef DEBUG_HEADER_PARSING
LOG_T(MAC," [UE %d] header : ", crnti);
for (i=0;i<((unsigned char*)mac_header_ptr - mac_header);i++)
LOG_T(MAC,"%2x.",mac_header[i]);
LOG_T(MAC,"\n");
#endif
return((unsigned char*)mac_header_ptr - mac_header);
}
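// Build the MAC PDU for an UL-SCH transmission opportunity of size buflen:
// pull SDUs from RLC for DCCH, DCCH1 and DTCH, prepend BSR/PHR control elements
// when they are needed and fit, generate the MAC header with generate_ulsch_header()
// and fill the remainder of the transport block with padding.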
void ue_get_sdu(module_id_t module_idP,frame_t frameP,sub_frame_t subframe, u8 eNB_index,u8 *ulsch_buffer,u16 buflen, u8 *access_mode) {
mac_rlc_status_resp_t rlc_status;
u8 dcch_header_len=0,dcch1_header_len=0,dtch_header_len=0;
u8 dcch_header_len_tmp=0, dtch_header_len_tmp=0;
u8 bsr_header_len=0, bsr_ce_len=0, bsr_len=0;
u8 phr_header_len=0, phr_ce_len=0,phr_len=0;
u16 sdu_lengths[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
u8 sdu_lcids[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
u8 payload_offset=0,num_sdus=0;
u8 ulsch_buff[MAX_ULSCH_PAYLOAD_BYTES];
u16 sdu_length_total=0;
BSR_SHORT bsr_short;
BSR_LONG bsr_long;
BSR_SHORT *bsr_s=&bsr_short;
BSR_LONG *bsr_l=&bsr_long;
POWER_HEADROOM_CMD phr;
POWER_HEADROOM_CMD *phr_p=&phr;
unsigned short short_padding=0, post_padding=0;
int j; // used for padding
int lcgid = -1; // logical channel group to report in the BSR (set via get_bsr_lcgid below)
// Compute header length
vcd_signal_dumper_dump_function_by_name(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GET_SDU, VCD_FUNCTION_IN);
#ifdef CBA
if (*access_mode==CBA_ACCESS){
LOG_D(MAC,"[UE %d] frameP %d subframe %d try CBA transmission\n",
module_idP, frameP, subframe);
//if (UE_mac_inst[module_idP].scheduling_info.LCID_status[DTCH] == LCID_EMPTY)
if (use_cba_access(module_idP,frameP,subframe,eNB_index)==0){
*access_mode=POSTPONED_ACCESS;
vcd_signal_dumper_dump_function_by_name(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GET_SDU, VCD_FUNCTION_OUT);
return;
}
LOG_D(MAC,"[UE %d] frameP %d subframe %d CBA transmission oppurtunity, tbs %d\n",
module_idP, frameP, subframe,buflen);
}
#endif
dcch_header_len=2;//sizeof(SCH_SUBHEADER_SHORT);
dcch1_header_len=2;//sizeof(SCH_SUBHEADER_SHORT);
// hypothetical length; in case of a long header, skip the padding byte
dtch_header_len=(buflen > 128 ) ? 3 : 2 ; //sizeof(SCH_SUBHEADER_LONG)-1 : sizeof(SCH_SUBHEADER_SHORT);
bsr_header_len = 1;//sizeof(SCH_SUBHEADER_FIXED);
phr_header_len = 1;//sizeof(SCH_SUBHEADER_FIXED);
phr_ce_len = (UE_mac_inst[module_idP].PHR_reporting_active == 1) ? 1 /* sizeof(POWER_HEADROOM_CMD)*/: 0;
if (phr_ce_len > 0){
phr_len = phr_ce_len + phr_header_len;
LOG_D(MAC,"[UE %d] header size info: PHR len %d (ce%d,hdr%d) buff_len %d\n",
module_idP, phr_len, phr_ce_len, phr_header_len, buflen);
}else
phr_len=0;
bsr_ce_len = get_bsr_len (module_idP, buflen-phr_len);
if (bsr_ce_len > 0 ){
bsr_len = bsr_ce_len + bsr_header_len;
LOG_D(MAC,"[UE %d] header size info: dcch %d, dcch1 %d, dtch %d, bsr (ce%d,hdr%d) buff_len %d\n",
module_idP, dcch_header_len,dcch1_header_len,dtch_header_len, bsr_ce_len, bsr_header_len, buflen);
} else
bsr_len = 0;
// check for UL bandwidth requests and add SR control element
// Check for DCCH first
sdu_lengths[0]=0;
if (UE_mac_inst[module_idP].scheduling_info.LCID_status[DCCH] == LCID_NOT_EMPTY) {
rlc_status = mac_rlc_status_ind(0, module_idP,frameP,ENB_FLAG_NO,MBMS_FLAG_NO,
DCCH,
(buflen-dcch_header_len-bsr_len-phr_len));
LOG_D(MAC, "[UE %d] Frame %d : UL-DCCH -> ULSCH, RRC message has %d bytes to "
"send (Transport Block size %d, mac header len %d)\n",
module_idP,frameP, rlc_status.bytes_in_buffer,buflen,dcch_header_len);
sdu_lengths[0] += mac_rlc_data_req(0, module_idP,frameP,ENB_FLAG_NO, MBMS_FLAG_NO,
DCCH,
(char *)&ulsch_buff[sdu_lengths[0]]);
sdu_length_total += sdu_lengths[0];
sdu_lcids[0] = DCCH;
LOG_D(MAC,"[UE %d] TX Got %d bytes for DCCH\n",module_idP,sdu_lengths[0]);
num_sdus = 1;
update_bsr(module_idP, frameP, DCCH, UE_mac_inst[module_idP].scheduling_info.LCGID[DCCH]);
}
else {
}
// DCCH1
if (UE_mac_inst[module_idP].scheduling_info.LCID_status[DCCH1] == LCID_NOT_EMPTY) {
rlc_status = mac_rlc_status_ind(0, module_idP,frameP,ENB_FLAG_NO,MBMS_FLAG_NO,
DCCH1,
(buflen-bsr_len-phr_len-dcch_header_len-dcch1_header_len-sdu_length_total));
LOG_D(MAC,"[UE %d] Frame %d : UL-DCCH1 -> ULSCH, RRC message has %d bytes to"
" send (Transport Block size %d, mac header len %d)\n",
module_idP,frameP, rlc_status.bytes_in_buffer,buflen,dcch1_header_len);
sdu_lengths[num_sdus] = mac_rlc_data_req(0, module_idP,frameP,ENB_FLAG_NO,MBMS_FLAG_NO,
DCCH1,
(char *)&ulsch_buff[sdu_lengths[0]]);
sdu_length_total += sdu_lengths[num_sdus];
sdu_lcids[num_sdus] = DCCH1;
LOG_D(MAC,"[UE %d] TX Got %d bytes for DCCH1\n",module_idP,sdu_lengths[num_sdus]);
num_sdus++;
//update_bsr(module_idP, frameP, DCCH1);
}
else {
}
if ((UE_mac_inst[module_idP].scheduling_info.LCID_status[DTCH] == LCID_NOT_EMPTY) &&
((bsr_len+phr_len+dcch_header_len+dcch1_header_len+dtch_header_len+sdu_length_total) <= buflen)){
// optimize the dtch header length
//if ((UE_mac_inst[module_idP].scheduling_info.BSR_bytes[DTCH] > 128) &&
if (((UE_mac_inst[module_idP].scheduling_info.BSR_bytes[DTCH] >= 128) &&
((UE_mac_inst[module_idP].scheduling_info.BSR_bytes[DTCH]+bsr_len+phr_len+dcch_header_len+dcch1_header_len+dtch_header_len) > buflen)&&
buflen >=128 ))
dtch_header_len = 3;//sizeof(SCH_SUBHEADER_LONG);
else
dtch_header_len = 2;//sizeof(SCH_SUBHEADER_SHORT);
rlc_status = mac_rlc_status_ind(0, module_idP,frameP,ENB_FLAG_NO,MBMS_FLAG_NO,
DTCH,
buflen-bsr_len-phr_len-dcch_header_len-dcch1_header_len-dtch_header_len-sdu_length_total);
LOG_D(MAC,"[UE %d] Frame %d : UL-DTCH -> ULSCH, %d bytes to send (Transport Block size %d, mac header len %d, BSR byte[DTCH] %d)\n",
module_idP,frameP, rlc_status.bytes_in_buffer,buflen,dtch_header_len,
UE_mac_inst[module_idP].scheduling_info.BSR_bytes[DTCH]);
sdu_lengths[num_sdus] = mac_rlc_data_req(0, module_idP,frameP, ENB_FLAG_NO, MBMS_FLAG_NO,
DTCH,
(char *)&ulsch_buff[sdu_length_total]);
//adjust dtch header
dtch_header_len = (sdu_lengths[num_sdus] >= 128) ? 3 : 2;
LOG_D(MAC,"[UE %d] TX Got %d bytes for DTCH\n",module_idP,sdu_lengths[num_sdus]);
sdu_lcids[num_sdus] = DTCH;
sdu_length_total += sdu_lengths[num_sdus];
num_sdus++;
UE_mac_inst[module_idP].ul_active = update_bsr(module_idP, frameP, DTCH, UE_mac_inst[module_idP].scheduling_info.LCGID[DTCH]);
}
else { // no rlc pdu : generate the dummy header
}
// select the logical channel group to report in the BSR control element
lcgid = get_bsr_lcgid(module_idP);
if (lcgid < 0 ) {
} else if ((lcgid ==MAX_NUM_LCGID) && (bsr_ce_len == sizeof(BSR_LONG))) {
bsr_s = NULL;
bsr_l->Buffer_size0 = UE_mac_inst[module_idP].scheduling_info.BSR[LCGID0];
bsr_l->Buffer_size1 = UE_mac_inst[module_idP].scheduling_info.BSR[LCGID1];
bsr_l->Buffer_size2 = UE_mac_inst[module_idP].scheduling_info.BSR[LCGID2];
bsr_l->Buffer_size3 = UE_mac_inst[module_idP].scheduling_info.BSR[LCGID3];
LOG_D(MAC, "[UE %d] Frame %d report long BSR (level LCGID0 %d,level LCGID1 %d,level LCGID2 %d,level LCGID3 %d)\n", module_idP,frameP,
UE_mac_inst[module_idP].scheduling_info.BSR[LCGID0],
UE_mac_inst[module_idP].scheduling_info.BSR[LCGID1],
UE_mac_inst[module_idP].scheduling_info.BSR[LCGID2],
UE_mac_inst[module_idP].scheduling_info.BSR[LCGID3]);
} else if (bsr_ce_len == sizeof(BSR_SHORT)) {
bsr_l = NULL;
bsr_s->LCGID = lcgid;
bsr_s->Buffer_size = UE_mac_inst[module_idP].scheduling_info.BSR[lcgid];
LOG_D(MAC,"[UE %d] Frame %d report SHORT BSR with level %d for LCGID %d\n",
module_idP, frameP, UE_mac_inst[module_idP].scheduling_info.BSR[lcgid],lcgid);
} else {
bsr_s = NULL;
bsr_l = NULL;
}
if (phr_ce_len == sizeof(POWER_HEADROOM_CMD)){
phr_p->PH = get_phr_mapping(module_idP,eNB_index);
phr_p->R = 0;
LOG_D(MAC,"[UE %d] Frame %d report PHR with mapping (%d->%d) for LCID %d\n",
module_idP,frameP, mac_xface->get_PHR(module_idP,eNB_index), phr_p->PH,POWER_HEADROOM);
}else
phr_p=NULL;
LOG_T(MAC,"[UE %d] Frame %d: bsr s %p bsr_l %p, phr_p %p\n", module_idP,frameP,bsr_s, bsr_l, phr_p);
// adjust the header length
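// the last subheader before padding carries no L/F fields, so only its fixed byte is
// counted here; the full subheader length is restored below when post-padding is used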
dcch_header_len_tmp = dcch_header_len;
dtch_header_len_tmp = dtch_header_len;
if (dtch_header_len==0)
dcch_header_len = (dcch_header_len>0)? 1: dcch_header_len;
else
dtch_header_len= (dtch_header_len >0)? 1: dtch_header_len; // for short and long, cut the length+F fields
if ((buflen-bsr_len-phr_len-dcch_header_len-dcch1_header_len-dtch_header_len-sdu_length_total) <= 2) {
short_padding = buflen-bsr_len-phr_len-dcch_header_len-dcch1_header_len-dtch_header_len-sdu_length_total;
post_padding = 0;
}
else {
if ((buflen-bsr_len-phr_len-dcch_header_len-dcch1_header_len-dtch_header_len-sdu_length_total) == buflen) {
*access_mode=CANCELED_ACCESS;
}
short_padding = 0;
if (dtch_header_len==0)
dcch_header_len = dcch_header_len_tmp;
else
dtch_header_len= dtch_header_len_tmp;
post_padding = buflen-bsr_len-phr_len-dcch_header_len-dcch1_header_len-dtch_header_len-sdu_length_total -1 ;
}
// Generate header
// if (num_sdus>0) {
payload_offset = generate_ulsch_header(ulsch_buffer, // mac header
num_sdus, // num sdus
short_padding, // short padding
sdu_lengths, // sdu length
sdu_lcids, // sdu lcid
phr_p, // power headroom
NULL, // crnti
NULL, // truncated bsr
bsr_s, // short bsr
bsr_l, // long bsr
post_padding); // post padding
LOG_I(MAC,"[UE %d] Generate header :bufflen %d sdu_length_total %d, num_sdus %d, sdu_lengths[0] %d, sdu_lcids[0] %d => payload offset %d, dcch_header_len %d, dtch_header_len %d, padding %d,post_padding %d, bsr len %d, phr len %d, reminder %d \n",
module_idP,buflen, sdu_length_total,num_sdus,sdu_lengths[0],sdu_lcids[0],payload_offset, dcch_header_len, dtch_header_len,
short_padding,post_padding, bsr_len, phr_len,buflen-sdu_length_total-payload_offset);
// cycle through SDUs and place in ulsch_buffer
memcpy(&ulsch_buffer[payload_offset],ulsch_buff,sdu_length_total);
// fill remainder of ULSCH with random data
for (j=0;j<(buflen-sdu_length_total-payload_offset);j++)
ulsch_buffer[payload_offset+sdu_length_total+j] = (char)(taus()&0xff);
#if defined(USER_MODE) && defined(OAI_EMU)
if (oai_emulation.info.opt_enabled)
trace_pdu(0, ulsch_buffer, buflen, module_idP, 3, UE_mac_inst[module_idP].crnti, subframe, 0, 0);
LOG_D(OPT,"[UE %d][ULSCH] Frame %d trace pdu for rnti %x with size %d\n",
module_idP, frameP, UE_mac_inst[module_idP].crnti, buflen);
#endif
LOG_D(MAC,"[UE %d][SR] Gave SDU to PHY, clearing any scheduling request\n",
module_idP,payload_offset, sdu_length_total);
UE_mac_inst[module_idP].scheduling_info.SR_pending=0;
UE_mac_inst[module_idP].scheduling_info.SR_COUNTER=0;
vcd_signal_dumper_dump_function_by_name(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_GET_SDU, VCD_FUNCTION_OUT);
}
// called at each subframe
// Performs :
// 1. Trigger PDCP every 5ms
// 2. Call RRC for link status return to PHY
// 3. Perform SR/BSR procedures for scheduling feedback
// 4. Perform PHR procedures
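// 5. Update Bj and the buffer status (BSR) for each active logical channel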
UE_L2_STATE_t ue_scheduler(module_id_t module_idP,frame_t frameP, sub_frame_t subframeP, lte_subframe_t directionP,u8 eNB_indexP) {
int lcid; // lcid index
int TTI= 1;
int bucketsizeduration = -1;
int bucketsizeduration_max = -1;
// mac_rlc_status_resp_t rlc_status[MAX_NUM_LCGID]; // 4
// s8 lcg_id;
struct RACH_ConfigCommon *rach_ConfigCommon = (struct RACH_ConfigCommon *)NULL;
#ifdef EXMIMO
int ret; // return value of pthread_mutex_trylock below
#endif
#if defined(ENABLE_ITTI)
MessageDef *msg_p;
const char *msg_name;
instance_t instance;
int result;
#endif
vcd_signal_dumper_dump_function_by_name(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_SCHEDULER, VCD_FUNCTION_IN);
#if defined(ENABLE_ITTI)
do {
// Checks if a message has been sent to MAC sub-task
itti_poll_msg (TASK_MAC_UE, &msg_p);
if (msg_p != NULL) {
msg_name = ITTI_MSG_NAME (msg_p);
instance = ITTI_MSG_INSTANCE (msg_p);
switch (ITTI_MSG_ID(msg_p)) {
case RRC_MAC_CCCH_DATA_REQ:
LOG_I(MAC, "Received %s from %s: instance %d, frameP %d, eNB_index %d\n",
msg_name, ITTI_MSG_ORIGIN_NAME(msg_p), instance,
RRC_MAC_CCCH_DATA_REQ (msg_p).frame, RRC_MAC_CCCH_DATA_REQ (msg_p).enb_index);
break;
default:
LOG_E(MAC, "Received unexpected message %s\n", msg_name);
break;
}
result = itti_free (ITTI_MSG_ORIGIN_ID(msg_p), msg_p);
AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result);
}
} while(msg_p != NULL);
#endif
//Mac_rlc_xface->frameP=frameP;
//Rrc_xface->Frame_index=Mac_rlc_xface->frameP;
//if (subframe%5 == 0)
#ifdef EXMIMO
pdcp_run(frameP, 0, module_idP, eNB_indexP);
ret = pthread_mutex_trylock (&pdcp_mutex);
if (ret != 0) {
if (ret==EBUSY)
LOG_E(PDCP,"Mutex busy\n");
else
LOG_E(PDCP,"Cannot lock mutex\n");

//return(-1);
}
else {
pdcp_instance_cnt++;
pthread_mutex_unlock(&pdcp_mutex);

if (pdcp_instance_cnt == 0) {
if (pthread_cond_signal(&pdcp_cond) != 0) {
LOG_E(PDCP,"pthread_cond_signal unsuccessfull\n");
//return(-1);
}
}
else {
LOG_W(PDCP,"PDCP thread busy!!! inst_cnt=%d\n",pdcp_instance_cnt);
}
}
#endif
UE_mac_inst[module_idP].frame = frameP;
UE_mac_inst[module_idP].subframe = subframeP;
#ifdef CELLULAR
rrc_rx_tx(module_idP, frameP, 0, eNB_indexP);
#else
switch (rrc_rx_tx(module_idP,
frameP,
0,
eNB_indexP)) {
case RRC_OK:
break;
case RRC_ConnSetup_failed:
LOG_E(MAC,"RRCConnectionSetup failed, returning to IDLE state\n");
return(CONNECTION_LOST);
break;
case RRC_PHY_RESYNCH:
LOG_E(MAC,"RRC Loss of synch, returning PHY_RESYNCH\n");
return(PHY_RESYNCH);
case RRC_Handover_failed:
LOG_N(MAC,"Handover failure for UE %d eNB_index %d\n",module_idP,eNB_indexP);
//Invalid...need to add another MAC UE state for re-connection procedure
mac_xface->phy_config_afterHO_ue(module_idP,eNB_indexP,(MobilityControlInfo_t *)NULL,1);
//return(3);
break;
case RRC_HO_STARTED:
LOG_I(MAC,"RRC handover, Instruct PHY to start the contention-free PRACH and synchronization\n");
return(PHY_HO_PRACH);
default:
break;
}
#endif
// Check Contention resolution timer (put in a function later)
if (UE_mac_inst[module_idP].RA_contention_resolution_timer_active == 1) {
if (UE_mac_inst[module_idP].radioResourceConfigCommon)
rach_ConfigCommon = &UE_mac_inst[module_idP].radioResourceConfigCommon->rach_ConfigCommon;
else {
LOG_E(MAC,"FATAL: radioResourceConfigCommon is NULL!!!\n");
mac_xface->macphy_exit("");
vcd_signal_dumper_dump_function_by_name(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_SCHEDULER, VCD_FUNCTION_OUT);
return(RRC_OK);
}
LOG_I(MAC,"Frame %d: Contention resolution timer %d/%d\n",frameP,UE_mac_inst[module_idP].RA_contention_resolution_cnt,
((1+rach_ConfigCommon->ra_SupervisionInfo.mac_ContentionResolutionTimer)<<3));
UE_mac_inst[module_idP].RA_contention_resolution_cnt++;
if (UE_mac_inst[module_idP].RA_contention_resolution_cnt ==
((1+rach_ConfigCommon->ra_SupervisionInfo.mac_ContentionResolutionTimer)<<3)) {
UE_mac_inst[module_idP].RA_active = 0;
// Signal PHY to quit RA procedure
LOG_E(MAC,"Contention resolution timer expired, RA failed\n");
mac_xface->ra_failed(module_idP,eNB_indexP);
}
}
// call SR procedure to generate pending SR and BSR for next PUCCH/PUSCH TxOp. This should implement the procedures
// outlined in Sections 5.4.4 and 5.4.5 of 36.321
// Put this in another function
// Get RLC status info and update Bj for all lcids that are active
for (lcid=DCCH; lcid <= DTCH; lcid++ ) {
if ((lcid == 0) ||(UE_mac_inst[module_idP].logicalChannelConfig[lcid])) {
// measure the Bj
if ((directionP == SF_UL)&& (UE_mac_inst[module_idP].scheduling_info.Bj[lcid] >= 0)){
if (UE_mac_inst[module_idP].logicalChannelConfig[lcid]->ul_SpecificParameters) {
bucketsizeduration = UE_mac_inst[module_idP].logicalChannelConfig[lcid]->ul_SpecificParameters->prioritisedBitRate * TTI;
bucketsizeduration_max = get_ms_bucketsizeduration(UE_mac_inst[module_idP].logicalChannelConfig[lcid]->ul_SpecificParameters->bucketSizeDuration);
}
else {
LOG_E(MAC,"[UE %d] lcid %d, NULL ul_SpecificParameters\n",module_idP,lcid);
mac_xface->macphy_exit("");
}
if ( UE_mac_inst[module_idP].scheduling_info.Bj[lcid] > bucketsizeduration_max )
UE_mac_inst[module_idP].scheduling_info.Bj[lcid] = bucketsizeduration_max;
else
UE_mac_inst[module_idP].scheduling_info.Bj[lcid] = bucketsizeduration;
}
if (update_bsr(module_idP,frameP, lcid, UE_mac_inst[module_idP].scheduling_info.LCGID[lcid])) {
UE_mac_inst[module_idP].scheduling_info.SR_pending= 1;
LOG_D(MAC,"[UE %d][SR] Frame %d subframe %d SR for PUSCH is pending for LCGID %d with BSR level %d (%d bytes in RLC)\n",
module_idP, frameP,subframeP,UE_mac_inst[module_idP].scheduling_info.LCGID[lcid],
UE_mac_inst[module_idP].scheduling_info.BSR[UE_mac_inst[module_idP].scheduling_info.LCGID[lcid]],
UE_mac_inst[module_idP].scheduling_info.BSR_bytes[UE_mac_inst[module_idP].scheduling_info.LCGID[lcid]]);
}
}
}
// UE has no valid phy config dedicated || no valid/released SR
if ((UE_mac_inst[module_idP].physicalConfigDedicated == NULL)) {
// cancel all pending SRs
UE_mac_inst[module_idP].scheduling_info.SR_pending=0;
UE_mac_inst[module_idP].ul_active=0;
LOG_T(MAC,"[UE %d] Release all SRs \n", module_idP);
vcd_signal_dumper_dump_function_by_name(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_SCHEDULER, VCD_FUNCTION_OUT);
return(CONNECTION_OK);
}
if ((UE_mac_inst[module_idP].physicalConfigDedicated->schedulingRequestConfig == NULL) ||
(UE_mac_inst[module_idP].physicalConfigDedicated->schedulingRequestConfig->present == SchedulingRequestConfig_PR_release)){
// initiate RA with CRNTI included in msg3 (no contention) as described in 36.321 sec 5.1.5
// cancel all pending SRs
UE_mac_inst[module_idP].scheduling_info.SR_pending=0;
UE_mac_inst[module_idP].ul_active=0;
LOG_T(MAC,"[UE %d] Release all SRs \n", module_idP);
}
// Put this in a function
// Call PHR procedure as described in Section 5.4.6 in 36.321
if (UE_mac_inst[module_idP].PHR_state == MAC_MainConfig__phr_Config_PR_setup){ // normal operation
if (UE_mac_inst[module_idP].PHR_reconfigured == 1) { // upon (re)configuration of the power headroom reporting functionality by upper layers
UE_mac_inst[module_idP].PHR_reporting_active = 1;
UE_mac_inst[module_idP].PHR_reconfigured = 0;
} else {
//LOG_D(MAC,"PHR normal operation %d active %d \n", UE_mac_inst[module_idP].scheduling_info.periodicPHR_SF, UE_mac_inst[module_idP].PHR_reporting_active);
if ((UE_mac_inst[module_idP].scheduling_info.prohibitPHR_SF <= 0) &&
((mac_xface->get_PL(module_idP,eNB_indexP) < UE_mac_inst[module_idP].scheduling_info.PathlossChange_db) ||
(UE_mac_inst[module_idP].power_backoff_db[eNB_indexP] > UE_mac_inst[module_idP].scheduling_info.PathlossChange_db)))
// trigger PHR and reset the timer later when the PHR report is sent
UE_mac_inst[module_idP].PHR_reporting_active = 1;
else if (UE_mac_inst[module_idP].PHR_reporting_active ==0 )
UE_mac_inst[module_idP].scheduling_info.prohibitPHR_SF--;
if (UE_mac_inst[module_idP].scheduling_info.periodicPHR_SF <= 0 )
// trigger PHR and reset the timer later when the PHR report is sent
UE_mac_inst[module_idP].PHR_reporting_active = 1;
else if (UE_mac_inst[module_idP].PHR_reporting_active == 0 )
UE_mac_inst[module_idP].scheduling_info.periodicPHR_SF--;
}
} else { // release / nothing
UE_mac_inst[module_idP].PHR_reporting_active = 0; // release PHR
}
//If the UE has UL resources allocated for new transmission for this TTI here:
vcd_signal_dumper_dump_function_by_name(VCD_SIGNAL_DUMPER_FUNCTIONS_UE_SCHEDULER, VCD_FUNCTION_OUT);
return(CONNECTION_OK);
}
// to be improved
#ifdef CBA
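// draw a uniformly distributed random value in [min, max] using the taus() generator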
double uniform_rngen(int min, int max) {
double random = (double)taus()/((double)0xffffffff);
return (max - min) * random + min;
}
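// Decide whether this UE may transmit in the current contention-based access (CBA)
// opportunity: when an LCG (1..3) has data buffered and the CBA backoff counter has
// expired, re-arm a random backoff for the next opportunity and return 1 (transmit now);
// otherwise decrement the backoff and return 0.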
int use_cba_access(module_id_t module_idP,frame_t frameP,u8 subframe, u8 eNB_index){
if (( ((UE_mac_inst[module_idP].scheduling_info.BSR[LCGID1]>0)&&(UE_mac_inst[module_idP].scheduling_info.BSR[LCGID1]<64)) ||
((UE_mac_inst[module_idP].scheduling_info.BSR[LCGID2]>0)&&(UE_mac_inst[module_idP].scheduling_info.BSR[LCGID2]<64)) ||
((UE_mac_inst[module_idP].scheduling_info.BSR[LCGID3]>0)&&(UE_mac_inst[module_idP].scheduling_info.BSR[LCGID3]<64)) )
// && (UE_mac_inst[module_idP].ul_active == 0) // check if the ul is acrtive
&& (UE_mac_inst[module_idP].cba_last_access[0] <= 0) ) { // backoff
// LOG_D(MAC,"[UE %d] Frame %d Subframe %d: the current CBA backoff is %d \n", module_idP, frameP, subframe,
// UE_mac_inst[module_idP].cba_last_access[0] );
UE_mac_inst[module_idP].cba_last_access[0]= round(uniform_rngen(1,10));
LOG_D(MAC,"[UE %d] Frame %d Subframe %d: start a new CBA backoff %d UL active state %d \n", module_idP, frameP, subframe,
UE_mac_inst[module_idP].cba_last_access[0], UE_mac_inst[module_idP].ul_active);
return 1;
} else if (( ((UE_mac_inst[module_idP].scheduling_info.BSR[LCGID1]> 0 )) ||
((UE_mac_inst[module_idP].scheduling_info.BSR[LCGID2]> 0 )) ||
((UE_mac_inst[module_idP].scheduling_info.BSR[LCGID3]> 0 )) )
// && (UE_mac_inst[module_idP].ul_active == 0) // check if the ul is acrtive
&& (UE_mac_inst[module_idP].cba_last_access[0]> 0) ){
UE_mac_inst[module_idP].cba_last_access[0]-=1;
LOG_D(MAC,"[UE %d] Frame %d Subframe %d: CBA backoff is decreased by one to %d UL active state %d \n",
module_idP, frameP, subframe,
UE_mac_inst[module_idP].cba_last_access[0], UE_mac_inst[module_idP].ul_active);
} /*else if (( ((UE_mac_inst[module_idP].scheduling_info.BSR[LCGID1] == 0 )) &&
((UE_mac_inst[module_idP].scheduling_info.BSR[LCGID2] == 0 )) &&
((UE_mac_inst[module_idP].scheduling_info.BSR[LCGID3] == 0 )) )
&& (UE_mac_inst[module_idP].cba_last_access[0]> 0) ){
UE_mac_inst[module_idP].cba_last_access[0]-=1;
}*/
return 0;
}
#endif
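// Return the LCG to report in the BSR: -1 when no LCG has buffered data, the single
// active LCGID when exactly one does, and MAX_NUM_LCGID when several do (long BSR case).
// (Behaviour inferred from how ue_get_sdu uses the return value.)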
int get_bsr_lcgid (module_id_t module_idP){
int lcgid, lcgid_tmp=-1;
int num_active_lcgid = 0;
for (lcgid = 0 ; lcgid < MAX_NUM_LCGID; lcgid++){
if (UE_mac_inst[module_idP].scheduling_info.BSR[lcgid] > 0 ){
lcgid_tmp = lcgid;
num_active_lcgid+=1;
}
}
if (num_active_lcgid == 0)
return -1; // no LCG has data buffered
else if (num_active_lcgid == 1)
return lcgid_tmp; // exactly one LCG is active: short BSR for this LCGID
else
return MAX_NUM_LCGID; // several LCGs are active: long BSR
}
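// Estimate whether a BSR control element is needed for this grant: accumulate the bytes
// (plus subheaders) required to drain each LCG and, when the grant buflen cannot carry
// them all, request a short BSR (one LCG pending) or a long BSR (several LCGs pending).
// Returns sizeof(BSR_SHORT), sizeof(BSR_LONG) or 0.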
u8 get_bsr_len (module_id_t module_idP, u16 buflen) {
int lcgid=0;
u8 bsr_len=0, num_lcgid=0;
int pdu = 0;
for (lcgid=0; lcgid < MAX_NUM_LCGID; lcgid++ ) {
if (UE_mac_inst[module_idP].scheduling_info.BSR_bytes[lcgid] > 0 )
pdu += (UE_mac_inst[module_idP].scheduling_info.BSR_bytes[lcgid] + bsr_len + 2); //2 = sizeof(SCH_SUBHEADER_SHORT)
if (UE_mac_inst[module_idP].scheduling_info.BSR_bytes[lcgid] > 128 ) // long header size: adjust the header size
pdu += 1;
// current phy buff can not transport all sdu for this lcgid -> transmit a bsr for this lcgid
if ( (pdu > buflen) && (UE_mac_inst[module_idP].scheduling_info.BSR_bytes[lcgid] > 0 ) ){
num_lcgid +=1;
bsr_len = (num_lcgid >= 2 ) ? sizeof(BSR_LONG) : sizeof(BSR_SHORT) ;
}
LOG_D(MAC,"BSR Bytes %d for lcgid %d bsr len %d num lcgid %d\n", UE_mac_inst[module_idP].scheduling_info.BSR_bytes[lcgid], lcgid, bsr_len, num_lcgid);
}
if ( bsr_len > 0 )
LOG_D(MAC,"[UE %d] Prepare a %s (Transport Block Size %d, MAC pdu Size %d) \n",
module_idP, map_int_to_str(BSR_names, bsr_len), buflen, pdu);
return bsr_len;
}
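// Refresh the buffer status for one logical channel: query the RLC buffer occupancy,
// map it onto a BSR index via locate()/BSR_TABLE and accumulate the per-LCG byte count.
// Returns non-zero when data is pending (used by the caller to raise a scheduling request).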
boolean_t update_bsr(module_id_t module_idP, frame_t frameP, u8 lcid, u8 lcg_id){
mac_rlc_status_resp_t rlc_status;
boolean_t sr_pending = 0;
if ((lcg_id < 0) || (lcg_id > MAX_NUM_LCGID) )
return sr_pending;
// fixme: need a better way to reset
if ((lcid == DCCH) || (lcid == DTCH)){
UE_mac_inst[module_idP].scheduling_info.BSR[lcg_id]=0;
UE_mac_inst[module_idP].scheduling_info.BSR_bytes[lcg_id]=0;
}
// for (lcid =0 ; lcid < MAX_NUM_LCID; lcid++) {
if (UE_mac_inst[module_idP].scheduling_info.LCGID[lcid] == lcg_id) {
rlc_status = mac_rlc_status_ind(0, module_idP,frameP,ENB_FLAG_NO,MBMS_FLAG_NO,
lcid,
0);
if (rlc_status.bytes_in_buffer > 0 ) {
sr_pending = 1;
UE_mac_inst[module_idP].scheduling_info.LCID_status[lcid] = LCID_NOT_EMPTY;
UE_mac_inst[module_idP].scheduling_info.BSR[lcg_id] += locate (BSR_TABLE,BSR_TABLE_SIZE, rlc_status.bytes_in_buffer);
UE_mac_inst[module_idP].scheduling_info.BSR_bytes[lcg_id] += rlc_status.bytes_in_buffer;
// UE_mac_inst[module_idP].scheduling_info.BSR_short_lcid = lcid; // only applicable to short bsr
LOG_D(MAC,"[UE %d] BSR level %d (LCGID %d, rlc buffer %d byte)\n",
module_idP, UE_mac_inst[module_idP].scheduling_info.BSR[lcg_id],lcg_id, UE_mac_inst[module_idP].scheduling_info.BSR_bytes[lcg_id]);
}
else
UE_mac_inst[module_idP].scheduling_info.LCID_status[lcid]=LCID_EMPTY;
}
//}
return sr_pending;
}
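// Binary search over BSR_TABLE: for a value within the table range, return the smallest
// index whose entry is >= value. For example, with an ascending table {0,10,12,14,...}
// (illustrative values, not the actual BSR_TABLE contents) a value of 11 falls between
// indices 1 and 2 and the function returns 2; an exact match returns its own index.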
u8 locate (const u32 *table, int size, int value){
u8 ju, jm, jl;
int ascend;
if (value == 0) return 0; //elseif (value > 150000) return 63;
jl = 0; // lower bound
ju = size; // upper bound
ascend = (table[size-1] >= table[jl]) ? 1 : 0; // determine the order of the table: 1 if ascending, 0 otherwise (avoid reading table[size])
while (ju-jl > 1) { //If we are not yet done,
jm = (ju+jl) >> 1; //compute a midpoint,
if ((value >= table[jm]) == ascend)
jl=jm; // replace the lower limit
else
ju=jm; //replace the upper limit
LOG_T(MAC,"[UE] searching BSR index %d for (BSR TABLE %d < value %d)\n", jm, table[jm], value);
}
if (value == table[jl]) return jl;
else return jl+1; //equally ju
}
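// Map the RRC periodicBSR-Timer enumerated value onto a number of subframes (-1 for infinity)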
int get_sf_periodicBSRTimer(u8 sf_offset){
switch (sf_offset) {
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf5:
return 5;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf10:
return 10;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf16:
return 16;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf20:
return 20;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf32:
return 32;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf40:
return 40;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf64:
return 64;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf80:
return 80;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf128:
return 128;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf160:
return 160;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf320:
return 320;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf640:
return 640;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf1280:
return 1280;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_sf2560:
return 2560;
break;
case MAC_MainConfig__ul_SCH_Config__periodicBSR_Timer_infinity:
default:
return -1;
break;
}
}
int get_sf_retxBSRTimer(u8 sf_offset){
switch (sf_offset) {
case MAC_MainConfig__ul_SCH_Config__retxBSR_Timer_sf320:
return 320;
break;
case MAC_MainConfig__ul_SCH_Config__retxBSR_Timer_sf640:
return 640;
break;
case MAC_MainConfig__ul_SCH_Config__retxBSR_Timer_sf1280:
return 1280;
break;
case MAC_MainConfig__ul_SCH_Config__retxBSR_Timer_sf2560:
return 2560;
break;
case MAC_MainConfig__ul_SCH_Config__retxBSR_Timer_sf5120:
return 5120;
break;
case MAC_MainConfig__ul_SCH_Config__retxBSR_Timer_sf10240:
return 10240;
break;
default:
return -1;
break;
}
}
int get_ms_bucketsizeduration(u8 bucketsizeduration){
switch (bucketsizeduration) {
case LogicalChannelConfig__ul_SpecificParameters__bucketSizeDuration_ms50:
return 50;
break;
case LogicalChannelConfig__ul_SpecificParameters__bucketSizeDuration_ms100:
return 100;
break;
case LogicalChannelConfig__ul_SpecificParameters__bucketSizeDuration_ms150:
return 150;
break;
case LogicalChannelConfig__ul_SpecificParameters__bucketSizeDuration_ms300:
return 300;
break;
case LogicalChannelConfig__ul_SpecificParameters__bucketSizeDuration_ms500:
return 500;
break;
case LogicalChannelConfig__ul_SpecificParameters__bucketSizeDuration_ms1000:
return 1000;
break;
default:
return 0;
break;
}
}
void update_phr(module_id_t module_idP){
UE_mac_inst[module_idP].PHR_reporting_active =0;
UE_mac_inst[module_idP].scheduling_info.periodicPHR_SF = get_sf_perioidicPHR_Timer(UE_mac_inst[module_idP].scheduling_info.periodicPHR_Timer);
UE_mac_inst[module_idP].scheduling_info.prohibitPHR_SF = get_sf_prohibitPHR_Timer(UE_mac_inst[module_idP].scheduling_info.prohibitPHR_Timer);
// LOG_D(MAC,"phr %d %d\n ",UE_mac_inst[module_idP].scheduling_info.periodicPHR_SF, UE_mac_inst[module_idP].scheduling_info.prohibitPHR_SF);
u8 get_phr_mapping (module_id_t module_idP, u8 eNB_index){
//power headroom reporting range is from -23 ... +40 dB, as described in TS 36.133
//note: mac_xface->get_Po_NOMINAL_PUSCH(module_idP) is float
if (mac_xface->get_PHR(module_idP,eNB_index) < -23)
return 0;
else if (mac_xface->get_PHR(module_idP,eNB_index) >= 40)
return 63;
else // -23 to 40
return (u8) mac_xface->get_PHR(module_idP,eNB_index) + PHR_MAPPING_OFFSET;
}
int get_sf_perioidicPHR_Timer(u8 perioidicPHR_Timer){
return (perioidicPHR_Timer+1)*10;
}
int get_sf_prohibitPHR_Timer(u8 prohibitPHR_Timer){
return (prohibitPHR_Timer)*10;
}
int get_db_dl_PathlossChange(u8 dl_PathlossChange){
switch (dl_PathlossChange){
case MAC_MainConfig__phr_Config__setup__dl_PathlossChange_dB1:
return 1;
break;
case MAC_MainConfig__phr_Config__setup__dl_PathlossChange_dB3:
return 3;
break;
case MAC_MainConfig__phr_Config__setup__dl_PathlossChange_dB6:
return 6;
break;
case MAC_MainConfig__phr_Config__setup__dl_PathlossChange_infinity:
default:
return -1;
break;
}
}