Worker.N / openairinterface5G / Commits / 10e91d4e

Commit 10e91d4e
Authored 7 years ago by Niccolò Iardella, committed 7 years ago by nikaeinn

    Move the accounting phase of the DL pre-processor in a separate procedure

Parent: 5357ef20
No related branches, tags or merge requests found.

Changes: 2 changed files, with 589 additions and 590 deletions
    openair2/LAYER2/MAC/pre_processor.c   +589  −585
    openair2/LAYER2/MAC/proto.h           +0    −5
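For orientation, a rough sketch of the per-TTI flow of dlsch_scheduler_pre_processor()
after this change, as read from the diff below (the function names are the ones used
in the code; the one-line descriptions paraphrase its comments):

    dlsch_scheduler_pre_processor(Mod_id, slice_id, frameP, subframeP, N_RBG, mbsfn_flag)
      -> dlsch_scheduler_pre_processor_reset(...)       // per active UE/CC: clear pre-allocation state
      -> store_dlsch_buffer(...)                        // DLSCH buffer per logical channel
      -> assign_rbs_required(...)                       // RB demand per UE from buffer occupancy
      -> sort_UEs(...)                                  // priority order (buffer, CQI)
      -> dlsch_scheduler_pre_processor_accounting(...)  // new: the accounting phase moved here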
openair2/LAYER2/MAC/pre_processor.c  +589  −585  (view file @ 10e91d4e)

@@ -593,588 +593,591 @@ void sort_UEs(module_id_t Mod_idP, slice_id_t slice_id, int frameP, sub_frame_t
 #endif
 }
void
dlsch_scheduler_pre_processor_accounting(module_id_t Mod_id,
                                         slice_id_t slice_id,
                                         frame_t frameP,
                                         sub_frame_t subframeP,
                                         int N_RBG[MAX_NUM_CCs],
                                         int min_rb_unit[MAX_NUM_CCs],
                                         uint8_t rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
                                         uint8_t MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX],
                                         uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX])
{
  int UE_id, CC_id;
  int ii, r1;
  rnti_t rnti;
  uint8_t harq_pid, round, transmission_mode;
  uint8_t total_rbs_used[MAX_NUM_CCs];
  uint8_t total_ue_count[MAX_NUM_CCs];
  uint16_t average_rbs_per_user[MAX_NUM_CCs];
  uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  uint16_t nb_rbs_required_remaining_1[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  int N_RB_DL;
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
  UE_sched_ctrl *ue_sched_ctl;
  COMMON_channels_t *cc;

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    total_ue_count[CC_id] = 0;
    total_rbs_used[CC_id] = 0;
    average_rbs_per_user[CC_id] = 0;

    for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; ++UE_id) {
      nb_rbs_required_remaining[CC_id][UE_id] = 0;
    }
  }

  // loop over all active UEs
  for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
    rnti = UE_RNTI(Mod_id, UE_id);

    if (rnti == NOT_A_RNTI)
      continue;
    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
      continue;
    if (!ue_slice_membership(UE_id, slice_id))
      continue;

    for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) {
      CC_id = UE_list->ordered_CCids[ii][UE_id];
      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
      cc = &RC.mac[Mod_id]->common_channels[CC_id];

      // TODO Can we use subframe2harqpid() here?
      if (cc->tdd_Config)
        harq_pid = ((frameP * 10) + subframeP) % 10;
      else
        harq_pid = ((frameP * 10) + subframeP) & 7;

      round = ue_sched_ctl->round[CC_id][harq_pid];
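      /* Illustrative example (added for clarity, not part of the diff): with
       * frameP = 1 and subframeP = 7, (frameP * 10 + subframeP) = 17; the FDD
       * branch gives harq_pid = 17 & 7 = 1 (8 HARQ processes), while the TDD
       * branch gives harq_pid = 17 % 10 = 7. The TODO above asks whether the
       * existing subframe2harqpid() helper could replace this open-coded
       * mapping. */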
      if (round != 8) {
        nb_rbs_required[CC_id][UE_id] = UE_list->UE_template[CC_id][UE_id].nb_rb[harq_pid];
        total_rbs_used[CC_id] += nb_rbs_required[CC_id][UE_id];
      }

      //nb_rbs_required_remaining[UE_id] = nb_rbs_required[UE_id];
      if (nb_rbs_required[CC_id][UE_id] > 0) {
        total_ue_count[CC_id] = total_ue_count[CC_id] + 1;
      }
    }
  }
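  /* Explanatory note (added, not in the original code): after this loop,
   * total_rbs_used[CC_id] holds the RBs already claimed by HARQ
   * retransmissions (nb_rb[harq_pid] of every UE whose round != 8, i.e. whose
   * HARQ process appears to be active), and total_ue_count[CC_id] the number
   * of UEs with a non-zero RB requirement on that CC. */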
  // loop over all active UEs and calculate avg rb per user based on total active UEs
  for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
    rnti = UE_RNTI(Mod_id, UE_id);

    if (rnti == NOT_A_RNTI)
      continue;
    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
      continue;
    if (!ue_slice_membership(UE_id, slice_id))
      continue;

    for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) {
      CC_id = UE_list->ordered_CCids[ii][UE_id];

      // hypothetical assignment
      /*
       * If scheduling is enabled and the priority of the UEs is modified, the
       * average RBs per logical channel per user will depend on the level of
       * priority. Concerning the hypothetical assignment, we should assign
       * more RBs to prioritized users. Maybe we can do a mapping between the
       * average RBs per user and the level of priority, or multiply the
       * average RBs per user by a coefficient which represents the degree of
       * priority.
       */
      N_RB_DL = to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth)
                - total_rbs_used[CC_id];

      // recalculate based on what is left after retransmissions
      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
      ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] =
          flexran_nb_rbs_allowed_slice(slice_percentage[slice_id], N_RB_DL);

      if (total_ue_count[CC_id] == 0) {
        average_rbs_per_user[CC_id] = 0;
      } else if ((min_rb_unit[CC_id] * total_ue_count[CC_id]) <=
                 (ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id])) {
        average_rbs_per_user[CC_id] =
            (uint16_t) floor(ue_sched_ctl->max_rbs_allowed_slice[CC_id][slice_id] /
                             total_ue_count[CC_id]);
      } else {
        // consider the total number of UEs that can be scheduled
        average_rbs_per_user[CC_id] = min_rb_unit[CC_id];
      }
    }
  }
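  /* Worked example (illustrative values, not from the diff): suppose the CC
   * has 50 - total_rbs_used = 40 PRBs left after retransmissions and
   * flexran_nb_rbs_allowed_slice() grants this slice 20 of them. With
   * total_ue_count = 3 and min_rb_unit = 3, 3 * 3 = 9 <= 20, so
   * average_rbs_per_user = floor(20 / 3) = 6; with 8 UEs instead,
   * 3 * 8 = 24 > 20 and every UE falls back to min_rb_unit = 3. */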
  // note: nb_rbs_required is assigned according to total_buffer_dl
  // extend nb_rbs_required to capture per-LCID required RBs
  for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
    rnti = UE_RNTI(Mod_id, UE_id);

    if (rnti == NOT_A_RNTI)
      continue;
    if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
      continue;
    if (!ue_slice_membership(UE_id, slice_id))
      continue;

    for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) {
      CC_id = UE_list->ordered_CCids[ii][UE_id];
      ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
      cc = &RC.mac[Mod_id]->common_channels[CC_id];

      // TODO Can we use subframe2harqpid() here?
      if (cc->tdd_Config)
        harq_pid = ((frameP * 10) + subframeP) % 10;
      else
        harq_pid = ((frameP * 10) + subframeP) & 7;

      round = ue_sched_ctl->round[CC_id][harq_pid];

      // control channel or retransmission
      /* TODO: do we have to check for retransmission? */
      if (mac_eNB_get_rrc_status(Mod_id, rnti) < RRC_RECONFIGURED || round > 0) {
        nb_rbs_required_remaining_1[CC_id][UE_id] = nb_rbs_required[CC_id][UE_id];
      } else {
        nb_rbs_required_remaining_1[CC_id][UE_id] =
            cmin(average_rbs_per_user[CC_id], nb_rbs_required[CC_id][UE_id]);
      }
    }
  }

  // Allocation to UEs is done in 2 rounds,
  // 1st stage: average number of RBs allocated to each UE
  // 2nd stage: remaining RBs are allocated to high-priority UEs
  for (r1 = 0; r1 < 2; r1++) {
    for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
      for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) {
        CC_id = UE_list->ordered_CCids[ii][UE_id];

        if (r1 == 0) {
          nb_rbs_required_remaining[CC_id][UE_id] = nb_rbs_required_remaining_1[CC_id][UE_id];
        } else {
          // RBs required based only on the buffer, minus RBs allocated in the
          // 1st round, plus the extra remaining RBs from the 1st round
          nb_rbs_required_remaining[CC_id][UE_id] =
              nb_rbs_required[CC_id][UE_id]
              - nb_rbs_required_remaining_1[CC_id][UE_id]
              + nb_rbs_required_remaining[CC_id][UE_id];

          if (nb_rbs_required_remaining[CC_id][UE_id] < 0)
            abort();
        }

        if (nb_rbs_required[CC_id][UE_id] > 0)
          LOG_D(MAC,
                "round %d : nb_rbs_required_remaining[%d][%d]= %d (remaining_1 %d, required %d, pre_nb_available_rbs %d, N_RBG %d, rb_unit %d)\n",
                r1, CC_id, UE_id,
                nb_rbs_required_remaining[CC_id][UE_id],
                nb_rbs_required_remaining_1[CC_id][UE_id],
                nb_rbs_required[CC_id][UE_id],
                UE_list->UE_sched_ctrl[UE_id].pre_nb_available_rbs[CC_id],
                N_RBG[CC_id],
                min_rb_unit[CC_id]);
      }
    }
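    /* Illustrative trace of the two-stage bookkeeping (values assumed): a UE
     * with nb_rbs_required = 17 and average_rbs_per_user = 10 gets
     * remaining_1 = min(10, 17) = 10. Stage r1 = 0 therefore starts from
     * remaining = 10; if the allocator leaves, say, 2 of those unserved,
     * stage r1 = 1 recomputes remaining = 17 - 10 + 2 = 9, i.e. the part of
     * the demand beyond the fair share plus whatever the first pass left
     * over. (Added comment, not in the original code.) */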
    for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
      for (ii = 0; ii < UE_num_active_CC(UE_list, UE_id); ii++) {
        CC_id = UE_list->ordered_CCids[ii][UE_id];

        // if there are UEs with traffic
        if (total_ue_count[CC_id] > 0) {
          // ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
          // round = ue_sched_ctl->round[CC_id][harq_pid];
          rnti = UE_RNTI(Mod_id, UE_id);
          // LOG_D(MAC,"UE %d rnti 0x\n", UE_id, rnti );

          if (rnti == NOT_A_RNTI)
            continue;
          if (UE_list->UE_sched_ctrl[UE_id].ul_out_of_sync == 1)
            continue;
          if (!ue_slice_membership(UE_id, slice_id))
            continue;

          transmission_mode = get_tmode(Mod_id, CC_id, UE_id);
          // mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti,frameP,subframeP,&harq_pid,&round,0);
          // rrc_status = mac_eNB_get_rrc_status(Mod_id,rnti);
          /* 1st allocate for the retx */
          // retransmission in data channels
          // control channel in the 1st transmission
          // data channel for all TM
          LOG_T(MAC, "calling dlsch_scheduler_pre_processor_allocate ..\n");
          dlsch_scheduler_pre_processor_allocate(Mod_id, UE_id, CC_id,
                                                 N_RBG[CC_id],
                                                 transmission_mode,
                                                 min_rb_unit[CC_id],
                                                 to_prb(RC.mac[Mod_id]->common_channels[CC_id].mib->message.dl_Bandwidth),
                                                 nb_rbs_required,
                                                 nb_rbs_required_remaining,
                                                 rballoc_sub,
                                                 MIMO_mode_indicator);

#ifdef TM5
          // data channel TM5: to be revisited
          if ((round == 0) && (transmission_mode == 5)
              && (ue_sched_ctl->dl_pow_off[CC_id] != 1)) {
            for (j = 0; j < N_RBG[CC_id]; j += 2) {
              if ((((j == (N_RBG[CC_id] - 1))
                    && (rballoc_sub[CC_id][j] == 0)
                    && (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 0))
                   || ((j < (N_RBG[CC_id] - 1))
                       && (rballoc_sub[CC_id][j + 1] == 0)
                       && (ue_sched_ctl->rballoc_sub_UE[CC_id][j + 1] == 0)))
                  && (nb_rbs_required_remaining[CC_id][UE_id] > 0)) {

                for (ii = UE_list->next[UE_id + 1]; ii >= 0; ii = UE_list->next[ii]) {
                  UE_id2 = ii;
                  rnti2 = UE_RNTI(Mod_id, UE_id2);
                  ue_sched_ctl2 = &UE_list->UE_sched_ctrl[UE_id2];
                  round2 = ue_sched_ctl2->round[CC_id];

                  if (rnti2 == NOT_A_RNTI)
                    continue;
                  if (UE_list->UE_sched_ctrl[UE_id2].ul_out_of_sync == 1)
                    continue;

                  eNB_UE_stats2 = UE_list->eNB_UE_stats[CC_id][UE_id2];
                  //mac_xface->get_ue_active_harq_pid(Mod_id,CC_id,rnti2,frameP,subframeP,&harq_pid2,&round2,0);

                  if ((mac_eNB_get_rrc_status(Mod_id, rnti2) >= RRC_RECONFIGURED)
                      && (round2 == 0)
                      && (get_tmode(Mod_id, CC_id, UE_id2) == 5)
                      && (ue_sched_ctl->dl_pow_off[CC_id] != 1)) {
                    if ((((j == (N_RBG[CC_id] - 1))
                          && (ue_sched_ctl->rballoc_sub_UE[CC_id][j] == 0))
                         || ((j < (N_RBG[CC_id] - 1))
                             && (ue_sched_ctl->rballoc_sub_UE[CC_id][j + 1] == 0)))
                        && (nb_rbs_required_remaining[CC_id][UE_id2] > 0)) {
                      if ((((eNB_UE_stats2->DL_pmi_single ^ eNB_UE_stats1->DL_pmi_single)
                            << (14 - j)) & 0xc000) == 0x4000) {
                        //MU-MIMO only for 25 RBs configuration
                        rballoc_sub[CC_id][j] = 1;
                        ue_sched_ctl->rballoc_sub_UE[CC_id][j] = 1;
                        ue_sched_ctl2->rballoc_sub_UE[CC_id][j] = 1;
                        MIMO_mode_indicator[CC_id][j] = 0;

                        if (j < N_RBG[CC_id] - 1) {
                          rballoc_sub[CC_id][j + 1] = 1;
                          ue_sched_ctl->rballoc_sub_UE[CC_id][j + 1] = 1;
                          ue_sched_ctl2->rballoc_sub_UE[CC_id][j + 1] = 1;
                          MIMO_mode_indicator[CC_id][j + 1] = 0;
                        }

                        ue_sched_ctl->dl_pow_off[CC_id] = 0;
                        ue_sched_ctl2->dl_pow_off[CC_id] = 0;

                        if ((j == N_RBG[CC_id] - 1)
                            && ((N_RB_DL == 25) || (N_RB_DL == 50))) {
                          nb_rbs_required_remaining[CC_id][UE_id] =
                              nb_rbs_required_remaining[CC_id][UE_id] - min_rb_unit[CC_id] + 1;
                          ue_sched_ctl->pre_nb_available_rbs[CC_id] =
                              ue_sched_ctl->pre_nb_available_rbs[CC_id] + min_rb_unit[CC_id] - 1;
                          nb_rbs_required_remaining[CC_id][UE_id2] =
                              nb_rbs_required_remaining[CC_id][UE_id2] - min_rb_unit[CC_id] + 1;
                          ue_sched_ctl2->pre_nb_available_rbs[CC_id] =
                              ue_sched_ctl2->pre_nb_available_rbs[CC_id] + min_rb_unit[CC_id] - 1;
                        } else {
                          nb_rbs_required_remaining[CC_id][UE_id] =
                              nb_rbs_required_remaining[CC_id][UE_id] - 4;
                          ue_sched_ctl->pre_nb_available_rbs[CC_id] =
                              ue_sched_ctl->pre_nb_available_rbs[CC_id] + 4;
                          nb_rbs_required_remaining[CC_id][UE_id2] =
                              nb_rbs_required_remaining[CC_id][UE_id2] - 4;
                          ue_sched_ctl2->pre_nb_available_rbs[CC_id] =
                              ue_sched_ctl2->pre_nb_available_rbs[CC_id] + 4;
                        }

                        break;
                      }
                    }
                  }
                }
              }
            }
          }
#endif
        } // total_ue_count
      } // CC
    } // UE
  } // end of the r1 loop
}
// This function assigns pre-available RBs to each UE in specified sub-bands before scheduling is done
void
dlsch_scheduler_pre_processor(module_id_t Mod_id,
                              slice_id_t slice_id,
                              frame_t frameP,
                              sub_frame_t subframeP,
                              int N_RBG[MAX_NUM_CCs],
                              int *mbsfn_flag)
{
  int UE_id;
  uint8_t CC_id;
  uint16_t i, j;
  uint8_t rballoc_sub[MAX_NUM_CCs][N_RBG_MAX];
  uint8_t MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX];
  int min_rb_unit[MAX_NUM_CCs];
  uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX];
  UE_list_t *UE_list = &RC.mac[Mod_id]->UE_list;
  UE_sched_ctrl *ue_sched_ctl;
  // int rrc_status = RRC_IDLE;

#ifdef TM5
  int harq_pid1 = 0;
  int round1 = 0, round2 = 0;
  int UE_id2;
  uint16_t i1, i2, i3;
  rnti_t rnti1, rnti2;
  LTE_eNB_UE_stats *eNB_UE_stats1 = NULL;
  LTE_eNB_UE_stats *eNB_UE_stats2 = NULL;
  UE_sched_ctrl *ue_sched_ctl1, *ue_sched_ctl2;
#endif

  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    if (mbsfn_flag[CC_id] > 0)  // If this CC is allocated for MBSFN skip it here
      continue;

    min_rb_unit[CC_id] = get_min_rb_unit(Mod_id, CC_id);

    for (UE_id = 0; UE_id < NUMBER_OF_UE_MAX; ++UE_id) {
      if (UE_list->active[UE_id] != TRUE)
        continue;
      if (!ue_slice_membership(UE_id, slice_id))
        continue;

      // Initialize scheduling information for all active UEs
      dlsch_scheduler_pre_processor_reset(Mod_id, UE_id, CC_id,
                                          frameP, subframeP,
                                          N_RBG[CC_id],
                                          nb_rbs_required,
                                          rballoc_sub,
                                          MIMO_mode_indicator);
    }
  }

  // Store the DLSCH buffer for each logical channel
  store_dlsch_buffer(Mod_id, slice_id, frameP, subframeP);

  // Calculate the number of RBs required by each UE on the basis of logical channel's buffer
  assign_rbs_required(Mod_id, slice_id, frameP, subframeP, nb_rbs_required, min_rb_unit);

  // Sorts the user on the basis of dlsch logical channel buffer and CQI
  sort_UEs(Mod_id, slice_id, frameP, subframeP);

  // This function does the main allocation of the number of RBs
  dlsch_scheduler_pre_processor_accounting(Mod_id, slice_id, frameP, subframeP,
                                           N_RBG, min_rb_unit,
                                           rballoc_sub, MIMO_mode_indicator,
                                           nb_rbs_required);

#ifdef TM5
  // This has to be revisited!!!!
  for (CC_id = 0; CC_id < MAX_NUM_CCs; CC_id++) {
    i1 = 0;
    i2 = 0;
    i3 = 0;

    for (j = 0; j < N_RBG[CC_id]; j++) {
      if (MIMO_mode_indicator[CC_id][j] == 2) {
        i1 = i1 + 1;
      } else if (MIMO_mode_indicator[CC_id][j] == 1) {
        i2 = i2 + 1;
      } else if (MIMO_mode_indicator[CC_id][j] == 0) {
        i3 = i3 + 1;
      }
    }

    if ((i1 < N_RBG[CC_id]) && (i2 > 0) && (i3 == 0)) {
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions =
          PHY_vars_eNB_g[Mod_id][CC_id]->check_for_SUMIMO_transmissions + 1;
    }

    if (i3 == N_RBG[CC_id] && i1 == 0 && i2 == 0) {
      PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions =
          PHY_vars_eNB_g[Mod_id][CC_id]->FULL_MUMIMO_transmissions + 1;
    }

    if ((i1 < N_RBG[CC_id]) && (i3 > 0)) {
      PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions =
          PHY_vars_eNB_g[Mod_id][CC_id]->check_for_MUMIMO_transmissions + 1;
    }

    PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions =
        PHY_vars_eNB_g[Mod_id][CC_id]->check_for_total_transmissions + 1;
  }
#endif

  for (UE_id = UE_list->head; UE_id >= 0; UE_id = UE_list->next[UE_id]) {
    ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];

    for (i = 0; i < UE_num_active_CC(UE_list, UE_id); i++) {
      CC_id = UE_list->ordered_CCids[i][UE_id];
      //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].dl_pow_off = dl_pow_off[UE_id];

      if (ue_sched_ctl->pre_nb_available_rbs[CC_id] > 0) {
        LOG_D(MAC, "******************DL Scheduling Information for UE%d ************************\n", UE_id);
        LOG_D(MAC, "dl power offset UE%d = %d\n", UE_id, ue_sched_ctl->dl_pow_off[CC_id]);
        LOG_D(MAC, "***********RB Alloc for every subband for UE%d ***********\n", UE_id);

        for (j = 0; j < N_RBG[CC_id]; j++) {
          //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].rballoc_sub[i] = rballoc_sub_UE[CC_id][UE_id][i];
          LOG_D(MAC, "RB Alloc for UE%d and Subband%d = %d\n",
                UE_id, j, ue_sched_ctl->rballoc_sub_UE[CC_id][j]);
        }

        //PHY_vars_eNB_g[Mod_id]->mu_mimo_mode[UE_id].pre_nb_available_rbs = pre_nb_available_rbs[CC_id][UE_id];
        LOG_D(MAC, "[eNB %d][SLICE %d]Total RBs allocated for UE%d = %d\n",
              Mod_id, slice_id, UE_id, ue_sched_ctl->pre_nb_available_rbs[CC_id]);
      }
    }
  }
}
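/* Added note (summary of the code above, not in the original file): the
 * pre-processor leaves its per-UE, per-CC results in UE_sched_ctrl:
 * pre_nb_available_rbs[CC_id] (how many RBs were pre-allocated),
 * rballoc_sub_UE[CC_id][rbg] (which RBGs were reserved) and
 * dl_pow_off[CC_id] (MU-MIMO power-offset state). These are presumably the
 * fields the DLSCH scheduler reads next, as the LOG_D output above suggests. */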
#define SF0_LIMIT 1

void
dlsch_scheduler_pre_processor_reset(int module_idP,
                                    int UE_id,
                                    uint8_t CC_id,
                                    int frameP,
                                    int subframeP,
                                    int N_RBG,
                                    uint16_t nb_rbs_required[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
-                                   uint16_t nb_rbs_required_remaining[MAX_NUM_CCs][NUMBER_OF_UE_MAX],
-                                   unsigned char total_ue_count[MAX_NUM_CCs],
-                                   unsigned char total_rbs_used[MAX_NUM_CCs],
                                    unsigned char rballoc_sub[MAX_NUM_CCs][N_RBG_MAX],
                                    unsigned char MIMO_mode_indicator[MAX_NUM_CCs][N_RBG_MAX])
{
  int i, j;
  UE_list_t *UE_list = &RC.mac[module_idP]->UE_list;
  UE_sched_ctrl *ue_sched_ctl = &UE_list->UE_sched_ctrl[UE_id];
  rnti_t rnti = UE_RNTI(module_idP, UE_id);
  uint8_t *vrb_map = RC.mac[module_idP]->common_channels[CC_id].vrb_map;
  int N_RB_DL = to_prb(RC.mac[module_idP]->common_channels[CC_id].mib->message.dl_Bandwidth);
  int RBGsize = N_RB_DL / N_RBG, RBGsize_last;
#ifdef SF0_LIMIT
  int sf0_upper = -1, sf0_lower = -1;
#endif

  LOG_D(MAC, "Running preprocessor for UE %d (%x)\n", UE_id, rnti);

  // initialize harq_pid and round
  if (ue_sched_ctl->ta_timer)
    ue_sched_ctl->ta_timer--;

  /*
  eNB_UE_stats *eNB_UE_stats;
  ...

@@ -1234,95 +1237,96 @@ dlsch_scheduler_pre_processor_reset(int module_idP,

  }
  */

  nb_rbs_required[CC_id][UE_id] = 0;
  ue_sched_ctl->pre_nb_available_rbs[CC_id] = 0;
  ue_sched_ctl->dl_pow_off[CC_id] = 2;
-  nb_rbs_required_remaining[CC_id][UE_id] = 0;
-  total_ue_count[CC_id] = 0;
-  total_rbs_used[CC_id] = 0;

  switch (N_RB_DL) {
  case 6:
    RBGsize = 1;
    RBGsize_last = 1;
    break;
  case 15:
    RBGsize = 2;
    RBGsize_last = 1;
    break;
  case 25:
    RBGsize = 2;
    RBGsize_last = 1;
    break;
  case 50:
    RBGsize = 3;
    RBGsize_last = 2;
    break;
  case 75:
    RBGsize = 4;
    RBGsize_last = 3;
    break;
  case 100:
    RBGsize = 4;
    RBGsize_last = 4;
    break;
  default:
    AssertFatal(1 == 0, "unsupported RBs (%d)\n", N_RB_DL);
  }

#ifdef SF0_LIMIT
  switch (N_RBG) {
  case 6:
    sf0_lower = 0;
    sf0_upper = 5;
    break;
  case 8:
    sf0_lower = 2;
    sf0_upper = 5;
    break;
  case 13:
    sf0_lower = 4;
    sf0_upper = 7;
    break;
  case 17:
    sf0_lower = 7;
    sf0_upper = 9;
    break;
  case 25:
    sf0_lower = 11;
    sf0_upper = 13;
    break;
  default:
    AssertFatal(1 == 0, "unsupported RBs (%d)\n", N_RB_DL);
  }
#endif

  // Initialize Subbands according to VRB map
  for (i = 0; i < N_RBG; i++) {
    int rb_size = i == N_RBG - 1 ? RBGsize_last : RBGsize;

    ue_sched_ctl->rballoc_sub_UE[CC_id][i] = 0;
    rballoc_sub[CC_id][i] = 0;

#ifdef SF0_LIMIT
    // for avoiding 6+ PRBs around DC in subframe 0 (avoid excessive errors)
    /* TODO: make it proper - allocate those RBs, do not "protect" them, but
     * compute number of available REs and limit MCS according to the
     * TBS table 36.213 7.1.7.2.1-1 (can be done after pre-processor)
     */
    if (subframeP == 0 && i >= sf0_lower && i <= sf0_upper)
      rballoc_sub[CC_id][i] = 1;
#endif

    // for SI-RNTI, RA-RNTI and P-RNTI allocations
    for (j = 0; j < rb_size; j++) {
      if (vrb_map[j + (i * RBGsize)] != 0) {
        rballoc_sub[CC_id][i] = 1;
        LOG_D(MAC, "Frame %d, subframe %d : vrb %d allocated\n",
              frameP, subframeP, j + (i * RBGsize));
        break;
      }
    }

    LOG_D(MAC, "Frame %d Subframe %d CC_id %d RBG %i : rb_alloc %d\n",
          frameP, subframeP, CC_id, i, rballoc_sub[CC_id][i]);
    MIMO_mode_indicator[CC_id][i] = 2;
  }
}
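/* Worked example of the RBG bookkeeping above (illustrative, not from the
 * diff): for N_RB_DL = 50 the switch gives RBGsize = 3 and RBGsize_last = 2,
 * i.e. 16 RBGs of 3 PRBs plus a last RBG of 2 PRBs (16 * 3 + 2 = 50). An RBG
 * is pre-marked in rballoc_sub as soon as one of its PRBs is taken in the
 * vrb_map (SI/RA/P-RNTI allocations) or, in subframe 0 with SF0_LIMIT, falls
 * in the protected window [sf0_lower, sf0_upper] around DC. */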
...
openair2/LAYER2/MAC/proto.h  +0  −5  (view file @ 10e91d4e)

@@ -209,11 +209,6 @@ void dlsch_scheduler_pre_processor_reset(int module_idP, int UE_id,
                                          uint16_t
                                          nb_rbs_required[MAX_NUM_CCs]
                                          [NUMBER_OF_UE_MAX],
-                                         uint16_t
-                                         nb_rbs_required_remaining[MAX_NUM_CCs]
-                                         [NUMBER_OF_UE_MAX],
-                                         unsigned char total_ue_count[MAX_NUM_CCs],
-                                         unsigned char total_rbs_used[MAX_NUM_CCs],
                                          unsigned char
                                          rballoc_sub[MAX_NUM_CCs]
                                          [N_RBG_MAX],
...