Skip to content
Snippets Groups Projects
Commit e2c32228 authored by knopp's avatar knopp
Browse files

Remove EXPRESSMIMO #ifdefs in lte-ue; move UE-specific code from lte-softmodem.c to lte-ue.c

parent 58ba0089
No related branches found
No related tags found
No related merge requests found
......@@ -259,15 +259,6 @@ if (${ENABLE_ITTI})
endif (${ENABLE_ITTI})
add_boolean_option(RTAI False "Use RTAI")
if (${RTAI})
set(LOWLATENCY False)
set(CPU_AFFINITY False)
add_definitions("-DENABLE_RTAI_CLOCK")
add_definitions("-DCONFIG_RTAI_LXRT_INLINE")
include_directories ("/usr/realtime/include")
include_directories ("/usr/realtime/include/asm")
set(RTAI_SOURCE sched_dlsch.c sched_rx_pdsch.c rt_wrapper.c vcd_signal_dumper.c log.c)
endif (${RTAI})
#############################
# ASN.1 grammar C code generation & dependencies
......@@ -1574,8 +1565,6 @@ add_executable(lte-softmodem
${rrc_h}
${s1ap_h}
${OPENAIR_BIN_DIR}/messages_xml.h
${OPENAIR_TARGETS}/RT/USER/sched_dlsch.c
${OPENAIR_TARGETS}/RT/USER/sched_rx_pdsch.c
${OPENAIR_TARGETS}/RT/USER/rt_wrapper.c
${OPENAIR_TARGETS}/RT/USER/lte-ue.c
${OPENAIR_TARGETS}/RT/USER/lte-softmodem.c
......@@ -1611,8 +1600,6 @@ add_executable(lte-softmodem-nos1
${rrc_h}
${s1ap_h}
${OPENAIR_BIN_DIR}/messages_xml.h
${OPENAIR_TARGETS}/RT/USER/sched_dlsch.c
${OPENAIR_TARGETS}/RT/USER/sched_rx_pdsch.c
${OPENAIR_TARGETS}/RT/USER/rt_wrapper.c
${OPENAIR_TARGETS}/RT/USER/lte-ue.c
${OPENAIR_TARGETS}/RT/USER/lte-softmodem.c
......
......@@ -113,12 +113,18 @@ unsigned short config_frames[4] = {2,9,11,13};
#include "stats.h"
#endif
// In lte-enb.c
int setup_eNB_buffers(PHY_VARS_eNB **phy_vars_eNB, openair0_config_t *openair0_cfg, openair0_rf_map rf_map[MAX_NUM_CCs]);
int setup_ue_buffers(PHY_VARS_UE **phy_vars_ue, openair0_config_t *openair0_cfg, openair0_rf_map rf_map[MAX_NUM_CCs]);
extern void init_eNB(void);
extern void stop_eNB(void);
extern void kill_eNB_proc(void);
// In lte-ue.c
int setup_ue_buffers(PHY_VARS_UE **phy_vars_ue, openair0_config_t *openair0_cfg, openair0_rf_map rf_map[MAX_NUM_CCs]);
void fill_ue_band_info(void);
extern void init_UE(void);
#ifdef XFORMS
// current status is that every UE has a DL scope for a SINGLE eNB (eNB_id=0)
// at eNB 0, an UL scope for every UE
......@@ -130,9 +136,6 @@ unsigned char scope_enb_num_ue = 2;
#endif //XFORMS
pthread_t main_ue_thread;
pthread_attr_t attr_UE_thread;
......@@ -144,7 +147,6 @@ int sync_var=-1; //!< protected by mutex \ref sync_mutex.
struct sched_param sched_param_UE_thread;
#ifdef XFORMS
......@@ -284,9 +286,8 @@ openair0_config_t openair0_cfg[MAX_CARDS];
double cpuf;
char uecap_xer[1024],uecap_xer_in=0;
extern void *UE_thread(void *arg);
extern void init_UE_threads(void);
extern void kill_eNB_proc(void);
/*---------------------BMC: timespec helpers -----------------------------*/
......@@ -1120,7 +1121,6 @@ int main( int argc, char **argv )
uint16_t Nid_cell = 0;
uint8_t cooperation_flag=0, abstraction_flag=0;
uint8_t beta_ACK=0,beta_RI=0,beta_CQI=2;
int error_code;
#if defined (XFORMS)
int ret;
......@@ -1752,43 +1752,11 @@ int main( int argc, char **argv )
rt_sleep_ns(10*100000000ULL);
pthread_attr_init (&attr_UE_thread);
pthread_attr_setstacksize(&attr_UE_thread,8192);//5*PTHREAD_STACK_MIN);
#ifndef LOWLATENCY
sched_param_UE_thread.sched_priority = sched_get_priority_max(SCHED_FIFO);
pthread_attr_setschedparam(&attr_UE_thread,&sched_param_UE_thread);
#endif
// start the main thread
if (UE_flag == 1) {
printf("Intializing UE Threads ...\n");
init_UE_threads();
sleep(1);
error_code = pthread_create(&main_ue_thread, &attr_UE_thread, UE_thread, NULL);
if (error_code!= 0) {
LOG_D(HW,"[lte-softmodem.c] Could not allocate UE_thread, error %d\n",error_code);
return(error_code);
} else {
LOG_D( HW, "[lte-softmodem.c] Allocate UE_thread successful\n" );
pthread_setname_np( main_ue_thread, "main UE" );
}
printf("UE threads created\n");
#ifdef USE_MME
while (start_UE == 0) {
sleep(1);
}
#endif
} else {
init_eNB();
}
if (UE_flag == 1) init_UE();
else init_eNB();
// Sleep to allow all threads to setup
sleep(1);
......
This diff is collapsed.
/*******************************************************************************
OpenAirInterface
Copyright(c) 1999 - 2014 Eurecom
OpenAirInterface is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenAirInterface is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenAirInterface.The full GNU General Public License is
included in this distribution in the file called "COPYING". If not,
see <http://www.gnu.org/licenses/>.
Contact Information
OpenAirInterface Admin: openair_admin@eurecom.fr
OpenAirInterface Tech : openair_tech@eurecom.fr
OpenAirInterface Dev : openair4g-devel@lists.eurecom.fr
Address : Eurecom, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia Antipolis cedex, FRANCE
*******************************************************************************/
/*! \file sched_dlsch.c
* \brief DLSCH decoding thread (RTAI)
* \author R. Knopp, F. Kaltenberger
* \date 2011
* \version 0.1
* \company Eurecom
* \email: knopp@eurecom.fr,florian.kaltenberger@eurecom.fr
* \note
* \warning
*/
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>
#include "rt_wrapper.h"
#include <sys/mman.h>
#include "PHY/types.h"
#include "PHY/defs.h"
#include "PHY/extern.h"
#include "SCHED/defs.h"
#include "UTIL/LOG/vcd_signal_dumper.h"
#define DEBUG_PHY
/// Mutex for instance count on dlsch scheduling
pthread_mutex_t dlsch_mutex[8];
/// Condition variable for dlsch thread
pthread_cond_t dlsch_cond[8];
pthread_t dlsch_threads[8];
pthread_attr_t attr_dlsch_threads;
unsigned char dlsch_thread_indices[8];
// activity indicators for harq_pid's
int dlsch_instance_cnt[8];
// process ids for cpu
int dlsch_cpuid[8];
// subframe number for each harq_pid (needed to store ack in right place for UL)
int dlsch_subframe[8];
extern int oai_exit;
/*
extern int dlsch_errors;
extern int dlsch_received;
extern int dlsch_errors_last;
extern int dlsch_received_last;
extern int dlsch_fer;
extern int current_dlsch_cqi;
*/
/** DLSCH Decoding Thread */
/** DLSCH Decoding Thread (UE side).
 *
 *  One worker runs per HARQ process; param points into
 *  dlsch_thread_indices[], giving this worker its index (0..7), which also
 *  serves as the harq_pid it decodes.  The thread sleeps on
 *  dlsch_cond[index] until a producer (the RX_PDSCH thread) raises
 *  dlsch_instance_cnt[index] to >= 0, then unscrambles and decodes the
 *  pending transport block, forwards a correctly received SDU to the MAC
 *  (when OPENAIR2 is enabled), updates statistics, and goes back to sleep.
 *
 *  Exits when the global oai_exit flag is set; always returns NULL.
 */
static void * dlsch_thread(void *param)
{
  //unsigned long cpuid;
  unsigned char dlsch_thread_index = *((unsigned char *)param);
  unsigned int ret=0;
  uint8_t harq_pid;
#ifdef RTAI
  RT_TASK *task;
  char task_name[8];   // "DLSCH" + single digit + NUL fits in 8 bytes for index <= 9
#endif
  int eNB_id = 0, UE_id = 0, CC_id=0;   // hard-wired to first eNB / UE / component carrier
  PHY_VARS_UE *phy_vars_ue = PHY_vars_UE_g[UE_id][CC_id];

  // NOTE(review): dlsch_thread_index is unsigned, so the "< 0" comparison is
  // always false; only the "> 7" bound is effective.
  if ((dlsch_thread_index <0) || (dlsch_thread_index>7)) {
    LOG_E(PHY,"[SCHED][DLSCH] Illegal dlsch_thread_index %d (%p)!!!!\n",dlsch_thread_index,param);
    return 0;
  }

#ifdef RTAI
  // Register this pthread with the RTAI scheduler under a unique name.
  sprintf(task_name,"DLSCH%d",dlsch_thread_index);
  task = rt_task_init_schmod(nam2num(task_name), 0, 0, 0, SCHED_FIFO, 0xF);

  if (task==NULL) {
    LOG_E(PHY,"[SCHED][DLSCH] Problem starting dlsch_thread_index %d (%s)!!!!\n",dlsch_thread_index,task_name);
    return 0;
  } else {
    LOG_I(PHY,"[SCHED][DLSCH] dlsch_thread for process %d started with id %p\n",
          dlsch_thread_index,
          task);
  }

#endif

  // Pin current and future pages in RAM so the real-time path never page-faults.
  mlockall(MCL_CURRENT | MCL_FUTURE);

  //rt_set_runnable_on_cpuid(task,1);
  //cpuid = rtai_cpuid();

#ifdef HARD_RT
  rt_make_hard_real_time();
#endif

  //dlsch_cpuid[dlsch_thread_index] = cpuid;

  while (!oai_exit) {
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_THREAD0+dlsch_thread_index,0);

    // Sleep until the producer posts work: dlsch_instance_cnt goes from -1
    // (idle) to >= 0 (work pending).
    if (pthread_mutex_lock(&dlsch_mutex[dlsch_thread_index]) != 0) {
      LOG_E(PHY,"[SCHED][DLSCH] error locking mutex.\n");
    } else {
      while (dlsch_instance_cnt[dlsch_thread_index] < 0) {
        pthread_cond_wait(&dlsch_cond[dlsch_thread_index],&dlsch_mutex[dlsch_thread_index]);
      }

      if (pthread_mutex_unlock(&dlsch_mutex[dlsch_thread_index]) != 0) {
        LOG_E(PHY,"[SCHED][DLSCH] error unlocking mutex.\n");
      }
    }

    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_THREAD0+dlsch_thread_index,1);

    if (oai_exit) break;

    LOG_I(PHY,"[SCHED][DLSCH] Frame %d: Calling dlsch_decoding with dlsch_thread_index = %d\n",phy_vars_ue->frame_rx,dlsch_thread_index);

    // Reset the error/received counters when frame_rx drops below the error
    // count — presumably handles frame-counter wrap-around; TODO confirm.
    if (phy_vars_ue->frame_rx < phy_vars_ue->dlsch_errors[eNB_id]) {
      phy_vars_ue->dlsch_errors[eNB_id]=0;
      phy_vars_ue->dlsch_received[eNB_id] = 0;
    }

    // One worker per HARQ process: the thread index doubles as harq_pid.
    harq_pid = dlsch_thread_index;

    if (phy_vars_ue->dlsch_ue[eNB_id][0]) {
      //      rt_printk("[SCHED][DLSCH] Frame %d, slot %d, start %llu, end %llu, proc time: %llu ns\n",phy_vars_ue->frame,last_slot,time0,time1,(time1-time0));
      // Undo the Gold-sequence scrambling of the G soft bits for this
      // subframe before channel decoding.
      dlsch_unscrambling(&phy_vars_ue->lte_frame_parms,
                         0,
                         phy_vars_ue->dlsch_ue[eNB_id][0],
                         phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->G,
                         phy_vars_ue->lte_ue_pdsch_vars[eNB_id]->llr[0],
                         0,
                         dlsch_subframe[dlsch_thread_index]<<1);

      LOG_I(PHY,"[UE  %d] PDSCH Calling dlsch_decoding for subframe %d, harq_pid %d, G%d\n", phy_vars_ue->Mod_id,dlsch_subframe[dlsch_thread_index], harq_pid,
            phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->G);

      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_DECODING0+dlsch_thread_index,1);
      // Turbo-decode the transport block; ret is the number of iterations
      // used, or 1+MAX_TURBO_ITERATIONS on CRC failure (see check below).
      ret = dlsch_decoding(phy_vars_ue,
                           phy_vars_ue->lte_ue_pdsch_vars[eNB_id]->llr[0],
                           &phy_vars_ue->lte_frame_parms,
                           phy_vars_ue->dlsch_ue[eNB_id][0],
                           phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid],
                           dlsch_subframe[dlsch_thread_index],
                           harq_pid,
                           1, // is_crnti
                           0); // llr8_flag
      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_DLSCH_DECODING0+dlsch_thread_index,0);

      LOG_D(PHY,"[UE  %d][PDSCH %x/%d] Frame %d subframe %d: PDSCH/DLSCH decoding iter %d (mcs %d, rv %d, TBS %d)\n",
            phy_vars_ue->Mod_id,
            phy_vars_ue->dlsch_ue[eNB_id][0]->rnti,harq_pid,
            phy_vars_ue->frame_rx,dlsch_subframe[dlsch_thread_index],ret,
            phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->mcs,
            phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->rvidx,
            phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->TBS);

      // ret == 1+MAX_TURBO_ITERATIONS signals a decoding failure.
      if (ret == (1+MAX_TURBO_ITERATIONS)) {
        phy_vars_ue->dlsch_errors[eNB_id]++;

#ifdef DEBUG_PHY
        LOG_I(PHY,"[UE  %d][PDSCH %x/%d] Frame %d subframe %d DLSCH in error (rv %d,mcs %d)\n",
              phy_vars_ue->Mod_id,phy_vars_ue->dlsch_ue[eNB_id][0]->rnti,
              harq_pid,phy_vars_ue->frame_rx,dlsch_subframe[dlsch_thread_index],
              phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->rvidx,
              phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->mcs);
#endif
      } else {
        // Successful decode: hand the SDU to the MAC and update throughput
        // statistics.
        LOG_I(PHY,"[UE  %d][PDSCH %x/%d] Frame %d subframe %d: Received DLSCH (rv %d,mcs %d)\n",
              phy_vars_ue->Mod_id,phy_vars_ue->dlsch_ue[eNB_id][0]->rnti,
              harq_pid,phy_vars_ue->frame_rx,dlsch_subframe[dlsch_thread_index],
              phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->rvidx,
              phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->mcs);

#ifdef OPENAIR2
        mac_xface->ue_send_sdu(phy_vars_ue->Mod_id,
                               0,  // CC_id
                               phy_vars_ue->frame_rx,
                               phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->b,
                               phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->TBS>>3,   // TBS is in bits; MAC wants bytes
                               eNB_id);
#endif
        phy_vars_ue->total_TBS[eNB_id] =  phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->TBS + phy_vars_ue->total_TBS[eNB_id];
        phy_vars_ue->total_received_bits[eNB_id] = phy_vars_ue->total_received_bits[eNB_id] + phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->TBS;
      }
    }

    // this is done in main thread
    /*
    if (phy_vars_ue->frame % 100 == 0) {
      if ((phy_vars_ue->dlsch_received[eNB_id] - phy_vars_ue->dlsch_received_last[eNB_id]) != 0)
        phy_vars_ue->dlsch_fer[eNB_id] = (100*(phy_vars_ue->dlsch_errors[eNB_id] - phy_vars_ue->dlsch_errors_last[eNB_id]))/(phy_vars_ue->dlsch_received[eNB_id] - phy_vars_ue->dlsch_received_last[eNB_id]);

      phy_vars_ue->dlsch_errors_last[eNB_id] = phy_vars_ue->dlsch_errors[eNB_id];
      phy_vars_ue->dlsch_received_last[eNB_id] = phy_vars_ue->dlsch_received[eNB_id];
    }
    */

#ifdef DEBUG_PHY
    if (phy_vars_ue->dlsch_ue[eNB_id][0]) {
      LOG_I(PHY,"[UE  %d][PDSCH %x/%d] Frame %d subframe %d: PDSCH/DLSCH decoding iter %d (mcs %d, rv %d, TBS %d)\n",
            phy_vars_ue->Mod_id,
            phy_vars_ue->dlsch_ue[eNB_id][0]->rnti,harq_pid,
            phy_vars_ue->frame_rx,dlsch_subframe[dlsch_thread_index],ret,
            phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->mcs,
            phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->rvidx,
            phy_vars_ue->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->TBS);

      // Periodic (every 100 frames) FER/CQI summary.
      if (phy_vars_ue->frame_rx%100==0) {
        LOG_D(PHY,"[UE  %d][PDSCH %x] Frame %d subframe %d dlsch_errors %d, dlsch_received %d, dlsch_fer %d, current_dlsch_cqi %d\n",
              phy_vars_ue->Mod_id,phy_vars_ue->dlsch_ue[eNB_id][0]->rnti,
              phy_vars_ue->frame_rx,dlsch_subframe[dlsch_thread_index],
              phy_vars_ue->dlsch_errors[eNB_id],
              phy_vars_ue->dlsch_received[eNB_id],
              phy_vars_ue->dlsch_fer[eNB_id],
              phy_vars_ue->PHY_measurements.wideband_cqi_tot[eNB_id]);
      }
    } else {
      LOG_I( PHY,"[UE  %d][PDSCH ?/%d] Frame %d subframe %d: PDSCH/DLSCH decoding iter %d (phy_vars_ue->dlsch_ue[eNB_id][0] == 0)\n",
             phy_vars_ue->Mod_id,
             harq_pid,
             phy_vars_ue->frame_rx, dlsch_subframe[dlsch_thread_index], ret );
    }

#endif

    // Mark this work item done; the count returning to -1 means idle again.
    if (pthread_mutex_lock(&dlsch_mutex[dlsch_thread_index]) != 0) {
      msg("[openair][SCHED][DLSCH] error locking mutex.\n");
    } else {
      dlsch_instance_cnt[dlsch_thread_index]--;

      if (pthread_mutex_unlock(&dlsch_mutex[dlsch_thread_index]) != 0) {
        msg("[openair][SCHED][DLSCH] error unlocking mutex.\n");
      }
    }
  }

#ifdef HARD_RT
  rt_make_soft_real_time();
#endif

  msg("[openair][SCHED][DLSCH] DLSCH thread %d exiting\n",dlsch_thread_index);

  return 0;
}
int init_dlsch_threads(void)
{
int error_code;
struct sched_param p;
unsigned char dlsch_thread_index;
pthread_attr_init (&attr_dlsch_threads);
pthread_attr_setstacksize(&attr_dlsch_threads,OPENAIR_THREAD_STACK_SIZE);
//attr_dlsch_threads.priority = 1;
p.sched_priority = OPENAIR_THREAD_PRIORITY;
pthread_attr_setschedparam (&attr_dlsch_threads, &p);
#ifndef RTAI_ISNT_POSIX
pthread_attr_setschedpolicy (&attr_dlsch_threads, SCHED_FIFO);
#endif
for(dlsch_thread_index=0; dlsch_thread_index<8; dlsch_thread_index++) {
pthread_mutex_init(&dlsch_mutex[dlsch_thread_index],NULL);
pthread_cond_init(&dlsch_cond[dlsch_thread_index],NULL);
dlsch_instance_cnt[dlsch_thread_index] = -1;
dlsch_thread_indices[dlsch_thread_index] = dlsch_thread_index;
rt_printk("[openair][SCHED][DLSCH][INIT] Allocating DLSCH thread for dlsch_thread_index %d (%p)\n",dlsch_thread_index,&dlsch_thread_indices[dlsch_thread_index]);
error_code = pthread_create(&dlsch_threads[dlsch_thread_index],
&attr_dlsch_threads,
dlsch_thread,
(void *)&dlsch_thread_indices[dlsch_thread_index]);
if (error_code!= 0) {
rt_printk("[openair][SCHED][DLSCH][INIT] Could not allocate dlsch_thread %d, error %d\n",dlsch_thread_index,error_code);
return(error_code);
} else {
rt_printk("[openair][SCHED][DLSCH][INIT] Allocate dlsch_thread %d successful\n",dlsch_thread_index);
}
}
return(0);
}
/*
 * Ask every DLSCH worker to terminate and release its synchronisation
 * objects.  Raising the instance count to 0 wakes the worker from its
 * condition wait so it can observe oai_exit and leave its loop.
 */
void cleanup_dlsch_threads(void)
{
  unsigned char worker;

  for (worker = 0; worker < 8; worker++) {
    //pthread_exit(&dlsch_threads[worker]);

    rt_printk("[openair][SCHED][DLSCH] Scheduling dlsch_thread %d to exit\n",worker);

    dlsch_instance_cnt[worker] = 0;   /* wake the worker */

    if (pthread_cond_signal(&dlsch_cond[worker]) != 0)
      rt_printk("[openair][SCHED][DLSCH] ERROR pthread_cond_signal\n");
    else
      rt_printk("[openair][SCHED][DLSCH] Signalled dlsch_thread %d to exit\n",worker);

    rt_printk("[openair][SCHED][DLSCH] Exiting ...\n");

    pthread_cond_destroy(&dlsch_cond[worker]);
    pthread_mutex_destroy(&dlsch_mutex[worker]);
  }
}
/*******************************************************************************
OpenAirInterface
Copyright(c) 1999 - 2014 Eurecom
OpenAirInterface is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenAirInterface is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenAirInterface.The full GNU General Public License is
included in this distribution in the file called "COPYING". If not,
see <http://www.gnu.org/licenses/>.
Contact Information
OpenAirInterface Admin: openair_admin@eurecom.fr
OpenAirInterface Tech : openair_tech@eurecom.fr
OpenAirInterface Dev : openair4g-devel@lists.eurecom.fr
Address : Eurecom, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia Antipolis cedex, FRANCE
*******************************************************************************/
/*! \file sched_dlsch.c
* \brief DLSCH decoding thread (RTAI)
* \author R. Knopp, F. Kaltenberger
* \date 2011
* \version 0.1
* \company Eurecom
* \email: knopp@eurecom.fr,florian.kaltenberger@eurecom.fr
* \note
* \warning
*/
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>
#include "rt_wrapper.h"
#include <sys/mman.h>
#include "PHY/types.h"
#include "PHY/defs.h"
#include "PHY/extern.h"
#include "SCHED/defs.h"
#include "SCHED/extern.h"
#include "UTIL/LOG/vcd_signal_dumper.h"
RTIME time0,time1;
#define DEBUG_PHY
/// Mutex for instance count on rx_pdsch scheduling
pthread_mutex_t rx_pdsch_mutex;
/// Condition variable for rx_pdsch thread
pthread_cond_t rx_pdsch_cond;
pthread_t rx_pdsch_thread_var;
pthread_attr_t attr_rx_pdsch_thread;
// activity indicators for harq_pid's
int rx_pdsch_instance_cnt;
// process ids for cpu
int rx_pdsch_cpuid;
// subframe number for each harq_pid (needed to store ack in right place for UL)
int rx_pdsch_slot;
extern int oai_exit;
extern pthread_mutex_t dlsch_mutex[8];
extern int dlsch_instance_cnt[8];
extern int dlsch_subframe[8];
extern pthread_cond_t dlsch_cond[8];
/** RX_PDSCH Decoding Thread */
/** RX_PDSCH Decoding Thread (UE side).
 *
 *  Single worker that performs PDSCH demodulation (rx_pdsch) for the slot
 *  stored in rx_pdsch_slot.  It sleeps on rx_pdsch_cond until the UE RX
 *  chain raises rx_pdsch_instance_cnt to >= 0, then:
 *    - even slot of the subframe: demodulates the first-half symbols
 *      (after the PDCCH region, up to the second pilot);
 *    - odd slot: demodulates the remaining symbols and hands the subframe
 *      over to the per-HARQ DLSCH decoding thread by bumping
 *      dlsch_instance_cnt[harq_pid] and signalling dlsch_cond[harq_pid].
 *
 *  param is unused.  Exits when the global oai_exit flag is set; always
 *  returns NULL.
 */
static void * rx_pdsch_thread(void *param)
{
  //unsigned long cpuid;
  uint8_t dlsch_thread_index = 0;
  uint8_t pilot2,harq_pid,subframe;
  //  uint8_t last_slot;
  uint8_t dual_stream_UE = 0;   // 1 when the TM5 interference-aware receiver is active
  uint8_t i_mod = 0;            // interferer modulation order for the IA receiver
#ifdef RTAI
  RT_TASK *task;
#endif
  int m,eNB_id = 0;             // hard-wired to the first eNB
  int eNB_id_i = 1;             // interfering eNB index passed to rx_pdsch
  PHY_VARS_UE *UE = PHY_vars_UE_g[0][0];

#ifdef RTAI
  // Register this pthread with the RTAI scheduler.
  task = rt_task_init_schmod(nam2num("RX_PDSCH_THREAD"), 0, 0, 0, SCHED_FIFO, 0xF);

  if (task==NULL) {
    LOG_E(PHY,"[SCHED][RX_PDSCH] Problem starting rx_pdsch thread!!!!\n");
    return 0;
  } else {
    LOG_I(PHY,"[SCHED][RX_PDSCH] rx_pdsch_thread started for with id %p\n",task);
  }

#endif

  // Pin pages in RAM to avoid page faults in the real-time path.
  mlockall(MCL_CURRENT | MCL_FUTURE);

  //rt_set_runnable_on_cpuid(task,1);
  //cpuid = rtai_cpuid();

#ifdef HARD_RT
  rt_make_hard_real_time();
#endif

  // Index of the symbol carrying the second pilot, which splits the
  // subframe into the two demodulation passes below.
  if (UE->lte_frame_parms.Ncp == NORMAL) {  // normal prefix
    pilot2 = 7;
  } else { // extended prefix
    pilot2 = 6;
  }

  while (!oai_exit) {
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PDSCH_THREAD, 0);

    // Sleep until the RX chain posts work (instance count >= 0).
    if (pthread_mutex_lock(&rx_pdsch_mutex) != 0) {
      LOG_E(PHY,"[SCHED][RX_PDSCH] error locking mutex.\n");
    } else {
      while (rx_pdsch_instance_cnt < 0) {
        pthread_cond_wait(&rx_pdsch_cond,&rx_pdsch_mutex);
      }

      if (pthread_mutex_unlock(&rx_pdsch_mutex) != 0) {
        LOG_E(PHY,"[SCHED][RX_PDSCH] error unlocking mutex.\n");
      }
    }

    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PDSCH_THREAD, 1);

    //    last_slot = rx_pdsch_slot;
    subframe = UE->slot_rx>>1;   // two slots per subframe
    // Important! assumption that PDCCH procedure of next SF is not called yet
    harq_pid = UE->dlsch_ue[eNB_id][0]->current_harq_pid;

    // Recompute G (number of coded bits available for the PDSCH) for this
    // subframe's allocation.
    UE->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->G = get_G(&UE->lte_frame_parms,
        UE->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->nb_rb,
        UE->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->rb_alloc_even,
        get_Qm(UE->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->mcs),
        UE->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->Nl,
        UE->lte_ue_pdcch_vars[eNB_id]->num_pdcch_symbols,
        UE->frame_rx,subframe);

    // TM5 with power offset 0 and the IA receiver enabled: demodulate the
    // interfering stream too.  use_ia_receiver == 2 additionally cycles the
    // assumed interferer modulation with the frame number (test mode —
    // presumably for evaluation; TODO confirm).
    if ((UE->transmission_mode[eNB_id] == 5) &&
        (UE->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->dl_power_off==0) &&
        (openair_daq_vars.use_ia_receiver > 0)) {
      dual_stream_UE = 1;
      eNB_id_i = UE->n_connected_eNB;

      if (openair_daq_vars.use_ia_receiver == 2) {
        i_mod =  get_Qm(((UE->frame_rx%1024)/3)%28);
      } else {
        i_mod = get_Qm(UE->dlsch_ue[eNB_id][0]->harq_processes[harq_pid]->mcs);
      }
    } else {
      dual_stream_UE = 0;
      eNB_id_i = eNB_id+1;
      i_mod = 0;
    }

    if (oai_exit) break;

    LOG_D(PHY,"[SCHED][RX_PDSCH] Frame %d, slot %d: Calling rx_pdsch_decoding with harq_pid %d\n",UE->frame_rx,UE->slot_rx,harq_pid);

    // Check if we are in even or odd slot
    if (UE->slot_rx%2) { // odd slots

      // measure time
      //time0 = rt_get_time_ns();
      //        rt_printk("[SCHED][RX_PDSCH][before rx_pdsch] Frame %d, slot %d, time %llu\n",UE->frame,last_slot,rt_get_time_ns());

      // Second half of the subframe: symbols from the second pilot onward.
      for (m=pilot2; m<UE->lte_frame_parms.symbols_per_tti; m++) {
        rx_pdsch(UE,
                 PDSCH,
                 eNB_id,
                 eNB_id_i,
                 subframe,
                 m,
                 0,
                 dual_stream_UE,
                 i_mod,
                 harq_pid);
      }

      //  time1 = rt_get_time_ns();
      //  rt_printk("[SCHED][RX_PDSCH] Frame %d, slot %d, start %llu, end %llu, proc time: %llu ns\n",UE->frame_rx,last_slot,time0,time1,(time1-time0));

      // Hand the completed subframe to the DLSCH worker for this HARQ
      // process (one worker per harq_pid).
      dlsch_thread_index = harq_pid;

      if (pthread_mutex_lock (&dlsch_mutex[dlsch_thread_index]) != 0) {                // Signal MAC_PHY Scheduler
        LOG_E(PHY,"[UE  %d] ERROR pthread_mutex_lock\n",UE->Mod_id);     // lock before accessing shared resource
        //  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_UE_RX, VCD_FUNCTION_OUT);
        //return(-1);
      }

      dlsch_instance_cnt[dlsch_thread_index]++;
      dlsch_subframe[dlsch_thread_index] = subframe;
      pthread_mutex_unlock (&dlsch_mutex[dlsch_thread_index]);

      // NOTE(review): the count is re-read after unlocking; a concurrent
      // change between unlock and this test would be missed — confirm this
      // is benign under the single-producer design.
      if (dlsch_instance_cnt[dlsch_thread_index] == 0) {
        if (pthread_cond_signal(&dlsch_cond[dlsch_thread_index]) != 0) {
          LOG_E(PHY,"[UE  %d] ERROR pthread_cond_signal for dlsch_cond[%d]\n",UE->Mod_id,dlsch_thread_index);
          //    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_UE_RX, VCD_FUNCTION_OUT);
          //return(-1);
        }
      } else {
        LOG_W(PHY,"[UE  %d] DLSCH thread for dlsch_thread_index %d busy!!!\n",UE->Mod_id,dlsch_thread_index);
        //  VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_PHY_PROCEDURES_UE_RX, VCD_FUNCTION_OUT);
        //return(-1);
      }

    } else {  // even slots

      // First half of the subframe: symbols after the PDCCH region, up to
      // (but excluding) the second pilot.  first_symbol_flag is set only on
      // the first PDSCH symbol.
      for (m=UE->lte_ue_pdcch_vars[eNB_id]->num_pdcch_symbols; m<pilot2; m++) {
        rx_pdsch(UE,
                 PDSCH,
                 eNB_id,
                 eNB_id_i,
                 subframe,
                 m,
                 (m==UE->lte_ue_pdcch_vars[eNB_id]->num_pdcch_symbols)?1:0,   // first_symbol_flag
                 dual_stream_UE,
                 i_mod,
                 harq_pid);
      }
    }

    // Mark this work item done (count returns to -1 => idle).
    if (pthread_mutex_lock(&rx_pdsch_mutex) != 0) {
      msg("[openair][SCHED][RX_PDSCH] error locking mutex.\n");
    } else {
      rx_pdsch_instance_cnt--;

      if (pthread_mutex_unlock(&rx_pdsch_mutex) != 0) {
        msg("[openair][SCHED][RX_PDSCH] error unlocking mutex.\n");
      }
    }
  }

#ifdef HARD_RT
  rt_make_soft_real_time();
#endif

  LOG_D(PHY,"[openair][SCHED][RX_PDSCH] RX_PDSCH thread exiting\n");

  return 0;
}
/*
 * Create the single RX_PDSCH worker thread together with its mutex and
 * condition variable.  The worker starts idle (instance count -1) and is
 * woken by the UE RX chain when a slot is ready for demodulation.
 *
 * Returns 0 on success or the pthread_create() error code.
 */
int init_rx_pdsch_thread(void)
{
  struct sched_param prio;
  int rc;

  pthread_mutex_init(&rx_pdsch_mutex,NULL);
  pthread_cond_init(&rx_pdsch_cond,NULL);

  pthread_attr_init (&attr_rx_pdsch_thread);
  pthread_attr_setstacksize(&attr_rx_pdsch_thread,OPENAIR_THREAD_STACK_SIZE);
  //attr_rx_pdsch_thread.priority = 1;

  prio.sched_priority = OPENAIR_THREAD_PRIORITY;
  pthread_attr_setschedparam  (&attr_rx_pdsch_thread, &prio);
#ifndef RTAI_ISNT_POSIX
  pthread_attr_setschedpolicy (&attr_rx_pdsch_thread, SCHED_FIFO);
#endif

  rx_pdsch_instance_cnt = -1;   /* worker starts idle */
  rt_printk("[openair][SCHED][RX_PDSCH][INIT] Allocating RX_PDSCH thread\n");
  rc = pthread_create(&rx_pdsch_thread_var,
                      &attr_rx_pdsch_thread,
                      rx_pdsch_thread,
                      0);

  if (rc != 0) {
    rt_printk("[openair][SCHED][RX_PDSCH][INIT] Could not allocate rx_pdsch_thread, error %d\n",rc);
    return(rc);
  }

  rt_printk("[openair][SCHED][RX_PDSCH][INIT] Allocate rx_pdsch_thread successful\n");
  return(0);
}
/*
 * Ask the RX_PDSCH worker to terminate and release its synchronisation
 * objects.  Raising the instance count to 0 wakes the worker from its
 * condition wait so it can observe oai_exit and leave its loop.
 */
void cleanup_rx_pdsch_thread(void)
{
  rt_printk("[openair][SCHED][RX_PDSCH] Scheduling rx_pdsch_thread to exit\n");

  rx_pdsch_instance_cnt = 0;    /* wake the worker */

  if (pthread_cond_signal(&rx_pdsch_cond) != 0)
    rt_printk("[openair][SCHED][RX_PDSCH] ERROR pthread_cond_signal\n");
  else
    rt_printk("[openair][SCHED][RX_PDSCH] Signalled rx_pdsch_thread to exit\n");

  rt_printk("[openair][SCHED][RX_PDSCH] Exiting ...\n");

  pthread_cond_destroy(&rx_pdsch_cond);
  pthread_mutex_destroy(&rx_pdsch_mutex);
}
/*******************************************************************************
OpenAirInterface
Copyright(c) 1999 - 2014 Eurecom
OpenAirInterface is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenAirInterface is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenAirInterface.The full GNU General Public License is
included in this distribution in the file called "COPYING". If not,
see <http://www.gnu.org/licenses/>.
Contact Information
OpenAirInterface Admin: openair_admin@eurecom.fr
OpenAirInterface Tech : openair_tech@eurecom.fr
OpenAirInterface Dev : openair4g-devel@lists.eurecom.fr
Address : Eurecom, Campus SophiaTech, 450 Route des Chappes, CS 50193 - 06904 Biot Sophia Antipolis cedex, FRANCE
*******************************************************************************/
/*! \file sched_ulsch.c
* \brief ULSCH decoding thread (RTAI)
* \author R. Knopp, F. Kaltenberger
* \date 2011
* \version 0.1
* \company Eurecom
* \email: knopp@eurecom.fr,florian.kaltenberger@eurecom.fr
* \note
* \warning
*/
#include <stdio.h>
#include <stdlib.h>
#include <sched.h>
#include "rt_wrapper.h"
#include <sys/mman.h>
#include "PHY/types.h"
#include "PHY/defs.h"
#include "PHY/extern.h"
#include "SCHED/defs.h"
#include "MAC_INTERFACE/extern.h"
#ifdef CBMIMO1
#include "ARCH/CBMIMO1/DEVICE_DRIVER/cbmimo1_device.h"
#include "ARCH/CBMIMO1/DEVICE_DRIVER/extern.h"
#include "ARCH/CBMIMO1/DEVICE_DRIVER/defs.h"
#endif // CBMIMO1
#define DEBUG_PHY
/// Mutex for instance count on ulsch scheduling
pthread_mutex_t ulsch_mutex[NUMBER_OF_UE_MAX];
/// Condition variable for ulsch thread
pthread_cond_t ulsch_cond[NUMBER_OF_UE_MAX];
pthread_t ulsch_threads[NUMBER_OF_UE_MAX];
pthread_attr_t attr_ulsch_threads;
// activity indicators for harq_pid's
int ulsch_instance_cnt[NUMBER_OF_UE_MAX];
// process ids for cpu
int ulsch_cpuid[NUMBER_OF_UE_MAX];
// subframe number for each harq_pid (needed to store ack in right place for UL)
int ulsch_subframe[NUMBER_OF_UE_MAX];
extern int oai_exit;
/*
extern int ulsch_errors;
extern int ulsch_received;
extern int ulsch_errors_last;
extern int ulsch_received_last;
extern int ulsch_fer;
extern int current_ulsch_cqi;
*/
/** ULSCH Decoding Thread */
/** ULSCH Decoding Thread (eNB side).
 *
 *  One worker per UE index.  The creator (init_ulsch_threads) passes the
 *  integer thread index by value through the void* argument.  The thread
 *  sleeps on ulsch_cond[index] until the eNB RX chain raises
 *  ulsch_instance_cnt[index] to >= 0, then runs ulsch_decoding_procedures()
 *  for the stored subframe and goes back to sleep.
 *
 *  Exits when the global oai_exit flag is set; always returns NULL.
 */
static void * ulsch_thread(void *param)
{
  //unsigned long cpuid;
  // Round-trip the index through uintptr_t: the original direct
  // pointer -> unsigned int cast truncates on LP64 platforms and is
  // implementation-defined.
  unsigned int ulsch_thread_index = (unsigned int)(uintptr_t)param;
  RTIME time_in,time_out;
#ifdef RTAI
  RT_TASK *task;
  char ulsch_thread_name[64];
#endif
  int eNB_id = 0;   // hard-wired to the first eNB
  PHY_VARS_eNB *phy_vars_eNB = PHY_vars_eNB_g[eNB_id];

  // The index subscripts arrays of size NUMBER_OF_UE_MAX, so the last valid
  // value is NUMBER_OF_UE_MAX-1 (the original '>' test was off by one).
  // The original '< 0' test was dropped: the variable is unsigned, so the
  // comparison was always false.
  if (ulsch_thread_index >= NUMBER_OF_UE_MAX) {
    LOG_E(PHY,"[SCHED][ULSCH] Illegal ulsch_thread_index %d!!!!\n",ulsch_thread_index);
    return 0;
  }

#ifdef RTAI
  // Register this pthread with the RTAI scheduler under a unique name.
  sprintf(ulsch_thread_name,"ULSCH_THREAD%d",ulsch_thread_index);
  LOG_I(PHY,"[SCHED][ULSCH] starting ulsch_thread %s for process %d\n",
        ulsch_thread_name,
        ulsch_thread_index);
  task = rt_task_init_schmod(nam2num(ulsch_thread_name), 0, 0, 0, SCHED_FIFO, 0xF);

  if (task==NULL) {
    LOG_E(PHY,"[SCHED][ULSCH] Problem starting ulsch_thread_index %d!!!!\n",ulsch_thread_index);
    return 0;
  } else {
    LOG_I(PHY,"[SCHED][ULSCH] ulsch_thread for process %d started with id %p\n",
          ulsch_thread_index,
          task);
  }

#endif

  // Pin pages in RAM so the real-time path never page-faults.
  mlockall(MCL_CURRENT | MCL_FUTURE);

  //rt_set_runnable_on_cpuid(task,1);
  //cpuid = rtai_cpuid();

#ifdef HARD_RT
  rt_make_hard_real_time();
#endif

  //ulsch_cpuid[ulsch_thread_index] = cpuid;

  while (!oai_exit) {

    // Sleep until the RX chain posts work for this UE (count >= 0).
    if (pthread_mutex_lock(&ulsch_mutex[ulsch_thread_index]) != 0) {
      LOG_E(PHY,"[SCHED][ULSCH] error locking mutex.\n");
    } else {
      while (ulsch_instance_cnt[ulsch_thread_index] < 0) {
        pthread_cond_wait(&ulsch_cond[ulsch_thread_index],&ulsch_mutex[ulsch_thread_index]);
      }

      if (pthread_mutex_unlock(&ulsch_mutex[ulsch_thread_index]) != 0) {
        LOG_E(PHY,"[SCHED][ULSCH] error unlocking mutex.\n");
      }
    }

    if (oai_exit) break;

    LOG_D(PHY,"[SCHED][ULSCH] Frame %d: Calling ulsch_decoding with ulsch_thread_index = %d\n",phy_vars_eNB->proc[0].frame_tx,ulsch_thread_index);

    // ulsch_subframe[] holds the subframe number; "<<1" converts it to the
    // slot index expected by the decoding procedure.
    time_in = rt_get_time_ns();
    ulsch_decoding_procedures(ulsch_subframe[ulsch_thread_index]<<1,ulsch_thread_index,phy_vars_eNB,0);
    time_out = rt_get_time_ns();
    (void)time_in;
    (void)time_out;   // timing probes; currently not reported anywhere

    // Mark this work item done (count returns to -1 => idle).
    if (pthread_mutex_lock(&ulsch_mutex[ulsch_thread_index]) != 0) {
      msg("[openair][SCHED][ULSCH] error locking mutex.\n");
    } else {
      ulsch_instance_cnt[ulsch_thread_index]--;

      if (pthread_mutex_unlock(&ulsch_mutex[ulsch_thread_index]) != 0) {
        msg("[openair][SCHED][ULSCH] error unlocking mutex.\n");
      }
    }
  }

#ifdef HARD_RT
  rt_make_soft_real_time();
#endif

  msg("[openair][SCHED][ULSCH] ULSCH thread %d exiting\n",ulsch_thread_index);

  return 0;
}
/** Spawn one ULSCH decoding worker thread per UE.
 *
 *  Initialises the per-UE mutex/condition pair, marks each worker idle
 *  (instance count -1) and creates the thread, passing its index by value
 *  through the void* argument.
 *
 *  Returns 0 on success; otherwise the sum of the pthread_create() error
 *  codes of the workers that failed (creation continues for the remaining
 *  UEs, matching the original best-effort behaviour).
 */
int init_ulsch_threads(void)
{
  int error_code, return_code=0;
  struct sched_param p;
  int ulsch_thread_index;

  // The attribute object is shared by every worker: set it up once instead
  // of re-running pthread_attr_init() on the same object each iteration
  // (re-initialising an already-initialised attribute is undefined
  // behaviour per POSIX).
  pthread_attr_init (&attr_ulsch_threads);
  pthread_attr_setstacksize(&attr_ulsch_threads,OPENAIR_THREAD_STACK_SIZE);
  //attr_ulsch_threads.priority = 1;

  p.sched_priority = OPENAIR_THREAD_PRIORITY;
  pthread_attr_setschedparam  (&attr_ulsch_threads, &p);
#ifndef RTAI_ISNT_POSIX
  pthread_attr_setschedpolicy (&attr_ulsch_threads, SCHED_FIFO);
#endif

  // later loop on all harq_pids, do 0 for now
  for (ulsch_thread_index=0; ulsch_thread_index<NUMBER_OF_UE_MAX; ulsch_thread_index++) {
    pthread_mutex_init(&ulsch_mutex[ulsch_thread_index],NULL);
    pthread_cond_init(&ulsch_cond[ulsch_thread_index],NULL);

    ulsch_instance_cnt[ulsch_thread_index] = -1;   // idle until work is posted
    rt_printk("[openair][SCHED][ULSCH][INIT] Allocating ULSCH thread for ulsch_thread_index %d\n",ulsch_thread_index);
    // Pass the index by value through the pointer argument; go via
    // uintptr_t so the integer<->pointer conversion is well defined
    // (the original cast a plain int straight to void*).
    error_code = pthread_create(&ulsch_threads[ulsch_thread_index],
                                &attr_ulsch_threads,
                                ulsch_thread,
                                (void *)(uintptr_t)ulsch_thread_index);

    if (error_code!= 0) {
      rt_printk("[openair][SCHED][ULSCH][INIT] Could not allocate ulsch_thread %d, error %d\n",ulsch_thread_index,error_code);
      return_code+=error_code;
      //return(error_code);
    } else {
      rt_printk("[openair][SCHED][ULSCH][INIT] Allocate ulsch_thread %d successful\n",ulsch_thread_index);
      //return(0);
    }
  }

  return(return_code);
}
/*
 * Ask every ULSCH worker to terminate and release its synchronisation
 * objects.  Raising the instance count to 0 wakes the worker from its
 * condition wait so it can observe oai_exit and leave its loop.
 */
void cleanup_ulsch_threads(void)
{
  int ue;

  for (ue = 0; ue < NUMBER_OF_UE_MAX; ue++) {
    //pthread_exit(&ulsch_threads[ue]);

    rt_printk("[openair][SCHED][ULSCH] Scheduling ulsch_thread %d to exit\n",ue);

    ulsch_instance_cnt[ue] = 0;   /* wake the worker */

    if (pthread_cond_signal(&ulsch_cond[ue]) != 0)
      rt_printk("[openair][SCHED][ULSCH] ERROR pthread_cond_signal\n");
    else
      rt_printk("[openair][SCHED][ULSCH] Signalled ulsch_thread %d to exit\n",ue);

    rt_printk("[openair][SCHED][ULSCH] Exiting ...\n");

    pthread_cond_destroy(&ulsch_cond[ue]);
    pthread_mutex_destroy(&ulsch_mutex[ue]);
  }
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment