From e468376d934dd7e1de60007edd1ba9b1b1b777dc Mon Sep 17 00:00:00 2001
From: luaihui <luaihui@cn.fujitsu.com>
Date: Thu, 11 Jul 2019 10:59:07 +0900
Subject: [PATCH] add forwarding and end marker

---
 common/utils/itti_analyzer/filters.xml        |   4 +
 common/utils/itti_analyzer/filters_ue_enb.xml |   4 +
 common/utils/ocp_itti/intertask_interface.h   |   2 +
 openair2/COMMON/gtpv1_u_messages_def.h        |   4 +
 openair2/COMMON/gtpv1_u_messages_types.h      |  67 +++
 openair2/RRC/LTE/rrc_defs.h                   |  22 +
 openair2/RRC/LTE/rrc_eNB.c                    | 208 +++++++-
 openair2/RRC/LTE/rrc_eNB_GTPV1U.c             |  89 +++
 openair2/RRC/LTE/rrc_eNB_GTPV1U.h             |  12 +
 openair2/RRC/LTE/rrc_eNB_S1AP.c               |  64 +++
 openair2/RRC/LTE/rrc_eNB_S1AP.h               |   1 +
 openair2/X2AP/x2ap_eNB_generate_messages.c    |  21 +
 openair2/X2AP/x2ap_eNB_handler.c              |  52 ++
 openair3/GTPV1-U/gtpv1u_eNB.c                 | 505 +++++++++++++++++-
 openair3/GTPV1-U/gtpv1u_eNB_defs.h            |   8 +
 openair3/GTPV1-U/gtpv1u_eNB_task.h            |  11 +
 openair3/GTPV1-U/nw-gtpv1u/src/NwGtpv1u.c     |  10 +-
 openair3/S1AP/s1ap_eNB_handlers.c             |   2 +-
 openair3/UDP/udp_eNB_task.c                   |   4 +-
 19 files changed, 1080 insertions(+), 10 deletions(-)

diff --git a/common/utils/itti_analyzer/filters.xml b/common/utils/itti_analyzer/filters.xml
index e4a0e627d56..7305a2294cd 100644
--- a/common/utils/itti_analyzer/filters.xml
+++ b/common/utils/itti_analyzer/filters.xml
@@ -91,6 +91,8 @@
         <TASK_MAC_ENB enabled="1"/>
         <TASK_RLC_ENB enabled="1"/>
         <TASK_PDCP_ENB enabled="1"/>
+        <TASK_DATA_FORWARDING enabled="1"/>
+        <TASK_END_MARKER enabled="1"/>
         <TASK_RRC_ENB enabled="1"/>
         <TASK_RAL_ENB enabled="1"/>
         <TASK_S1AP enabled="1"/>
@@ -114,6 +116,8 @@
         <TASK_MAC_ENB enabled="1"/>
         <TASK_RLC_ENB enabled="1"/>
         <TASK_PDCP_ENB enabled="1"/>
+        <TASK_DATA_FORWARDING enabled="1"/>
+        <TASK_END_MARKER enabled="1"/>
         <TASK_RRC_ENB enabled="1"/>
         <TASK_RAL_ENB enabled="1"/>
         <TASK_S1AP enabled="1"/>
diff --git a/common/utils/itti_analyzer/filters_ue_enb.xml b/common/utils/itti_analyzer/filters_ue_enb.xml
index 38ed00ceae6..77859d20b69 100644
--- a/common/utils/itti_analyzer/filters_ue_enb.xml
+++ b/common/utils/itti_analyzer/filters_ue_enb.xml
@@ -130,6 +130,8 @@
         <TASK_MAC_ENB enabled="1"/>
         <TASK_RLC_ENB enabled="1"/>
         <TASK_PDCP_ENB enabled="1"/>
+        <TASK_DATA_FORWARDING enabled="1"/>
+        <TASK_END_MARKER enabled="1"/>
         <TASK_RRC_ENB enabled="1"/>
         <TASK_RAL_ENB enabled="1"/>
         <TASK_S1AP enabled="1"/>
@@ -153,6 +155,8 @@
         <TASK_MAC_ENB enabled="1"/>
         <TASK_RLC_ENB enabled="1"/>
         <TASK_PDCP_ENB enabled="1"/>
+        <TASK_DATA_FORWARDING enabled="1"/>
+        <TASK_END_MARKER enabled="1"/>
         <TASK_RRC_ENB enabled="1"/>
         <TASK_RAL_ENB enabled="1"/>
         <TASK_S1AP enabled="1"/>
diff --git a/common/utils/ocp_itti/intertask_interface.h b/common/utils/ocp_itti/intertask_interface.h
index 159265c6a82..956d90072dd 100644
--- a/common/utils/ocp_itti/intertask_interface.h
+++ b/common/utils/ocp_itti/intertask_interface.h
@@ -283,6 +283,8 @@ typedef struct {
   TASK_DEF(TASK_RLC_ENB, TASK_PRIORITY_MED, 200, NULL, NULL) \
   TASK_DEF(TASK_RRC_ENB_NB_IoT, TASK_PRIORITY_MED, 200, NULL, NULL) \
   TASK_DEF(TASK_PDCP_ENB, TASK_PRIORITY_MED, 200, NULL, NULL) \
+  TASK_DEF(TASK_DATA_FORWARDING, TASK_PRIORITY_MED, 200, NULL, NULL) \
+  TASK_DEF(TASK_END_MARKER, TASK_PRIORITY_MED, 200, NULL, NULL) \
   TASK_DEF(TASK_RRC_ENB, TASK_PRIORITY_MED, 200, NULL,NULL)\
   TASK_DEF(TASK_RAL_ENB, TASK_PRIORITY_MED, 200, NULL, NULL) \
   TASK_DEF(TASK_S1AP, TASK_PRIORITY_MED, 200, NULL, NULL) \
diff --git a/openair2/COMMON/gtpv1_u_messages_def.h b/openair2/COMMON/gtpv1_u_messages_def.h
index 27723eec159..1948f1596ad 100644
--- a/openair2/COMMON/gtpv1_u_messages_def.h
+++ b/openair2/COMMON/gtpv1_u_messages_def.h
@@ -25,4 +25,8 @@ MESSAGE_DEF(GTPV1U_ENB_DELETE_TUNNEL_REQ,  MESSAGE_PRIORITY_MED, gtpv1u_enb_del
 MESSAGE_DEF(GTPV1U_ENB_DELETE_TUNNEL_RESP, MESSAGE_PRIORITY_MED, gtpv1u_enb_delete_tunnel_resp_t, Gtpv1uDeleteTunnelResp)
 MESSAGE_DEF(GTPV1U_ENB_TUNNEL_DATA_IND,    MESSAGE_PRIORITY_MED, gtpv1u_enb_tunnel_data_ind_t,    Gtpv1uTunnelDataInd)
 MESSAGE_DEF(GTPV1U_ENB_TUNNEL_DATA_REQ,    MESSAGE_PRIORITY_MED, gtpv1u_enb_tunnel_data_req_t,    Gtpv1uTunnelDataReq)
+MESSAGE_DEF(GTPV1U_ENB_DATA_FORWARDING_REQ, MESSAGE_PRIORITY_MED, gtpv1u_enb_data_forwarding_req_t, Gtpv1uDataForwardingReq)
+MESSAGE_DEF(GTPV1U_ENB_DATA_FORWARDING_IND, MESSAGE_PRIORITY_MED, gtpv1u_enb_data_forwarding_ind_t, Gtpv1uDataForwardingInd)
+MESSAGE_DEF(GTPV1U_ENB_END_MARKER_REQ,      MESSAGE_PRIORITY_MED, gtpv1u_enb_end_marker_req_t,      Gtpv1uEndMarkerReq)
+MESSAGE_DEF(GTPV1U_ENB_END_MARKER_IND,      MESSAGE_PRIORITY_MED, gtpv1u_enb_end_marker_ind_t,      Gtpv1uEndMarkerInd)
 MESSAGE_DEF(GTPV1U_ENB_S1_REQ,             MESSAGE_PRIORITY_MED, Gtpv1uS1Req,                     gtpv1uS1Req)
diff --git a/openair2/COMMON/gtpv1_u_messages_types.h b/openair2/COMMON/gtpv1_u_messages_types.h
index c398b55e3ef..4f81c22fb0b 100644
--- a/openair2/COMMON/gtpv1_u_messages_types.h
+++ b/openair2/COMMON/gtpv1_u_messages_types.h
@@ -33,10 +33,33 @@
 #define GTPV1U_ENB_DELETE_TUNNEL_RESP(mSGpTR)  (mSGpTR)->ittiMsg.Gtpv1uDeleteTunnelResp
 #define GTPV1U_ENB_TUNNEL_DATA_IND(mSGpTR)     (mSGpTR)->ittiMsg.Gtpv1uTunnelDataInd
 #define GTPV1U_ENB_TUNNEL_DATA_REQ(mSGpTR)     (mSGpTR)->ittiMsg.Gtpv1uTunnelDataReq
+#define GTPV1U_ENB_DATA_FORWARDING_REQ(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uDataForwardingReq
+#define GTPV1U_ENB_DATA_FORWARDING_IND(mSGpTR) (mSGpTR)->ittiMsg.Gtpv1uDataForwardingInd
+#define GTPV1U_ENB_END_MARKER_REQ(mSGpTR)      (mSGpTR)->ittiMsg.Gtpv1uEndMarkerReq
+#define GTPV1U_ENB_END_MARKER_IND(mSGpTR)      (mSGpTR)->ittiMsg.Gtpv1uEndMarkerInd
+
 #define GTPV1U_ENB_S1_REQ(mSGpTR)              (mSGpTR)->ittiMsg.gtpv1uS1Req

 #define GTPV1U_ALL_TUNNELS_TEID (teid_t)0xFFFFFFFF

+typedef struct gtpv1u_enb_create_x2u_tunnel_req_s {
+  rnti_t                 rnti;
+  int                    num_tunnels;
+  teid_t                 tenb_X2u_teid[GTPV1U_MAX_BEARERS_PER_UE];  ///< Tunnel Endpoint Identifier
+  ebi_t                  eps_bearer_id[GTPV1U_MAX_BEARERS_PER_UE];
+  transport_layer_addr_t enb_addr[GTPV1U_MAX_BEARERS_PER_UE];
+} gtpv1u_enb_create_x2u_tunnel_req_t;
+
+typedef struct gtpv1u_enb_create_x2u_tunnel_resp_s {
+  uint8_t                status;  ///< Status of X2-U endpoint creation (Failed = 0xFF or Success = 0x0)
+  rnti_t                 rnti;
+  int                    num_tunnels;
+  teid_t                 enb_X2u_teid[GTPV1U_MAX_BEARERS_PER_UE];  ///< Tunnel Endpoint Identifier
+  ebi_t                  eps_bearer_id[GTPV1U_MAX_BEARERS_PER_UE];
+  transport_layer_addr_t enb_addr;
+} gtpv1u_enb_create_x2u_tunnel_resp_t;
+
+
 typedef struct gtpv1u_enb_create_tunnel_req_s {
   rnti_t                 rnti;
   int                    num_tunnels;
@@ -99,6 +122,50 @@ typedef struct gtpv1u_enb_tunnel_data_req_s {
   rb_id_t               rab_id;
 } gtpv1u_enb_tunnel_data_req_t;

+typedef struct gtpv1u_enb_data_forwarding_req_s {
+  uint8_t  *buffer;
+  uint32_t  length;
+  uint32_t  offset;  ///< start of message offset in buffer
+  rnti_t    rnti;
+  rb_id_t   rab_id;
+} gtpv1u_enb_data_forwarding_req_t;
+
+typedef struct gtpv1u_enb_data_forwarding_ind_s {
+  uint32_t  frame;
+  uint8_t   enb_flag;
+  uint32_t  rb_id;
+  uint32_t  muip;
+  uint32_t  confirmp;
+  uint32_t  sdu_size;
+  uint8_t  *sdu_p;
+  uint8_t   mode;
+  uint16_t  rnti;
+  uint8_t   module_id;
+  uint8_t   eNB_index;
+} gtpv1u_enb_data_forwarding_ind_t;
+
+typedef struct gtpv1u_enb_end_marker_req_s {
+  uint8_t  *buffer;
+  uint32_t  length;
+  uint32_t  offset;  ///< start of message offset in buffer
+  rnti_t    rnti;
+  rb_id_t   rab_id;
+} gtpv1u_enb_end_marker_req_t;
+
+typedef struct gtpv1u_enb_end_marker_ind_s {
+  uint32_t  frame;
+  uint8_t   enb_flag;
+  uint32_t  rb_id;
+  uint32_t  muip;
+  uint32_t  confirmp;
+  uint32_t  sdu_size;
+  uint8_t  *sdu_p;
+  uint8_t   mode;
+  uint16_t  rnti;
+  uint8_t   module_id;
+  uint8_t   eNB_index;
+} gtpv1u_enb_end_marker_ind_t;
+
 typedef struct {
   in_addr_t             enb_ip_address_for_S1u_S12_S4_up;
   tcp_udp_port_t        enb_port_for_S1u_S12_S4_up;
diff --git a/openair2/RRC/LTE/rrc_defs.h b/openair2/RRC/LTE/rrc_defs.h
index ef0598a40a7..21f6429c7ce 100644
--- a/openair2/RRC/LTE/rrc_defs.h
+++ b/openair2/RRC/LTE/rrc_defs.h
@@ -348,11 +348,24 @@ typedef enum HO_STATE_e {
   HO_COMPLETE, // initiated by the target eNB
   HO_REQUEST,
   HO_ACK,
+  HO_FORWARDING,
   HO_CONFIGURED,
+  HO_END_MARKER,
+  HO_FORWARDING_COMPLETE,
   HO_RELEASE,
   HO_CANCEL
 } HO_STATE_t;

+typedef enum DATA_FORWARDING_STATE_e {
+  FORWARDING_EMPTY=0,
+  FORWARDING_NO_EMPTY
+} DATA_FORWARDING_STATE_t;
+
+typedef enum DATA_ENDMARK_STATE_e {
+  ENDMARK_EMPTY=0,
+  ENDMARK_NO_EMPTY
+} DATA_ENDMARK_STATE_t;
+
 typedef enum SL_TRIGGER_e {
   SL_RECEIVE_COMMUNICATION=0,
   SL_TRANSMIT_RELAY_ONE_TO_ONE,
@@ -477,6 +490,9 @@ typedef struct HANDOVER_INFO_s {
   uint8_t buf[RRC_BUF_SIZE]; /* ASN.1 encoded handoverCommandMessage */
   int size;                  /* size of above message in bytes */
   int x2_id;                 /* X2AP UE ID in the target eNB */
+  uint32_t x2u_teid;
+  DATA_FORWARDING_STATE_t forwarding_state;
+  DATA_ENDMARK_STATE_t endmark_state;
 } HANDOVER_INFO;

 typedef struct PER_EVENT_s {
@@ -693,6 +709,12 @@ typedef struct eNB_RRC_UE_s {
   uint32_t                           enb_gtp_teid[S1AP_MAX_E_RAB];
   transport_layer_addr_t             enb_gtp_addrs[S1AP_MAX_E_RAB];
   rb_id_t                            enb_gtp_ebi[S1AP_MAX_E_RAB];
+  /* Total number of X2-U e-RABs already set up in the list */
+  uint8_t                            nb_x2u_e_rabs;
+  // LG: For GTPV1 TUNNELS (X2U)
+  uint32_t                           enb_gtp_x2u_teid[S1AP_MAX_E_RAB];
+  transport_layer_addr_t             enb_gtp_x2u_addrs[S1AP_MAX_E_RAB];
+  rb_id_t                            enb_gtp_x2u_ebi[S1AP_MAX_E_RAB];
   uint32_t                           ul_failure_timer;
   uint32_t                           ue_release_timer;
   uint32_t                           ue_release_timer_thres;
diff --git a/openair2/RRC/LTE/rrc_eNB.c b/openair2/RRC/LTE/rrc_eNB.c
index ffad3733193..d1de4e8e6bc 100644
--- a/openair2/RRC/LTE/rrc_eNB.c
+++ b/openair2/RRC/LTE/rrc_eNB.c
@@ -4585,7 +4585,9 @@ rrc_eNB_process_MeasurementReport(
   /* HO info struct may not be needed anymore */
   ue_context_pP->ue_context.handover_info = CALLOC(1, sizeof(*(ue_context_pP->ue_context.handover_info)));
   ue_context_pP->ue_context.Status = RRC_HO_EXECUTION;
+  ue_context_pP->ue_context.handover_info->state = HO_REQUEST;
+
   /* HO Preparation message */
   msg = itti_alloc_new_message(TASK_RRC_ENB, X2AP_HANDOVER_REQ);
   rrc_eNB_generate_HandoverPreparationInformation(
@@ -4713,8 +4715,8 @@ void rrc_eNB_process_handoverPreparationInformation(int mod_id, x2ap_handover_re
   RB_INSERT(rrc_ue_tree_s, &RC.rrc[mod_id]->rrc_ue_head, ue_context_target_p);
   LOG_D(RRC, "eNB %d: Created new UE context uid %u\n", mod_id, ue_context_target_p->local_uid);
   ue_context_target_p->ue_context.handover_info = CALLOC(1, sizeof(*(ue_context_target_p->ue_context.handover_info)));
-  ue_context_target_p->ue_context.Status = RRC_HO_EXECUTION;
-  ue_context_target_p->ue_context.handover_info->state = HO_ACK;
+  //ue_context_target_p->ue_context.Status = RRC_HO_EXECUTION;
+  //ue_context_target_p->ue_context.handover_info->state = HO_ACK;
   ue_context_target_p->ue_context.handover_info->x2_id = m->x2_id;
   ue_context_target_p->ue_context.handover_info->assoc_id = m->target_assoc_id;
   memset (ue_context_target_p->ue_context.nh, 0, 32);
@@ -4788,6 +4790,10 @@ void rrc_eNB_process_handoverPreparationInformation(int mod_id, x2ap_handover_re
           ue_context_target_p->ue_context.e_rab[i].param.e_rab_id,
           ue_context_target_p->ue_context.e_rab[i].param.gtp_teid);
   }
+  rrc_eNB_process_X2AP_TUNNEL_SETUP_REQ(mod_id, ue_context_target_p);
+
+  ue_context_target_p->ue_context.Status = RRC_HO_EXECUTION;
+  ue_context_target_p->ue_context.handover_info->state = HO_ACK;
 }

 void rrc_eNB_process_handoverCommand(
@@ -5003,7 +5009,8 @@ check_handovers(
     if (ue_context_p->ue_context.handover_info->state == HO_ACK) {
       MessageDef *msg;
       // Configure target
-      ue_context_p->ue_context.handover_info->state = HO_CONFIGURED;
+      ue_context_p->ue_context.handover_info->state = HO_FORWARDING;
+
       msg = itti_alloc_new_message(TASK_RRC_ENB, X2AP_HANDOVER_REQ_ACK);
       rrc_eNB_generate_HO_RRCConnectionReconfiguration(ctxt_pP, ue_context_p, X2AP_HANDOVER_REQ_ACK(msg).rrc_buffer,
           &X2AP_HANDOVER_REQ_ACK(msg).rrc_buffer_size);
@@ -5016,13 +5023,162 @@ check_handovers(
       X2AP_HANDOVER_REQ_ACK(msg).nb_e_rabs_tobesetup = ue_context_p->ue_context.setup_e_rabs;

       for (int i=0; i<ue_context_p->ue_context.setup_e_rabs; i++) {
+        /* set gtp teid info */
         X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].e_rab_id = ue_context_p->ue_context.e_rab[i].param.e_rab_id;
+        X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].gtp_teid = ue_context_p->ue_context.enb_gtp_x2u_teid[i];
+        X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].eNB_addr = ue_context_p->ue_context.enb_gtp_x2u_addrs[i];
       }

       itti_send_msg_to_task(TASK_X2AP, ENB_MODULE_ID_TO_INSTANCE(ctxt_pP->module_id), msg);
       LOG_I(RRC, "RRC Sends X2 HO ACK to the source eNB at frame %d and subframe %d \n", ctxt_pP->frame,ctxt_pP->subframe);
     }
   }
+
+#if defined(ENABLE_ITTI)
+
+  if (ue_context_p->ue_context.Status == RRC_RECONFIGURED &&
+      ue_context_p->ue_context.handover_info != NULL &&
+      ue_context_p->ue_context.handover_info->forwarding_state == FORWARDING_NO_EMPTY) {
+    MessageDef *msg_p;
+    int result;
+    protocol_ctxt_t ctxt;
+
+    do {
+      // Check whether the GTP-U task queued forwarded downlink data for this UE
+      itti_poll_msg (TASK_DATA_FORWARDING, &msg_p);
+
+      if (msg_p != NULL) {
+        switch (ITTI_MSG_ID(msg_p)) {
+          case GTPV1U_ENB_DATA_FORWARDING_IND:
+            PROTOCOL_CTXT_SET_BY_MODULE_ID(
+              &ctxt,
+              GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).module_id,
+              GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).enb_flag,
+              GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).rnti,
+              GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).frame,
+              0,
+              GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).eNB_index);
+            LOG_D(RRC, PROTOCOL_CTXT_FMT"[check_handovers] Received %s from %s: instance %d, rb_id %d, muiP %d, confirmP %d, mode %d\n",
+                  PROTOCOL_CTXT_ARGS(&ctxt),
+                  ITTI_MSG_NAME (msg_p),
+                  ITTI_MSG_ORIGIN_NAME(msg_p),
+                  ITTI_MSG_INSTANCE (msg_p),
+                  GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).rb_id,
+                  GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).muip,
+                  GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).confirmp,
+                  GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).mode);
+            LOG_I(RRC, "Before calling pdcp_data_req from check_handovers! GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).rb_id: %d \n", GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).rb_id);
+            result = pdcp_data_req (&ctxt,
+                                    SRB_FLAG_NO,
+                                    GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).rb_id,
+                                    GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).muip,
+                                    GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).confirmp,
+                                    GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).sdu_size,
+                                    GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).sdu_p,
+                                    GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).mode
+#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
+                                    , NULL, NULL
+#endif
+                                   );
+
+            if (result != TRUE) {
+              LOG_E(RRC, "target eNB failed to deliver the forwarded data to PDCP!\n");
+            } else {
+              LOG_D(RRC, "target eNB delivered the forwarded data to PDCP\n");
+            }
+
+            // Message buffer has been processed, free it now.
+            result = itti_free (ITTI_MSG_ORIGIN_ID(msg_p), GTPV1U_ENB_DATA_FORWARDING_IND (msg_p).sdu_p);
+            AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result);
+            break;
+
+          default:
+            LOG_E(RRC, "Received unexpected message %s\n", ITTI_MSG_NAME (msg_p));
+            break;
+        }
+
+        result = itti_free (ITTI_MSG_ORIGIN_ID(msg_p), msg_p);
+        AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result);
+      }
+    } while (msg_p != NULL);
+
+    ue_context_p->ue_context.handover_info->forwarding_state = FORWARDING_EMPTY;
+  }
+
+  if (ue_context_p->ue_context.Status == RRC_RECONFIGURED &&
+      ue_context_p->ue_context.handover_info != NULL &&
+      ue_context_p->ue_context.handover_info->forwarding_state == FORWARDING_EMPTY &&
+      ue_context_p->ue_context.handover_info->endmark_state == ENDMARK_NO_EMPTY &&
+      ue_context_p->ue_context.handover_info->state == HO_END_MARKER) {
+    MessageDef *msg_p;
+    int result;
+    protocol_ctxt_t ctxt;
+
+    do {
+      // Check whether the GTP-U task queued late SPGW data behind the end marker
+      itti_poll_msg (TASK_END_MARKER, &msg_p);
+
+      if (msg_p != NULL) {
+        switch (ITTI_MSG_ID(msg_p)) {
+          case GTPV1U_ENB_END_MARKER_IND:
+            PROTOCOL_CTXT_SET_BY_MODULE_ID(
+              &ctxt,
+              GTPV1U_ENB_END_MARKER_IND (msg_p).module_id,
+              GTPV1U_ENB_END_MARKER_IND (msg_p).enb_flag,
+              GTPV1U_ENB_END_MARKER_IND (msg_p).rnti,
+              GTPV1U_ENB_END_MARKER_IND (msg_p).frame,
+              0,
+              GTPV1U_ENB_END_MARKER_IND (msg_p).eNB_index);
+            LOG_I(RRC, PROTOCOL_CTXT_FMT"[check_handovers] Received %s from %s: instance %d, rb_id %d, muiP %d, confirmP %d, mode %d\n",
+                  PROTOCOL_CTXT_ARGS(&ctxt),
+                  ITTI_MSG_NAME (msg_p),
+                  ITTI_MSG_ORIGIN_NAME(msg_p),
+                  ITTI_MSG_INSTANCE (msg_p),
+                  GTPV1U_ENB_END_MARKER_IND (msg_p).rb_id,
+                  GTPV1U_ENB_END_MARKER_IND (msg_p).muip,
+                  GTPV1U_ENB_END_MARKER_IND (msg_p).confirmp,
+                  GTPV1U_ENB_END_MARKER_IND (msg_p).mode);
+            LOG_D(RRC, "Before calling pdcp_data_req from check_handovers! GTPV1U_ENB_END_MARKER_IND (msg_p).rb_id: %d \n", GTPV1U_ENB_END_MARKER_IND (msg_p).rb_id);
+            result = pdcp_data_req (&ctxt,
+                                    SRB_FLAG_NO,
+                                    GTPV1U_ENB_END_MARKER_IND (msg_p).rb_id,
+                                    GTPV1U_ENB_END_MARKER_IND (msg_p).muip,
+                                    GTPV1U_ENB_END_MARKER_IND (msg_p).confirmp,
+                                    GTPV1U_ENB_END_MARKER_IND (msg_p).sdu_size,
+                                    GTPV1U_ENB_END_MARKER_IND (msg_p).sdu_p,
+                                    GTPV1U_ENB_END_MARKER_IND (msg_p).mode
+#if (LTE_RRC_VERSION >= MAKE_VERSION(14, 0, 0))
+                                    , NULL, NULL
+#endif
+                                   );
+
+            if (result != TRUE) {
+              LOG_E(RRC, "target eNB failed to deliver the buffered SPGW data to PDCP!\n");
+            } else {
+              LOG_D(RRC, "target eNB delivered the buffered SPGW data to PDCP\n");
+            }
+
+            // Message buffer has been processed, free it now.
+            result = itti_free (ITTI_MSG_ORIGIN_ID(msg_p), GTPV1U_ENB_END_MARKER_IND (msg_p).sdu_p);
+            AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result);
+            break;
+
+          default:
+            LOG_E(RRC, "Received unexpected message %s\n", ITTI_MSG_NAME (msg_p));
+            break;
+        }
+
+        result = itti_free (ITTI_MSG_ORIGIN_ID(msg_p), msg_p);
+        AssertFatal (result == EXIT_SUCCESS, "Failed to free memory (%d)!\n", result);
+      }
+    } while (msg_p != NULL);
+
+    ue_context_p->ue_context.handover_info->endmark_state = ENDMARK_EMPTY;
+    ue_context_p->ue_context.handover_info->state = HO_FORWARDING_COMPLETE;
+  }
+
+#endif
 }
 }
@@ -7585,6 +7741,9 @@ rrc_eNB_decode_dcch(
             dedicated_DRB = 3;
             RC.mac[ctxt_pP->module_id]->UE_list.UE_sched_ctrl[UE_id].crnti_reconfigurationcomplete_flag = 0;
             ue_context_p->ue_context.Status = RRC_RECONFIGURED;
+            if (ue_context_p->ue_context.handover_info) {
+              ue_context_p->ue_context.handover_info->state = HO_CONFIGURED;
+            }
             LOG_I(RRC, PROTOCOL_RRC_CTXT_UE_FMT" UE State = RRC_HO_EXECUTION (xid %ld)\n",
                   PROTOCOL_RRC_CTXT_UE_ARGS(ctxt_pP),ul_dcch_msg->message.choice.c1.choice.rrcConnectionReconfigurationComplete.rrc_TransactionIdentifier);
@@ -7663,6 +7822,7 @@ rrc_eNB_decode_dcch(
           }
         } else if(dedicated_DRB == 0) {
           if(ue_context_p->ue_context.reestablishment_cause == LTE_ReestablishmentCause_spare1) {
+
             rrc_eNB_send_S1AP_INITIAL_CONTEXT_SETUP_RESP(ctxt_pP,
                 ue_context_p);
           } else {
@@ -8568,7 +8728,7 @@ void *rrc_enb_process_itti_msg(void *notUsed) {
     instance = ITTI_MSG_INSTANCE(msg_p);

     /* RRC_SUBFRAME_PROCESS is sent every subframe, do not log it */
     if (ITTI_MSG_ID(msg_p) != RRC_SUBFRAME_PROCESS)
-      LOG_I(RRC,"Received message %s\n",msg_name_p);
+      LOG_D(RRC,"Received message %s\n",msg_name_p);

     switch (ITTI_MSG_ID(msg_p)) {
       case TERMINATE_MESSAGE:
@@ -8707,6 +8867,9 @@ void *rrc_enb_process_itti_msg(void *notUsed) {
       case X2AP_HANDOVER_REQ_ACK: {
         struct rrc_eNB_ue_context_s *ue_context_p = NULL;
+        x2ap_handover_req_ack_t *x2ap_handover_req_ack = NULL;
+        hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;
+        gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL;
         ue_context_p = rrc_eNB_get_ue_context(RC.rrc[instance], X2AP_HANDOVER_REQ_ACK(msg_p).rnti);

         if (ue_context_p == NULL) {
           /* is it possible? */
@@ -8718,7 +8881,44 @@ void *rrc_enb_process_itti_msg(void *notUsed) {
         DevAssert(ue_context_p != NULL);

         if (ue_context_p->ue_context.handover_info->state != HO_REQUEST) abort();
+
+        hash_rc = hashtable_get(RC.gtpv1u_data_g->ue_mapping, ue_context_p->ue_context.rnti, (void **)&gtpv1u_ue_data_p);
+
+        /* store the target eNB GTP TEIDs */
+        if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
+          LOG_E(RRC, "X2AP_HANDOVER_REQ_ACK: hashtable_get failed while looking up ue rnti %x in hashtable ue_mapping\n", ue_context_p->ue_context.rnti);
+        } else {
+          uint8_t nb_e_rabs_tobesetup = 0;
+          ebi_t eps_bearer_id = 0;
+          int ip_offset = 0;
+          in_addr_t in_addr;
+          x2ap_handover_req_ack = &X2AP_HANDOVER_REQ_ACK(msg_p);
+          nb_e_rabs_tobesetup = x2ap_handover_req_ack->nb_e_rabs_tobesetup;
+          ue_context_p->ue_context.nb_x2u_e_rabs = nb_e_rabs_tobesetup;
+
+          for (int i=0; i<nb_e_rabs_tobesetup; i++) {
+            ip_offset = 0;
+            eps_bearer_id = x2ap_handover_req_ack->e_rabs_tobesetup[i].e_rab_id;
+            ue_context_p->ue_context.enb_gtp_x2u_ebi[i]  = eps_bearer_id;
+            ue_context_p->ue_context.enb_gtp_x2u_teid[i] = x2ap_handover_req_ack->e_rabs_tobesetup[i].gtp_teid;
+            gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_teNB = x2ap_handover_req_ack->e_rabs_tobesetup[i].gtp_teid;
+
+            if ((x2ap_handover_req_ack->e_rabs_tobesetup[i].eNB_addr.length == 4) ||
+                (x2ap_handover_req_ack->e_rabs_tobesetup[i].eNB_addr.length == 20)) {
+              in_addr = *((in_addr_t *)x2ap_handover_req_ack->e_rabs_tobesetup[i].eNB_addr.buffer);
+              ip_offset = 4;
+              gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].tenb_ip_addr = in_addr;
+              ue_context_p->ue_context.enb_gtp_x2u_addrs[i] = x2ap_handover_req_ack->e_rabs_tobesetup[i].eNB_addr;
+            }
+
+            if ((x2ap_handover_req_ack->e_rabs_tobesetup[i].eNB_addr.length == 16) ||
+                (x2ap_handover_req_ack->e_rabs_tobesetup[i].eNB_addr.length == 20)) {
+              memcpy(gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].tenb_ip6_addr.s6_addr,
+                     &x2ap_handover_req_ack->e_rabs_tobesetup[i].eNB_addr.buffer[ip_offset],
+                     16);
+            }
+          }
+        }
+
         rrc_eNB_process_handoverCommand(instance, ue_context_p, &X2AP_HANDOVER_REQ_ACK(msg_p));
         ue_context_p->ue_context.handover_info->state = HO_PREPARE;
         break;
diff --git a/openair2/RRC/LTE/rrc_eNB_GTPV1U.c b/openair2/RRC/LTE/rrc_eNB_GTPV1U.c
index ff8f455d6cb..d873a825ba9 100644
--- a/openair2/RRC/LTE/rrc_eNB_GTPV1U.c
+++ b/openair2/RRC/LTE/rrc_eNB_GTPV1U.c
@@ -27,14 +27,19 @@
  * \email: lionel.gauthier@eurecom.fr
 */

+//#if defined(ENABLE_USE_MME)
 # include "rrc_defs.h"
 # include "rrc_extern.h"
 # include "RRC/LTE/MESSAGES/asn1_msg.h"
 # include "rrc_eNB_GTPV1U.h"
 # include "rrc_eNB_UE_context.h"
 # include "msc.h"
+
+//# if defined(ENABLE_ITTI)
 # include "asn1_conversions.h"
 # include "intertask_interface.h"
+//#endif
+
 # include "common/ran_context.h"

 extern RAN_CONTEXT_t RC;
@@ -88,6 +93,88 @@ rrc_eNB_process_GTPV1U_CREATE_TUNNEL_RESP(
   }
 }

+//------------------------------------------------------------------------------
+boolean_t
+gtpv_data_req(
+  const protocol_ctxt_t*   const ctxt_pP,
+  const rb_id_t                  rb_idP,
+  const mui_t                    muiP,
+  const confirm_t                confirmP,
+  const sdu_size_t               sdu_sizeP,
+  uint8_t*                 const buffer_pP,
+  const pdcp_transmission_mode_t modeP,
+  uint32_t                       task_id
+)
+//------------------------------------------------------------------------------
+{
+  if (sdu_sizeP == 0) {
+    LOG_I(GTPU, "gtpv_data_req: sdu_sizeP == 0\n");
+    return FALSE;
+  }
+
+  LOG_D(GTPU, "gtpv_data_req: ue rnti %x sdu_sizeP %d rb id %d\n", ctxt_pP->rnti, sdu_sizeP, rb_idP);
+#if defined(ENABLE_ITTI)
+  {
+    MessageDef *message_p;
+    // Use a new buffer to avoid issues with the PDCP buffer content being changed by PDCP (asynchronous message handling).
+    uint8_t *message_buffer;
+
+    if (task_id == TASK_DATA_FORWARDING) {
+      LOG_I(GTPU, "gtpv_data_req: task_id = TASK_DATA_FORWARDING\n");
+      message_buffer = itti_malloc (TASK_GTPV1_U, TASK_DATA_FORWARDING, sdu_sizeP);
+      memcpy (message_buffer, buffer_pP, sdu_sizeP);
+      message_p = itti_alloc_new_message (TASK_GTPV1_U, GTPV1U_ENB_DATA_FORWARDING_IND);
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).frame     = ctxt_pP->frame;
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).enb_flag  = ctxt_pP->enb_flag;
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).rb_id     = rb_idP;
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).muip      = muiP;
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).confirmp  = confirmP;
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).sdu_size  = sdu_sizeP;
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).sdu_p     = message_buffer;
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).mode      = modeP;
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).module_id = ctxt_pP->module_id;
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).rnti      = ctxt_pP->rnti;
+      GTPV1U_ENB_DATA_FORWARDING_IND (message_p).eNB_index = ctxt_pP->eNB_index;
+      itti_send_msg_to_task (TASK_DATA_FORWARDING, ctxt_pP->instance, message_p);
+      return TRUE; // TODO: should be changed to a CNF message later; currently RRC lite does not use the returned value anyway.
+    } else if (task_id == TASK_END_MARKER) {
+      LOG_I(GTPU, "gtpv_data_req: task_id = TASK_END_MARKER\n");
+      message_buffer = itti_malloc (TASK_GTPV1_U, TASK_END_MARKER, sdu_sizeP);
+      memcpy (message_buffer, buffer_pP, sdu_sizeP);
+      message_p = itti_alloc_new_message (TASK_GTPV1_U, GTPV1U_ENB_END_MARKER_IND);
+      GTPV1U_ENB_END_MARKER_IND (message_p).frame     = ctxt_pP->frame;
+      GTPV1U_ENB_END_MARKER_IND (message_p).enb_flag  = ctxt_pP->enb_flag;
+      GTPV1U_ENB_END_MARKER_IND (message_p).rb_id     = rb_idP;
+      GTPV1U_ENB_END_MARKER_IND (message_p).muip      = muiP;
+      GTPV1U_ENB_END_MARKER_IND (message_p).confirmp  = confirmP;
+      GTPV1U_ENB_END_MARKER_IND (message_p).sdu_size  = sdu_sizeP;
+      GTPV1U_ENB_END_MARKER_IND (message_p).sdu_p     = message_buffer;
+      GTPV1U_ENB_END_MARKER_IND (message_p).mode      = modeP;
+      GTPV1U_ENB_END_MARKER_IND (message_p).module_id = ctxt_pP->module_id;
+      GTPV1U_ENB_END_MARKER_IND (message_p).rnti      = ctxt_pP->rnti;
+      GTPV1U_ENB_END_MARKER_IND (message_p).eNB_index = ctxt_pP->eNB_index;
+      itti_send_msg_to_task (TASK_END_MARKER, ctxt_pP->instance, message_p);
+      return TRUE; // TODO: should be changed to a CNF message later; currently RRC lite does not use the returned value anyway.
+    }
+  }
+#endif
+
+  return TRUE;
+}
+
+//#endif
+
 void rrc_eNB_send_GTPV1U_ENB_DELETE_TUNNEL_REQ(
   module_id_t enb_mod_idP,
   const rrc_eNB_ue_context_t* const ue_context_pP
@@ -112,3 +199,5 @@ void rrc_eNB_send_GTPV1U_ENB_DELETE_TUNNEL_REQ(
   }
   itti_send_msg_to_task(TASK_GTPV1_U, ENB_MODULE_ID_TO_INSTANCE(enb_mod_idP), msg);
 }
+
+
diff --git a/openair2/RRC/LTE/rrc_eNB_GTPV1U.h b/openair2/RRC/LTE/rrc_eNB_GTPV1U.h
index 23806f6a048..00c74903590 100644
--- a/openair2/RRC/LTE/rrc_eNB_GTPV1U.h
+++ b/openair2/RRC/LTE/rrc_eNB_GTPV1U.h
@@ -53,4 +53,16 @@ void rrc_eNB_send_GTPV1U_ENB_DELETE_TUNNEL_REQ(
   const rrc_eNB_ue_context_t* const ue_context_pP
 );

+boolean_t
+gtpv_data_req(
+  const protocol_ctxt_t*   const ctxt_pP,
+  const rb_id_t                  rb_idP,
+  const mui_t                    muiP,
+  const confirm_t                confirmP,
+  const sdu_size_t               sdu_sizeP,
+  uint8_t*                 const buffer_pP,
+  const pdcp_transmission_mode_t modeP,
+  uint32_t                       task_id
+);
+
 #endif /* RRC_ENB_GTPV1U_H_ */
diff --git a/openair2/RRC/LTE/rrc_eNB_S1AP.c b/openair2/RRC/LTE/rrc_eNB_S1AP.c
index dabe5b15246..dc5e386ea16 100644
--- a/openair2/RRC/LTE/rrc_eNB_S1AP.c
+++ b/openair2/RRC/LTE/rrc_eNB_S1AP.c
@@ -2040,6 +2040,70 @@ int rrc_eNB_send_PATH_SWITCH_REQ(const protocol_ctxt_t *const ctxt_pP,
   return 0;
 }

+int rrc_eNB_process_X2AP_TUNNEL_SETUP_REQ(instance_t instance, rrc_eNB_ue_context_t* const ue_context_target_p) {
+  gtpv1u_enb_create_x2u_tunnel_req_t  create_tunnel_req;
+  gtpv1u_enb_create_x2u_tunnel_resp_t create_tunnel_resp;
+  uint8_t e_rab_done;
+  uint8_t inde_list[NB_RB_MAX - 3] = {0};
+
+  if (ue_context_target_p == NULL) {
+    return (-1);
+  }
+
+  LOG_I(RRC, "[eNB %d] rrc_eNB_process_X2AP_TUNNEL_SETUP_REQ: rnti %u nb_of_e_rabs %d\n",
+        instance, ue_context_target_p->ue_context.rnti, ue_context_target_p->ue_context.nb_of_e_rabs);
+
+  /* Save e-RAB information for later */
+  {
+    int i;
+    memset(&create_tunnel_req, 0, sizeof(create_tunnel_req));
+    uint8_t nb_e_rabs_tosetup = ue_context_target_p->ue_context.nb_of_e_rabs;
+    e_rab_done = 0;
+
+    for (i = 0; i < nb_e_rabs_tosetup; i++) {
+      if (ue_context_target_p->ue_context.e_rab[i].status >= E_RAB_STATUS_DONE)
+        continue;
+
+      create_tunnel_req.eps_bearer_id[e_rab_done] = ue_context_target_p->ue_context.e_rab[i].param.e_rab_id;
+      LOG_I(RRC, "x2 tunnel setup: local index %d UE id %x, eps id %d \n",
+            i,
+            ue_context_target_p->ue_context.rnti,
+            create_tunnel_req.eps_bearer_id[e_rab_done]);
+      inde_list[e_rab_done] = i;
+      e_rab_done++;
+    }
+
+    create_tunnel_req.rnti = ue_context_target_p->ue_context.rnti; // rnti was zeroed by the memset above
+    create_tunnel_req.num_tunnels = e_rab_done;
+    // NN: not sure if we should create a new tunnel: need to check teid, etc.
+    gtpv1u_create_x2u_tunnel(
+      instance,
+      &create_tunnel_req,
+      &create_tunnel_resp);
+    ue_context_target_p->ue_context.nb_x2u_e_rabs = create_tunnel_resp.num_tunnels;
+
+    for (i = 0; i < create_tunnel_resp.num_tunnels; i++) {
+      ue_context_target_p->ue_context.enb_gtp_x2u_teid[inde_list[i]]  = create_tunnel_resp.enb_X2u_teid[i];
+      ue_context_target_p->ue_context.enb_gtp_x2u_addrs[inde_list[i]] = create_tunnel_resp.enb_addr;
+      ue_context_target_p->ue_context.enb_gtp_x2u_ebi[inde_list[i]]   = create_tunnel_resp.eps_bearer_id[i];
+
+      LOG_I(RRC, "rrc_eNB_process_X2AP_TUNNEL_SETUP_REQ tunnel (%u, %u) bearer UE context index %u, msg index %u, eps bearer id %u, gtp addr len %d \n",
+            create_tunnel_resp.enb_X2u_teid[i],
+            ue_context_target_p->ue_context.enb_gtp_x2u_teid[inde_list[i]],
+            inde_list[i],
+            i,
+            create_tunnel_resp.eps_bearer_id[i],
+            create_tunnel_resp.enb_addr.length);
+    }
+  }
+
+  return (0);
+}
+
 int rrc_eNB_process_S1AP_PATH_SWITCH_REQ_ACK (MessageDef *msg_p, const char *msg_name, instance_t instance) {
   uint16_t ue_initial_id;
   uint32_t eNB_ue_s1ap_id;
diff --git a/openair2/RRC/LTE/rrc_eNB_S1AP.h b/openair2/RRC/LTE/rrc_eNB_S1AP.h
index 5feb55d635c..e5ff9c27d60 100644
--- a/openair2/RRC/LTE/rrc_eNB_S1AP.h
+++ b/openair2/RRC/LTE/rrc_eNB_S1AP.h
@@ -278,6 +278,7 @@ int rrc_eNB_send_S1AP_E_RAB_RELEASE_RESPONSE(const protocol_ctxt_t *const ctxt_p
 int rrc_eNB_send_PATH_SWITCH_REQ(const protocol_ctxt_t *const ctxt_pP,
                                  rrc_eNB_ue_context_t *const ue_context_pP);

+int rrc_eNB_process_X2AP_TUNNEL_SETUP_REQ(instance_t instance, rrc_eNB_ue_context_t* const ue_context_target_p);
 int rrc_eNB_process_S1AP_PATH_SWITCH_REQ_ACK (MessageDef *msg_p, const char *msg_name, instance_t instance);

 int rrc_eNB_send_X2AP_UE_CONTEXT_RELEASE(const protocol_ctxt_t* const ctxt_pP, rrc_eNB_ue_context_t* const ue_context_pP);
diff --git a/openair2/X2AP/x2ap_eNB_generate_messages.c b/openair2/X2AP/x2ap_eNB_generate_messages.c
index 7d84a7d4982..38ebd732ac3 100644
--- a/openair2/X2AP/x2ap_eNB_generate_messages.c
+++ b/openair2/X2AP/x2ap_eNB_generate_messages.c
@@ -797,6 +797,27 @@ int x2ap_eNB_generate_x2_handover_request_ack (x2ap_eNB_instance_t *instance_p,
     e_RABs_Admitted_Item = &e_RABS_Admitted_ItemIEs->value.choice.E_RABs_Admitted_Item;
     {
       e_RABs_Admitted_Item->e_RAB_ID = x2ap_handover_req_ack->e_rabs_tobesetup[i].e_rab_id;
+      e_RABs_Admitted_Item->uL_GTP_TunnelEndpoint = NULL;
+      e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint = (X2AP_GTPtunnelEndpoint_t *)calloc(1, sizeof(*e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint));
+      e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.size = (uint8_t)x2ap_handover_req_ack->e_rabs_tobesetup[i].eNB_addr.length;
+      e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.bits_unused = 0;
+      e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.buf =
+        calloc(1, e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.size);
+      memcpy (e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.buf,
+              x2ap_handover_req_ack->e_rabs_tobesetup[i].eNB_addr.buffer,
+              e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.size);
+
+      X2AP_DEBUG("X2 handover response target ip addr: length %lu bits_unused %d buf %d.%d.%d.%d\n",
+                 e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.size,
+                 e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.bits_unused,
+                 e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.buf[0],
+                 e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.buf[1],
+                 e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.buf[2],
+                 e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.buf[3]);
+
+      INT32_TO_OCTET_STRING(x2ap_handover_req_ack->e_rabs_tobesetup[i].gtp_teid, &e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->gTP_TEID);
     }
     ASN_SEQUENCE_ADD(&ie->value.choice.E_RABs_Admitted_List.list, e_RABS_Admitted_ItemIEs);
   }
diff --git a/openair2/X2AP/x2ap_eNB_handler.c b/openair2/X2AP/x2ap_eNB_handler.c
index 2a44501a337..67a5d389b46 100644
--- a/openair2/X2AP/x2ap_eNB_handler.c
+++ b/openair2/X2AP/x2ap_eNB_handler.c
@@ -749,6 +749,8 @@ int x2ap_eNB_handle_handover_response (instance_t instance,
   X2AP_HandoverRequestAcknowledge_t     *x2HandoverRequestAck;
   X2AP_HandoverRequestAcknowledge_IEs_t *ie;
+  X2AP_E_RABs_Admitted_ItemIEs_t        *e_RABS_Admitted_ItemIEs;
+  X2AP_E_RABs_Admitted_Item_t           *e_RABs_Admitted_Item;
   x2ap_eNB_instance_t                   *instance_p;
   x2ap_eNB_data_t                       *x2ap_eNB_data;
@@ -816,6 +818,56 @@ int x2ap_eNB_handle_handover_response (instance_t instance,

   X2AP_HANDOVER_REQ_ACK(msg).rnti = rnti;

+  X2AP_FIND_PROTOCOLIE_BY_ID(X2AP_HandoverRequestAcknowledge_IEs_t, ie, x2HandoverRequestAck,
+                             X2AP_ProtocolIE_ID_id_E_RABs_Admitted_List, true);
+
+  if (ie == NULL) {
+    X2AP_ERROR("%s %d: ie is a NULL pointer \n", __FILE__, __LINE__);
+    return -1;
+  } else {
+    if (ie->value.choice.E_RABs_Admitted_List.list.count > 0) {
+      uint8_t nb_e_rabs_tobesetup;
+      nb_e_rabs_tobesetup = ie->value.choice.E_RABs_Admitted_List.list.count;
+      X2AP_HANDOVER_REQ_ACK(msg).nb_e_rabs_tobesetup = nb_e_rabs_tobesetup;
+
+      for (int i=0; i<nb_e_rabs_tobesetup; i++) {
+        e_RABS_Admitted_ItemIEs = (X2AP_E_RABs_Admitted_ItemIEs_t *) ie->value.choice.E_RABs_Admitted_List.list.array[i];
+        e_RABs_Admitted_Item = &e_RABS_Admitted_ItemIEs->value.choice.E_RABs_Admitted_Item;
+        X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].e_rab_id = e_RABs_Admitted_Item->e_RAB_ID;
+        X2AP_DEBUG("x2u tunnel: index %d e_rab_id %d\n", i, X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].e_rab_id);
+
+        if (e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint == NULL) {
+          X2AP_DEBUG("%s %d: X2AP_E_RABs_Admitted_Item_t->dL_GTP_TunnelEndpoint is a NULL pointer \n", __FILE__, __LINE__);
+          continue;
+        }
+
+        memcpy(X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].eNB_addr.buffer,
+               e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.buf,
+               e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.size);
+        X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].eNB_addr.length = e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->transportLayerAddress.size;
+        OCTET_STRING_TO_INT32(&e_RABs_Admitted_Item->dL_GTP_TunnelEndpoint->gTP_TEID,
+                              X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].gtp_teid);
+
+        X2AP_DEBUG("x2u tunnel: index %d target enb ip %d.%d.%d.%d length %d gtp teid %u\n",
+                   i,
+                   X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].eNB_addr.buffer[0],
+                   X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].eNB_addr.buffer[1],
+                   X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].eNB_addr.buffer[2],
+                   X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].eNB_addr.buffer[3],
+                   X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].eNB_addr.length,
+                   X2AP_HANDOVER_REQ_ACK(msg).e_rabs_tobesetup[i].gtp_teid);
+      }
+    }
+  }
+
   X2AP_FIND_PROTOCOLIE_BY_ID(X2AP_HandoverRequestAcknowledge_IEs_t, ie, x2HandoverRequestAck,
                              X2AP_ProtocolIE_ID_id_TargeteNBtoSource_eNBTransparentContainer, true);
diff --git a/openair3/GTPV1-U/gtpv1u_eNB.c b/openair3/GTPV1-U/gtpv1u_eNB.c
index fcadb0530e4..ef1a2912a39 100644
--- a/openair3/GTPV1-U/gtpv1u_eNB.c
+++ b/openair3/GTPV1-U/gtpv1u_eNB.c
@@ -50,12 +50,18 @@
 #include "common/ran_context.h"
 #include "gtpv1u_eNB_defs.h"
 #include "gtpv1u_eNB_task.h"
+#include "rrc_eNB_GTPV1U.h"

 #undef GTP_DUMP_SOCKET

 extern unsigned char NB_eNB_INST;
 extern RAN_CONTEXT_t RC;

+extern struct rrc_eNB_ue_context_s*
+rrc_eNB_get_ue_context(
+  eNB_RRC_INST* rrc_instance_pP,
+  rnti_t rntiP);
+
 #if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0
 #include <linux/if.h>
 static int gtpv1u_dump_socket_g;
@@ -105,6 +111,27 @@ static void gtpv1u_eNB_write_dump_socket(uint8_t *buffer_pP, uint32_t buffer_len
 #endif

+//-----------------------------------------------------------------------------
+static int gtpv1u_eNB_get_msgsource(struct rrc_eNB_ue_context_s *ue_context_p, teid_t teid)
+{
+  int erab_index = 0;
+
+  /* source eNB */
+  if (ue_context_p->ue_context.handover_info != NULL && ue_context_p->ue_context.handover_info->state == HO_COMPLETE) {
+    return GTPV1U_MSG_FROM_SPGW;
+  }
+
+  /* target eNB */
+  for (erab_index = 0; erab_index < ue_context_p->ue_context.nb_x2u_e_rabs; erab_index++) {
+    if (ue_context_p->ue_context.enb_gtp_x2u_teid[erab_index] == teid) {
+      return GTPV1U_MSG_FROM_SOURCE_ENB;
+    }
+  }
+
+  return GTPV1U_MSG_FROM_SPGW;
+}
+
 //-----------------------------------------------------------------------------
 static int gtpv1u_eNB_send_init_udp(const Gtpv1uS1Req *req) {
   // Create and alloc new message
@@ -198,10 +225,16 @@ NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
     case NW_GTPV1U_ULP_API_RECV_TPDU: {
       uint8_t buffer[4096];
       uint32_t buffer_len;
+      struct rrc_eNB_ue_context_s *ue_context_p;
+      uint16_t msgType = NW_GTP_GPDU;
+      NwGtpv1uMsgT *pMsg = NULL;
+
      /* Nw-gptv1u stack has processed a PDU. we can schedule it to PDCP
       * for transmission.
       */
      teid = pUlpApi->apiInfo.recvMsgInfo.teid;
+      pMsg = (NwGtpv1uMsgT *) pUlpApi->apiInfo.recvMsgInfo.hMsg;
+      msgType = pMsg->msgType;

      if (NW_GTPV1U_OK != nwGtpv1uMsgGetTpdu(pUlpApi->apiInfo.recvMsgInfo.hMsg,
                                             buffer, &buffer_len)) {
@@ -243,6 +276,158 @@ NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
                0,0,
                (gtpv1u_teid_data_p->eps_bearer_id) ? gtpv1u_teid_data_p->eps_bearer_id - 4 : 5-4,
                buffer_len);
+
+        ue_context_p = rrc_eNB_get_ue_context(RC.rrc[ctxt.module_id], ctxt.rnti);
+
+        if ((ue_context_p != NULL) &&
+            (ue_context_p->ue_context.handover_info != NULL) &&
+            (ue_context_p->ue_context.handover_info->state < HO_FORWARDING_COMPLETE)) {
+          if (msgType == NW_GTP_END_MARKER) {
+            /* in the source eNB, UE in RRC_HO_EXECUTION mode */
+            if (ue_context_p->ue_context.Status == RRC_HO_EXECUTION && ue_context_p->ue_context.handover_info->state == HO_COMPLETE) {
+              /* set handover state */
+              //ue_context_p->ue_context.handover_info->state = HO_END_MARKER;
+              MessageDef *msg;
+              // Relay the end marker towards the target eNB
+              msg = itti_alloc_new_message(TASK_GTPV1_U, GTPV1U_ENB_END_MARKER_REQ);
+              GTPV1U_ENB_END_MARKER_REQ(msg).buffer = itti_malloc(TASK_GTPV1_U, TASK_GTPV1_U, GTPU_HEADER_OVERHEAD_MAX + buffer_len);
+              memcpy(&GTPV1U_ENB_END_MARKER_REQ(msg).buffer[GTPU_HEADER_OVERHEAD_MAX], buffer, buffer_len);
+              GTPV1U_ENB_END_MARKER_REQ(msg).length = buffer_len;
+              GTPV1U_ENB_END_MARKER_REQ(msg).rnti   = ctxt.rnti;
+              GTPV1U_ENB_END_MARKER_REQ(msg).rab_id = gtpv1u_teid_data_p->eps_bearer_id;
+              GTPV1U_ENB_END_MARKER_REQ(msg).offset = GTPU_HEADER_OVERHEAD_MAX;
+              LOG_I(GTPU, "Send End Marker to GTPV1-U at frame %d and subframe %d \n", ctxt.frame, ctxt.subframe);
+              itti_send_msg_to_task(TASK_GTPV1_U, ENB_MODULE_ID_TO_INSTANCE(ctxt.module_id), msg);
+              return NW_GTPV1U_OK;
+            }
+          }
+
+          if (ue_context_p->ue_context.Status == RRC_HO_EXECUTION || ue_context_p->ue_context.Status == RRC_RECONFIGURED) {
+            int msgsrc = gtpv1u_eNB_get_msgsource(ue_context_p, teid);
+            LOG_D(GTPU, "UE INFO. ueStatus %d, handover state %d, forwarding state %d, from %s, message type %s\n",
+                  ue_context_p->ue_context.Status,
+                  ue_context_p->ue_context.handover_info->state,
+                  ue_context_p->ue_context.handover_info->forwarding_state,
+                  msgsrc == GTPV1U_MSG_FROM_SOURCE_ENB ? "source eNB" : "EPC",
+                  msgsrc != GTPV1U_MSG_FROM_SOURCE_ENB ? "UDP DATA" :
+                  msgType == NW_GTP_END_MARKER ? "END MARKER" : "DATA FORWARDING");
+
+            /* target eNB */
+            if (msgType == NW_GTP_END_MARKER) {
+              LOG_I(GTPU, "target eNB received an END MARKER\n");
+              ue_context_p->ue_context.handover_info->state = HO_END_MARKER;
+              gtpv1u_enb_delete_tunnel_req_t delete_tunnel_req;
+              memset(&delete_tunnel_req, 0, sizeof(delete_tunnel_req));
+              delete_tunnel_req.rnti = ctxt.rnti;
+              gtpv1u_delete_x2u_tunnel(ctxt.module_id, &delete_tunnel_req, GTPV1U_TARGET_ENB);
+              return NW_GTPV1U_OK;
+            }
+
+            /* message from the source eNB */
+            if (msgsrc == GTPV1U_MSG_FROM_SOURCE_ENB) {
+              LOG_I(GTPU, "Received a data forwarding message of length %d\n", buffer_len);
+#if defined(LOG_GTPU) && LOG_GTPU > 0
+              LOG_T(GTPU, "forwarding data info (%d bytes):\n", buffer_len);
+
+              for (int i=1; i<=buffer_len; i++) {
+                LOG_T(GTPU, "%02x ", buffer[i-1]);
+
+                if (i%20 == 0) LOG_T(GTPU, "\n");
+              }
+
+              LOG_T(GTPU, "\n");
+#endif
+              result = gtpv_data_req(
+                         &ctxt,
+                         (gtpv1u_teid_data_p->eps_bearer_id) ? gtpv1u_teid_data_p->eps_bearer_id - 4 : 5-4,
+                         0, // mui
+                         SDU_CONFIRM_NO, // confirm
+                         buffer_len,
+                         buffer,
+                         PDCP_TRANSMISSION_MODE_DATA,
+                         TASK_DATA_FORWARDING);
+
+              if (result == FALSE) {
+                LOG_W(GTPU, "DATA FORWARDING message save failed\n");
+                return NW_GTPV1U_FAILURE;
+              }
+
+              ue_context_p->ue_context.handover_info->forwarding_state = FORWARDING_NO_EMPTY;
+              return NW_GTPV1U_OK;
+            }
+            /* message from the EPC */
+            else {
+              /* in the source eNB, UE in RRC_HO_EXECUTION mode */
+              if (ue_context_p->ue_context.handover_info->state == HO_COMPLETE) {
+                MessageDef *msg;
+                // Forward the late SPGW data to the target eNB
+                msg = itti_alloc_new_message(TASK_GTPV1_U, GTPV1U_ENB_DATA_FORWARDING_REQ);
+                GTPV1U_ENB_DATA_FORWARDING_REQ(msg).buffer = itti_malloc(TASK_GTPV1_U, TASK_GTPV1_U, GTPU_HEADER_OVERHEAD_MAX + buffer_len);
+                memcpy(&GTPV1U_ENB_DATA_FORWARDING_REQ(msg).buffer[GTPU_HEADER_OVERHEAD_MAX], buffer, buffer_len);
+                GTPV1U_ENB_DATA_FORWARDING_REQ(msg).length = buffer_len;
+                GTPV1U_ENB_DATA_FORWARDING_REQ(msg).rnti   = ctxt.rnti;
+                GTPV1U_ENB_DATA_FORWARDING_REQ(msg).rab_id = gtpv1u_teid_data_p->eps_bearer_id;
+                GTPV1U_ENB_DATA_FORWARDING_REQ(msg).offset = GTPU_HEADER_OVERHEAD_MAX;
+#if defined(LOG_GTPU) && LOG_GTPU > 0
+                LOG_T(GTPU, "Send data forwarding sdu_buffer to target eNB, len %d, info:\n", buffer_len);
+
+                for (int i=1; i<=buffer_len; i++) {
+                  LOG_T(GTPU, "%02x ", buffer[i-1]);
+
+                  if (i%20 == 0) LOG_T(GTPU, "\n");
+                }
+
+                LOG_T(GTPU, "\n");
+#endif
+                LOG_I(GTPU, "Send data forwarding to GTPV1-U at frame %d and subframe %d \n", ctxt.frame, ctxt.subframe);
+                itti_send_msg_to_task(TASK_GTPV1_U, ENB_MODULE_ID_TO_INSTANCE(ctxt.module_id), msg);
+                return NW_GTPV1U_OK;
+              }
+
+              /* target eNB: X2 HO forwarding is in progress, so save the SPGW message to TASK_END_MARKER */
+              if (ue_context_p->ue_context.handover_info->state != HO_COMPLETE &&
+                  ue_context_p->ue_context.handover_info->state != HO_END_MARKER) {
+                LOG_I(GTPU, "X2 HO forwarding is in progress; received an SPGW message of length %d\n", buffer_len);
+#if defined(LOG_GTPU) && LOG_GTPU > 0
+                LOG_T(GTPU, "spgw data info (%d bytes):\n", buffer_len);
+
+                for (int i=1; i<=buffer_len; i++) {
+                  LOG_T(GTPU, "%02x ", buffer[i-1]);
+
+                  if (i%20 == 0) LOG_T(GTPU, "\n");
+                }
+
+                LOG_T(GTPU, "\n");
+#endif
+                result = gtpv_data_req(
+                           &ctxt,
+                           (gtpv1u_teid_data_p->eps_bearer_id) ? gtpv1u_teid_data_p->eps_bearer_id - 4 : 5-4,
+                           0, // mui
+                           SDU_CONFIRM_NO, // confirm
+                           buffer_len,
+                           buffer,
+                           PDCP_TRANSMISSION_MODE_DATA,
+                           TASK_END_MARKER);
+
+                if (result == FALSE) {
+                  LOG_W(GTPU, "END MARKER message save failed\n");
+                  return NW_GTPV1U_FAILURE;
+                }
+
+                ue_context_p->ue_context.handover_info->endmark_state = ENDMARK_NO_EMPTY;
+                return NW_GTPV1U_OK;
+              }
+            }
+          }
+        }
+
        result = pdcp_data_req(
                   &ctxt,
                   SRB_FLAG_NO,
@@ -259,7 +444,7 @@ NwGtpv1uRcT gtpv1u_eNB_process_stack_req(

        if ( result == FALSE ) {
          if (ctxt.configured == FALSE )
-          LOG_W(GTPU, "PDCP data request failed, cause: RB is not configured!\n") ;
+          LOG_W(GTPU, "PDCP data request failed, cause: [UE:%x] RB is not configured!\n", ctxt.rnti) ;
         else
           LOG_W(GTPU, "PDCP data request failed\n");
@@ -550,6 +735,154 @@ gtpv1u_new_data_req(
   return 0;
 }

+//-----------------------------------------------------------------------------
+int
+gtpv1u_create_x2u_tunnel(
+  const instance_t instanceP,
+  const gtpv1u_enb_create_x2u_tunnel_req_t *const create_tunnel_req_pP,
+  gtpv1u_enb_create_x2u_tunnel_resp_t      *const create_tunnel_resp_pP)
+{
+  /* Create a new nw-gtpv1-u stack req using API */
+  NwGtpv1uUlpApiT stack_req;
+  NwGtpv1uRcT     rc = NW_GTPV1U_FAILURE;
+  /* Local tunnel end-point identifier */
+  teid_t          x2u_teid = 0;
+  gtpv1u_teid_data_t *gtpv1u_teid_data_p = NULL;
+  hashtable_rc_t  hash_rc = HASH_TABLE_KEY_NOT_EXISTS;
+  int             i;
+  ebi_t           eps_bearer_id = 0;
+
+  MSC_LOG_RX_MESSAGE(
+    MSC_GTPU_ENB,
+    MSC_RRC_ENB,
+    NULL, 0,
+    MSC_AS_TIME_FMT" CREATE_X2U_TUNNEL_REQ RNTI %"PRIx16" inst %u ntuns %u ebid %u enb-x2u teid %u",
+    0,0,
+    create_tunnel_req_pP->rnti,
+    instanceP,
+    create_tunnel_req_pP->num_tunnels,
+    create_tunnel_req_pP->eps_bearer_id[0],
+    create_tunnel_req_pP->tenb_X2u_teid[0]);
+  create_tunnel_resp_pP->rnti        = create_tunnel_req_pP->rnti;
+  create_tunnel_resp_pP->status      = 0;
+  create_tunnel_resp_pP->num_tunnels = 0;
+
+  for (i = 0; i < create_tunnel_req_pP->num_tunnels; i++) {
+    eps_bearer_id = create_tunnel_req_pP->eps_bearer_id[i];
+    LOG_D(GTPU, "Rx GTPV1U_ENB_CREATE_X2U_TUNNEL_REQ ue rnti %x eps bearer id %u\n",
+          create_tunnel_req_pP->rnti, eps_bearer_id);
+    memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));
+    stack_req.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT;
+
+    do {
+      x2u_teid = gtpv1u_new_teid();
+      LOG_D(GTPU, "gtpv1u_create_x2u_tunnel() 0x%x %u(dec)\n", x2u_teid, x2u_teid);
+      stack_req.apiInfo.createTunnelEndPointInfo.teid          = x2u_teid;
+      stack_req.apiInfo.createTunnelEndPointInfo.hUlpSession   = 0;
+      stack_req.apiInfo.createTunnelEndPointInfo.hStackSession = 0;
+      rc = nwGtpv1uProcessUlpReq(RC.gtpv1u_data_g->gtpv1u_stack, &stack_req);
+      LOG_D(GTPU, ".\n");
+    } while (rc != NW_GTPV1U_OK);
+
+    memcpy(&create_tunnel_resp_pP->enb_addr.buffer,
+           &RC.gtpv1u_data_g->enb_ip_address_for_S1u_S12_S4_up,
+           sizeof(in_addr_t));
+    LOG_D(GTPU, "gtpv1u_create_x2u_tunnel() end addr %d.%d.%d.%d\n",
+          create_tunnel_resp_pP->enb_addr.buffer[0],
+          create_tunnel_resp_pP->enb_addr.buffer[1],
+          create_tunnel_resp_pP->enb_addr.buffer[2],
+          create_tunnel_resp_pP->enb_addr.buffer[3]);
+    create_tunnel_resp_pP->enb_addr.length  = sizeof(in_addr_t);
+    create_tunnel_resp_pP->eps_bearer_id[i] = eps_bearer_id;
+    create_tunnel_resp_pP->num_tunnels     += 1;
+    //-----------------------
+    // GTPV1U->PDCP mapping
+    //-----------------------
+    create_tunnel_resp_pP->enb_X2u_teid[i] = x2u_teid;
+    hash_rc = hashtable_get(RC.gtpv1u_data_g->teid_mapping, x2u_teid, (void **)&gtpv1u_teid_data_p);
+
+    if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
+      gtpv1u_teid_data_p = calloc (1, sizeof(gtpv1u_teid_data_t));
+      gtpv1u_teid_data_p->enb_id        = 0; // TO DO
+      gtpv1u_teid_data_p->ue_id         = create_tunnel_req_pP->rnti;
+      gtpv1u_teid_data_p->eps_bearer_id = eps_bearer_id;
+      hash_rc = hashtable_insert(RC.gtpv1u_data_g->teid_mapping, x2u_teid, gtpv1u_teid_data_p);
+      AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting teid mapping in GTPV1U hashtable");
+    } else {
+      create_tunnel_resp_pP->enb_X2u_teid[i] = 0;
+      create_tunnel_resp_pP->status          = 0xFF;
+    }
+  }
+
+  MSC_LOG_TX_MESSAGE(
+    MSC_GTPU_ENB,
+    MSC_RRC_ENB,
+    NULL, 0,
+    "0 GTPV1U_ENB_CREATE_TUNNEL_RESP rnti %x teid %x",
+    create_tunnel_resp_pP->rnti,
+    x2u_teid);
+  LOG_D(GTPU, "Tx GTPV1U_ENB_CREATE_TUNNEL_RESP ue rnti %x status %d\n",
+        create_tunnel_req_pP->rnti,
+        create_tunnel_resp_pP->status);
+  return 0;
+}
+
+//-----------------------------------------------------------------------------
+int gtpv1u_delete_x2u_tunnel(
+  const instance_t instanceP,
+  const gtpv1u_enb_delete_tunnel_req_t *const req_pP,
+  int enbflag)
+{
+  gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL;
+  hashtable_rc_t    hash_rc          = HASH_TABLE_KEY_NOT_EXISTS;
+  int               erab_index       = 0;
+  ebi_t             eps_bearer_id    = 0;
+  struct rrc_eNB_ue_context_s *ue_context_p = NULL;
+  ue_context_p = rrc_eNB_get_ue_context(RC.rrc[instanceP], req_pP->rnti);
+
+  if (ue_context_p != NULL) {
+    /* in the source eNB */
+    if (enbflag == GTPV1U_SOURCE_ENB) {
+      hash_rc = hashtable_get(RC.gtpv1u_data_g->ue_mapping, req_pP->rnti, (void **)&gtpv1u_ue_data_p);
+
+      if (hash_rc == HASH_TABLE_OK) {
+        for (erab_index = 0; erab_index < ue_context_p->ue_context.nb_x2u_e_rabs; erab_index++) {
+          eps_bearer_id = ue_context_p->ue_context.enb_gtp_x2u_ebi[erab_index];
+          LOG_I(GTPU, "gtpv1u_delete_x2u_tunnel user rnti %x teNB X2U teid %u eps bearer id %u\n",
+                req_pP->rnti,
+                gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_teNB,
+                ue_context_p->ue_context.enb_gtp_x2u_ebi[erab_index]);
+          gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_teNB    = 0;
+          gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].tenb_ip_addr = 0;
+          //gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].tenb_ip6_addr = 0;
+        }
+
+        ue_context_p->ue_context.nb_x2u_e_rabs = 0;
+      }
+    }
+    /* in the target eNB */
+    else {
+      for (erab_index = 0; erab_index < ue_context_p->ue_context.nb_x2u_e_rabs; erab_index++) {
+        //-----------------------
+        // GTPV1U->PDCP mapping
+        //-----------------------
+        hash_rc = hashtable_remove(RC.gtpv1u_data_g->teid_mapping, ue_context_p->ue_context.enb_gtp_x2u_teid[erab_index]);
+        LOG_I(GTPU, "Removed user rnti %x , enb X2U teid %u\n", req_pP->rnti, ue_context_p->ue_context.enb_gtp_x2u_teid[erab_index]);
+
+        if (hash_rc != HASH_TABLE_OK) {
+          LOG_D(GTPU, "User rnti %x , enb X2U teid %u not found\n", req_pP->rnti, ue_context_p->ue_context.enb_gtp_x2u_teid[erab_index]);
+        }
+      }
+
+      ue_context_p->ue_context.nb_x2u_e_rabs = 0;
+    }
+  }
+
+  return 0;
+}
+
 //-----------------------------------------------------------------------------
 int
 gtpv1u_create_s1u_tunnel(
@@ -1043,6 +1376,176 @@ void *gtpv1u_eNB_process_itti_msg(void *notUsed) {
     }
     break;

+    case GTPV1U_ENB_DATA_FORWARDING_REQ: {
+      gtpv1u_enb_data_forwarding_req_t *data_req_p = NULL;
+      NwGtpv1uUlpApiT stack_req;
+      NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
+      hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;
+      gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL;
+      teid_t enb_s1u_teid  = 0;
+      teid_t tenb_x2u_teid = 0;
+      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_IN);
+      data_req_p = &GTPV1U_ENB_DATA_FORWARDING_REQ(received_message_p);
+      //ipv4_send_data(ipv4_data_p->sd, data_ind_p->buffer, data_ind_p->length);
+#if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0
+      gtpv1u_eNB_write_dump_socket(&data_req_p->buffer[data_req_p->offset], data_req_p->length);
+#endif
+      memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));
+      hash_rc = hashtable_get(RC.gtpv1u_data_g->ue_mapping, (uint64_t)data_req_p->rnti, (void **)&gtpv1u_ue_data_p);
+
+      if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
+        LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: could not find ue rnti %x in hashtable ue_mapping\n", data_req_p->rnti);
+      } else {
+        if ((data_req_p->rab_id >= GTPV1U_BEARER_OFFSET) && (data_req_p->rab_id <= max_val_LTE_DRB_Identity)) {
+          enb_s1u_teid                        = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_eNB;
+          tenb_x2u_teid                       = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_teNB; // target eNB teid
+          stack_req.apiType                   = NW_GTPV1U_ULP_API_SEND_TPDU;
+          stack_req.apiInfo.sendtoInfo.teid   = tenb_x2u_teid;
+          stack_req.apiInfo.sendtoInfo.ipAddr = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].tenb_ip_addr; // target eNB ip
+          rc = nwGtpv1uGpduMsgNew(
+                 RC.gtpv1u_data_g->gtpv1u_stack,
+                 tenb_x2u_teid,
+                 NW_FALSE,
+                 RC.gtpv1u_data_g->seq_num++,
+                 data_req_p->buffer,
+                 data_req_p->length,
+                 data_req_p->offset,
+                 &(stack_req.apiInfo.sendtoInfo.hMsg));
+
+          if (rc != NW_GTPV1U_OK) {
+            LOG_E(GTPU, "nwGtpv1uGpduMsgNew failed: 0x%x\n", rc);
+            MSC_LOG_EVENT(MSC_GTPU_ENB, "0 Failed send G-PDU ltid %u rtid %u size %u",
+                          enb_s1u_teid, tenb_x2u_teid, data_req_p->length);
+            (void)enb_s1u_teid; /* avoid gcc warning "set but not used" */
+          } else {
+            rc = nwGtpv1uProcessUlpReq(RC.gtpv1u_data_g->gtpv1u_stack, &stack_req);
+
+            if (rc != NW_GTPV1U_OK) {
+              LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: 0x%x\n", rc);
+              MSC_LOG_EVENT(MSC_GTPU_ENB, "0 Failed send G-PDU ltid %u rtid %u size %u",
+                            enb_s1u_teid, tenb_x2u_teid, data_req_p->length);
+            } else {
+              MSC_LOG_TX_MESSAGE(
+                MSC_GTPU_ENB,
+                MSC_GTPU_SGW,
+                NULL,
+                0,
+                MSC_AS_TIME_FMT" G-PDU ltid %u rtid %u size %u",
+                0,0,
+                enb_s1u_teid,
+                tenb_x2u_teid,
+                data_req_p->length);
+            }
+
+            rc = nwGtpv1uMsgDelete(RC.gtpv1u_data_g->gtpv1u_stack,
+                                   stack_req.apiInfo.sendtoInfo.hMsg);
+
+            if (rc != NW_GTPV1U_OK) {
+              LOG_E(GTPU, "nwGtpv1uMsgDelete failed: 0x%x\n", rc);
+            }
+          }
+        }
+      }
+
+      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_OUT);
+      /* Buffer still needed, do not free it */
+      //itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), data_req_p->buffer)
+    }
+    break;
+
+    case GTPV1U_ENB_END_MARKER_REQ: {
+      gtpv1u_enb_end_marker_req_t *data_req_p = NULL;
+      NwGtpv1uUlpApiT stack_req;
+      NwGtpv1uRcT rc = NW_GTPV1U_FAILURE;
+      hashtable_rc_t hash_rc = HASH_TABLE_KEY_NOT_EXISTS;
+      gtpv1u_ue_data_t *gtpv1u_ue_data_p = NULL;
+      teid_t enb_s1u_teid  = 0;
+      teid_t tenb_x2u_teid = 0;
+      NwGtpv1uMsgT *pMsg = NULL;
+      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_IN);
+      data_req_p = &GTPV1U_ENB_END_MARKER_REQ(received_message_p);
+      //ipv4_send_data(ipv4_data_p->sd, data_ind_p->buffer, data_ind_p->length);
+#if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0
+      gtpv1u_eNB_write_dump_socket(&data_req_p->buffer[data_req_p->offset], data_req_p->length);
+#endif
+      memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));
+      hash_rc = hashtable_get(RC.gtpv1u_data_g->ue_mapping, (uint64_t)data_req_p->rnti, (void **)&gtpv1u_ue_data_p);
+
+      if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
+        LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: could not find ue rnti %x in hashtable ue_mapping\n", data_req_p->rnti);
+      } else {
+        if ((data_req_p->rab_id >= GTPV1U_BEARER_OFFSET) && (data_req_p->rab_id <= max_val_LTE_DRB_Identity)) {
+          enb_s1u_teid                        = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_eNB;
+          tenb_x2u_teid                       = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_teNB; // target eNB teid
+          stack_req.apiType                   = NW_GTPV1U_ULP_API_SEND_TPDU;
+          stack_req.apiInfo.sendtoInfo.teid   = tenb_x2u_teid;
+          stack_req.apiInfo.sendtoInfo.ipAddr = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].tenb_ip_addr; // target eNB ip
+          rc = nwGtpv1uGpduMsgNew(
+                 RC.gtpv1u_data_g->gtpv1u_stack,
+                 tenb_x2u_teid,
+                 NW_FALSE,
+                 RC.gtpv1u_data_g->seq_num++,
+                 data_req_p->buffer,
+                 data_req_p->length,
+                 data_req_p->offset,
+                 &(stack_req.apiInfo.sendtoInfo.hMsg));
+
+          if (rc != NW_GTPV1U_OK) {
+            LOG_E(GTPU, "nwGtpv1uGpduMsgNew failed: 0x%x\n", rc);
+            MSC_LOG_EVENT(MSC_GTPU_ENB, "0 Failed send G-PDU ltid %u rtid %u size %u",
+                          enb_s1u_teid, tenb_x2u_teid, data_req_p->length);
+            (void)enb_s1u_teid; /* avoid gcc warning "set but not used" */
+          } else {
+            pMsg = (NwGtpv1uMsgT *) stack_req.apiInfo.sendtoInfo.hMsg;
+            pMsg->msgType = NW_GTP_END_MARKER;
+            rc = nwGtpv1uProcessUlpReq(RC.gtpv1u_data_g->gtpv1u_stack, &stack_req);
+
+            if (rc != NW_GTPV1U_OK) {
+              LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: 0x%x\n", rc);
+              MSC_LOG_EVENT(MSC_GTPU_ENB, "0 Failed send G-PDU ltid %u rtid %u size %u",
+                            enb_s1u_teid, tenb_x2u_teid, data_req_p->length);
+            } else {
+              MSC_LOG_TX_MESSAGE(
+                MSC_GTPU_ENB,
+                MSC_GTPU_SGW,
+                NULL,
+                0,
+                MSC_AS_TIME_FMT" G-PDU ltid %u rtid %u size %u",
+                0,0,
+                enb_s1u_teid,
+                tenb_x2u_teid,
+                data_req_p->length);
+            }
+
+            rc = nwGtpv1uMsgDelete(RC.gtpv1u_data_g->gtpv1u_stack,
+                                   stack_req.apiInfo.sendtoInfo.hMsg);
+
+            if (rc != NW_GTPV1U_OK) {
+              LOG_E(GTPU, "nwGtpv1uMsgDelete failed: 0x%x\n", rc);
+            }
+
+            gtpv1u_enb_delete_tunnel_req_t delete_tunnel_req;
+            memset(&delete_tunnel_req, 0, sizeof(delete_tunnel_req));
+            delete_tunnel_req.rnti = data_req_p->rnti;
+            gtpv1u_delete_x2u_tunnel(instance, &delete_tunnel_req, GTPV1U_SOURCE_ENB);
+          }
+        }
+      }
+
+      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_OUT);
+    }
+    break;
+
     case TERMINATE_MESSAGE: {
       if (RC.gtpv1u_data_g->ue_mapping != NULL) {
         hashtable_destroy (&(RC.gtpv1u_data_g->ue_mapping));
diff --git a/openair3/GTPV1-U/gtpv1u_eNB_defs.h b/openair3/GTPV1-U/gtpv1u_eNB_defs.h
index 978a3c58bc0..79d9a1dcdd5 100644
--- a/openair3/GTPV1-U/gtpv1u_eNB_defs.h
+++ b/openair3/GTPV1-U/gtpv1u_eNB_defs.h
@@ -40,6 +40,11 @@

 #define GTPV1U_MAX_BEARERS_ID     (max_val_LTE_DRB_Identity - GTPV1U_BEARER_OFFSET)

+#define GTPV1U_SOURCE_ENB          (0)
+#define GTPV1U_TARGET_ENB          (1)
+#define GTPV1U_MSG_FROM_SOURCE_ENB (0)
+#define GTPV1U_MSG_FROM_SPGW       (1)
+
 typedef enum {
   BEARER_DOWN = 0,
   BEARER_IN_CONFIG,
@@ -65,6 +70,9 @@ typedef struct gtpv1u_bearer_s {
   teid_t          teid_sgw;                ///< Remote TEID
   in_addr_t       sgw_ip_addr;
   struct in6_addr sgw_ip6_addr;
+  teid_t          teid_teNB;               ///< target eNB TEID
+  in_addr_t       tenb_ip_addr;            ///< target eNB ipv4
+  struct in6_addr tenb_ip6_addr;           ///< target eNB ipv6
   tcp_udp_port_t  port;
   //NwGtpv1uStackSessionHandleT stack_session;
   bearer_state_t state;
diff --git a/openair3/GTPV1-U/gtpv1u_eNB_task.h b/openair3/GTPV1-U/gtpv1u_eNB_task.h
index 9830ea61699..a6e26259415 100644
--- a/openair3/GTPV1-U/gtpv1u_eNB_task.h
+++ b/openair3/GTPV1-U/gtpv1u_eNB_task.h
@@ -44,6 +44,12 @@ int gtpv1u_eNB_init(void);
 void *gtpv1u_eNB_process_itti_msg(void*);
 void *gtpv1u_eNB_task(void *args);

+int
+gtpv1u_create_x2u_tunnel(
+  const instance_t instanceP,
+  const gtpv1u_enb_create_x2u_tunnel_req_t *const create_tunnel_req_pP,
+  gtpv1u_enb_create_x2u_tunnel_resp_t *const create_tunnel_resp_pP);
+
 int
 gtpv1u_create_s1u_tunnel(
   const instance_t instanceP,
@@ -55,4 +61,9 @@ gtpv1u_update_s1u_tunnel(
   const instance_t instanceP,
   const gtpv1u_enb_create_tunnel_req_t *const create_tunnel_req_pP,
   const rnti_t prior_rnti);
+
+int gtpv1u_delete_x2u_tunnel(
+  const instance_t instanceP,
+  const gtpv1u_enb_delete_tunnel_req_t *const req_pP,
+  int enbflag);
 #endif /* GTPV1U_ENB_TASK_H_ */
diff --git a/openair3/GTPV1-U/nw-gtpv1u/src/NwGtpv1u.c b/openair3/GTPV1-U/nw-gtpv1u/src/NwGtpv1u.c
index 3e8da5d015f..a6e0224e775 100644
--- a/openair3/GTPV1-U/nw-gtpv1u/src/NwGtpv1u.c
+++ b/openair3/GTPV1-U/nw-gtpv1u/src/NwGtpv1u.c
@@ -893,8 +893,14 @@ nwGtpv1uProcessUdpReq( NW_IN NwGtpv1uStackHandleT hGtpuStackHandle,
       break;

     case NW_GTP_END_MARKER:
-      GTPU_DEBUG("NW_GTP_END_MARKER\n");
-      ret = NW_GTPV1U_OK;
+#if defined(LOG_GTPU) && LOG_GTPU > 0
+      for (int i=1; i<=udpDataLen; i++) {
+        printf("%02x ", udpData[i-1]);
+        if (i % 20 == 0) printf("\n");
+      }
+#endif
+      GTPU_INFO("NW_GTP_END_MARKER\n");
+      ret = nwGtpv1uProcessGpdu(thiz, udpData, udpDataLen, peerIp);
       break;

     default:
diff --git a/openair3/S1AP/s1ap_eNB_handlers.c b/openair3/S1AP/s1ap_eNB_handlers.c
index e2396d015f6..e773ae6a207 100644
--- a/openair3/S1AP/s1ap_eNB_handlers.c
+++ b/openair3/S1AP/s1ap_eNB_handlers.c
@@ -1518,7 +1518,7 @@ int s1ap_eNB_handle_s1_path_switch_request_ack(uint32_t assoc_id,
   if (stream == 0) {
     S1AP_ERROR("[SCTP %d] Received s1 path switch request ack on stream (%d)\n",
                assoc_id, stream);
-    return -1;
+    //return -1;
   }

   if ((mme_desc_p = s1ap_eNB_get_MME(NULL, assoc_id, 0)) == NULL) {
diff --git a/openair3/UDP/udp_eNB_task.c b/openair3/UDP/udp_eNB_task.c
index 27f754badc2..90ee87900df 100644
--- a/openair3/UDP/udp_eNB_task.c
+++ b/openair3/UDP/udp_eNB_task.c
@@ -371,13 +371,13 @@ void *udp_eNB_task(void *args_p)
       udp_sd = udp_sock_p->sd;
       pthread_mutex_unlock(&udp_socket_list_mutex);
-#if defined(LOG_UDP) && LOG_UDP > 0
+//#if defined(LOG_UDP) && LOG_UDP > 0
       LOG_D(UDP_, "[%d] Sending message of size %u to "IPV4_ADDR" and port %u\n",
             udp_sd,
             udp_data_req_p->buffer_length,
             IPV4_ADDR_FORMAT(udp_data_req_p->peer_address),
             udp_data_req_p->peer_port);
-#endif
+//#endif
       bytes_written = sendto(
                         udp_sd,
                         &udp_data_req_p->buffer[udp_data_req_p->buffer_offset],
--
GitLab
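
For readers tracing the end-marker path through this patch, the standalone sketch below (not part of the patch) illustrates the GTPv1-U framing the new code relies on: per 3GPP TS 29.281, an End Marker is an ordinary 8-byte GTPv1-U header carrying message type 254 and no T-PDU, while forwarded user packets are G-PDUs with message type 255. That is why NwGtpv1u.c can route an End Marker through nwGtpv1uProcessGpdu() like any other PDU, and why the GTPV1U_ENB_END_MARKER_REQ handler only has to overwrite pMsg->msgType before resending. The helper name and constants here are illustrative, not OAI code.

#include <stdint.h>
#include <stddef.h>

/* Illustrative GTPv1-U message types (3GPP TS 29.281). */
#define GTPU_MSG_END_MARKER 254   /* End Marker: empty payload        */
#define GTPU_MSG_GPDU       255   /* G-PDU: carries a user-plane T-PDU */

/* Hypothetical helper: write the 8-byte mandatory GTPv1-U header.
 * flags 0x30 = version 1, protocol type GTP, no E/S/PN option bits.
 * The length field counts the bytes that follow the mandatory header,
 * so an End Marker without extensions has length 0. */
static size_t gtpu_write_header(uint8_t *buf, uint8_t msg_type,
                                uint32_t teid, uint16_t payload_len)
{
  buf[0] = 0x30;                          /* version 1, PT=GTP        */
  buf[1] = msg_type;                      /* 254 or 255               */
  buf[2] = (uint8_t)(payload_len >> 8);   /* length, network order    */
  buf[3] = (uint8_t)(payload_len & 0xFF);
  buf[4] = (uint8_t)(teid >> 24);         /* TEID, network order      */
  buf[5] = (uint8_t)(teid >> 16);
  buf[6] = (uint8_t)(teid >> 8);
  buf[7] = (uint8_t)(teid & 0xFF);
  return 8;
}

Under these assumptions, the End Marker the source eNB relays onto X2-U TEID 0x1234 would be gtpu_write_header(buf, GTPU_MSG_END_MARKER, 0x1234, 0), i.e. the bytes 30 fe 00 00 00 00 12 34 on the wire; the target eNB in this patch reacts to message type 254 by tearing down the X2-U tunnel and moving handover_info->state to HO_END_MARKER before draining the queued SPGW data.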