/*
 * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The OpenAirInterface Software Alliance licenses this file to You under
 * the OAI Public License, Version 1.0  (the "License"); you may not use this file
 * except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.openairinterface.org/?page_id=698
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *-------------------------------------------------------------------------------
 * For more information about the OpenAirInterface (OAI) Software Alliance:
 *      contact@openairinterface.org
 */

/*! \file gtpv1u_eNB.c
 * \brief
 * \author Sebastien ROUX, Lionel GAUTHIER, Navid Nikaein
 * \version 1.0
 * \company Eurecom
 * \email: lionel.gauthier@eurecom.fr
 */

#include <stdio.h>
#include <errno.h>

#include "mme_config.h"

#include "assertions.h"

#include "intertask_interface.h"
#include "timer.h"

#include "msc.h"

#include "gtpv1u.h"
#include "NwGtpv1u.h"
#include "NwGtpv1uMsg.h"
#include "NwGtpv1uPrivate.h"
#include "NwLog.h"
#include "gtpv1u_eNB_defs.h"
#include "gtpv1_u_messages_types.h"
#include "udp_eNB_task.h"
#include "UTIL/LOG/log.h"
#include "COMMON/platform_types.h"
#include "COMMON/platform_constants.h"
#include "UTIL/LOG/vcd_signal_dumper.h"


#undef GTP_DUMP_SOCKET

extern boolean_t pdcp_data_req(
  const protocol_ctxt_t* const  ctxt_pP,
  const srb_flag_t     srb_flagP,
  const rb_id_t        rb_idP,
  const mui_t          muiP,
  const confirm_t      confirmP,
  const sdu_size_t     sdu_buffer_sizeP,
  unsigned char *const sdu_buffer_pP,
  const pdcp_transmission_mode_t modeP);

extern unsigned char NB_eNB_INST;

static int
gtpv1u_eNB_send_init_udp(
  uint16_t port_number);

NwGtpv1uRcT
gtpv1u_eNB_log_request(
  NwGtpv1uLogMgrHandleT   hLogMgr,
  uint32_t                  logLevel,
  NwCharT                *file,
  uint32_t                  line,
  NwCharT                *logStr);

NwGtpv1uRcT
gtpv1u_eNB_send_udp_msg(
  NwGtpv1uUdpHandleT      udpHandle,
  uint8_t                  *buffer,
  uint32_t                  buffer_len,
  uint32_t                  buffer_offset,
  uint32_t                  peerIpAddr,
  uint16_t                  peerPort);

NwGtpv1uRcT
gtpv1u_eNB_process_stack_req(
  NwGtpv1uUlpHandleT hUlp,
  NwGtpv1uUlpApiT   *pUlpApi);

int
data_recv_callback(
  uint16_t  portP,
  uint32_t  address,
  uint8_t  *buffer,
  uint32_t  length,
  void     *arg_p);

//int
//gtpv1u_create_tunnel_endpoint(
//    gtpv1u_data_t *gtpv1u_data_pP,
//    uint8_t        ue_idP,
//    uint8_t        rab_idP,
//    char          *sgw_ip_addr_pP,
//    uint16_t       portP);

static NwGtpv1uRcT
gtpv1u_start_timer_wrapper(
  NwGtpv1uTimerMgrHandleT tmrMgrHandle,
  uint32_t                  timeoutSec,
  uint32_t                  timeoutUsec,
  uint32_t                  tmrType,
  void                   *timeoutArg,
  NwGtpv1uTimerHandleT   *hTmr);

static NwGtpv1uRcT
gtpv1u_stop_timer_wrapper(
  NwGtpv1uTimerMgrHandleT     tmrMgrHandle,
  NwGtpv1uTimerHandleT         hTmr);

int
gtpv1u_initial_req(
  gtpv1u_data_t *gtpv1u_data_pP,
  teid_t         teidP,
  tcp_udp_port_t portP,
  uint32_t       address);

int
gtpv1u_new_data_req(
  uint8_t  enb_module_idP,
  rnti_t   ue_rntiP,
  uint8_t  rab_idP,
  uint8_t *buffer_pP,
  uint32_t buf_lenP,
  uint32_t buf_offsetP
);

int
gtpv1u_create_s1u_tunnel(
  const instance_t instanceP,
  const gtpv1u_enb_create_tunnel_req_t *  const create_tunnel_req_pP,
        gtpv1u_enb_create_tunnel_resp_t * const create_tunnel_resp_pP);

static int
gtpv1u_delete_s1u_tunnel(
  const instance_t instanceP,
  const gtpv1u_enb_delete_tunnel_req_t * const req_pP);

static int
gtpv1u_eNB_init(void);

void *
gtpv1u_eNB_task(void *args);

static gtpv1u_data_t gtpv1u_data_g;

#if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0
#include <linux/if.h>
static int           gtpv1u_dump_socket_g;

//-----------------------------------------------------------------------------
int gtpv1u_eNB_create_dump_socket(void)
{
  struct ifreq ifr;
  int          hdrincl=1;

  if ((gtpv1u_dump_socket_g = socket(AF_INET, SOCK_RAW, IPPROTO_RAW)) < 0 ) {
    LOG_E(GTPU, "Could not create dump socket %d:%s\n", errno, strerror(errno));
    return -1;
  }

  if (setsockopt(gtpv1u_dump_socket_g,
                 IPPROTO_IP,
                 IP_HDRINCL,
                 &hdrincl,
                 sizeof(hdrincl))==-1) {
    LOG_E(GTPU, "%s:%d set IP_HDRINCL %d:%s\n",
          __FILE__, __LINE__, errno, strerror(errno));
  }

  memset(&ifr, 0, sizeof(ifr));
  snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "lo");

  if (setsockopt(gtpv1u_dump_socket_g, SOL_SOCKET, SO_BINDTODEVICE, (void *)&ifr, sizeof(ifr)) < 0) {
    LOG_E(GTPU, "%s:%d  setsockopt SO_BINDTODEVICE %d:%s\n",
          __FILE__, __LINE__, errno, strerror(errno));
    close(gtpv1u_dump_socket_g);
    return -1;
  }

  return 0;
}

//-----------------------------------------------------------------------------
static void gtpv1u_eNB_write_dump_socket(uint8_t *buffer_pP, uint32_t buffer_lengthP)
{
  struct sockaddr_in sin;
  memset(&sin, 0, sizeof(sin));
  sin.sin_family = AF_INET;
  sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

  if (sendto(gtpv1u_dump_socket_g, buffer_pP, (size_t)buffer_lengthP, 0, (struct sockaddr *)&sin, sizeof(struct sockaddr)) < 0)  {
    LOG_E(GTPU, "%s:%s:%d  sendto %d:%s\n",
          __FILE__, __FUNCTION__, __LINE__, errno, strerror(errno));
  }
}

#endif

//-----------------------------------------------------------------------------
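/* Ask the UDP task, through an ITTI UDP_INIT message, to open and bind the
 * S1-U socket on the configured eNB address and port; incoming datagrams are
 * then delivered back as UDP_DATA_IND messages handled in gtpv1u_eNB_task().
 */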
static int gtpv1u_eNB_send_init_udp(uint16_t port_number)
{
  // Create and alloc new message
  MessageDef *message_p;
  struct in_addr addr;

  message_p = itti_alloc_new_message(TASK_GTPV1_U, UDP_INIT);

  if (message_p == NULL) {
    return -1;
  }

  UDP_INIT(message_p).port = port_number;
  //LG UDP_INIT(message_p).address = "0.0.0.0"; //ANY address

  addr.s_addr = gtpv1u_data_g.enb_ip_address_for_S1u_S12_S4_up;
  UDP_INIT(message_p).address = inet_ntoa(addr);
  LOG_D(GTPU, "Tx UDP_INIT IP addr %s\n", UDP_INIT(message_p).address);

  MSC_LOG_EVENT(
    MSC_GTPU_ENB,
    "0 UDP bind  %s:%u",
    UDP_INIT(message_p).address,
    UDP_INIT(message_p).port);

  return itti_send_msg_to_task(TASK_UDP, INSTANCE_DEFAULT, message_p);
}

//-----------------------------------------------------------------------------
NwGtpv1uRcT gtpv1u_eNB_log_request(NwGtpv1uLogMgrHandleT hLogMgr,
                                   uint32_t logLevel,
                                   NwCharT *file,
                                   uint32_t line,
                                   NwCharT *logStr)
{
  LOG_D(GTPU, "%s\n", logStr);
  return NW_GTPV1U_OK;
}

//-----------------------------------------------------------------------------
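/* UDP send callback registered with the nw-gtpv1u stack: wraps the outgoing
 * GTP-U PDU in a UDP_DATA_REQ ITTI message and forwards it to the UDP task.
 */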
NwGtpv1uRcT gtpv1u_eNB_send_udp_msg(
  NwGtpv1uUdpHandleT udpHandle,
  uint8_t *buffer,
  uint32_t buffer_len,
  uint32_t buffer_offset,
  uint32_t peerIpAddr,
  uint16_t peerPort)
{
  // Create and alloc new message
  MessageDef     *message_p       = NULL;
  udp_data_req_t *udp_data_req_p  = NULL;

  message_p = itti_alloc_new_message(TASK_GTPV1_U, UDP_DATA_REQ);

  if (message_p) {
#if defined(LOG_GTPU) && LOG_GTPU > 0
    LOG_D(GTPU, "Sending UDP_DATA_REQ length %u offset %u", buffer_len, buffer_offset);
#endif
    udp_data_req_p = &message_p->ittiMsg.udp_data_req;
    udp_data_req_p->peer_address  = peerIpAddr;
    udp_data_req_p->peer_port     = peerPort;
    udp_data_req_p->buffer        = buffer;
    udp_data_req_p->buffer_length = buffer_len;
    udp_data_req_p->buffer_offset = buffer_offset;
    return itti_send_msg_to_task(TASK_UDP, INSTANCE_DEFAULT, message_p);
  } else {
    return NW_GTPV1U_FAILURE;
  }
}


//-----------------------------------------------------------------------------
/* Callback called when a gtpv1u message arrives on the UDP interface */
NwGtpv1uRcT gtpv1u_eNB_process_stack_req(
  NwGtpv1uUlpHandleT hUlp,
  NwGtpv1uUlpApiT   *pUlpApi)
{
  boolean_t           result             = FALSE;
  teid_t              teid               = 0;
  hashtable_rc_t      hash_rc            = HASH_TABLE_KEY_NOT_EXISTS;
  gtpv1u_teid_data_t *gtpv1u_teid_data_p = NULL;
  protocol_ctxt_t     ctxt;

  switch(pUlpApi->apiType) {
    /* Two types of message are handled here:
     * - T-PDU
     * - END-MARKER
     */
  case NW_GTPV1U_ULP_API_RECV_TPDU: {
    uint8_t              buffer[4096];
    uint32_t             buffer_len;
    /* The nw-gtpv1u stack has processed a PDU; we can forward it to PDCP
     * for transmission.
     */
    teid = pUlpApi->apiInfo.recvMsgInfo.teid;

    if (NW_GTPV1U_OK != nwGtpv1uMsgGetTpdu(pUlpApi->apiInfo.recvMsgInfo.hMsg,
                                           buffer, &buffer_len)) {
      LOG_E(GTPU, "Error while retrieving T-PDU");
    }

    itti_free(TASK_UDP, ((NwGtpv1uMsgT*)pUlpApi->apiInfo.recvMsgInfo.hMsg)->msgBuf);
#if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0
    gtpv1u_eNB_write_dump_socket(buffer,buffer_len);
#endif

    //-----------------------
    // GTPV1U->PDCP mapping
    //-----------------------
    hash_rc = hashtable_get(gtpv1u_data_g.teid_mapping, teid, (void**)&gtpv1u_teid_data_p);

    if (hash_rc == HASH_TABLE_OK) {
#if defined(LOG_GTPU) && LOG_GTPU > 0
      LOG_D(GTPU, "Received T-PDU from gtpv1u stack teid  %u size %d -> enb module id %u ue module id %u rab id %u\n",
            teid,
            buffer_len,
            gtpv1u_teid_data_p->enb_id,
            gtpv1u_teid_data_p->ue_id,
            gtpv1u_teid_data_p->eps_bearer_id);
#endif

//#warning "LG eps bearer mapping to DRB id to do (offset -4)"
      PROTOCOL_CTXT_SET_BY_MODULE_ID(&ctxt, gtpv1u_teid_data_p->enb_id, ENB_FLAG_YES,  gtpv1u_teid_data_p->ue_id, 0, 0,gtpv1u_teid_data_p->enb_id);
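      /* EPS bearer ids start at 5 (the default bearer) while DRB ids start at 1,
       * hence the offset of 4 applied below when mapping the bearer to a radio
       * bearer id (see also GTPV1U_BEARER_OFFSET used elsewhere in this file).
       */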
      MSC_LOG_TX_MESSAGE(
        MSC_GTPU_ENB,
        MSC_PDCP_ENB,
        NULL,0,
        MSC_AS_TIME_FMT" DATA-REQ rb %u size %u",
        0,0,
        (gtpv1u_teid_data_p->eps_bearer_id) ? gtpv1u_teid_data_p->eps_bearer_id - 4 : 5 - 4,
        buffer_len);

      result = pdcp_data_req(
                 &ctxt,
                 SRB_FLAG_NO,
                 (gtpv1u_teid_data_p->eps_bearer_id) ? gtpv1u_teid_data_p->eps_bearer_id - 4 : 5 - 4,
                 0, // mui
                 SDU_CONFIRM_NO, // confirm
                 buffer_len,
                 buffer,
                 PDCP_TRANSMISSION_MODE_DATA);

      if (result == FALSE) {
        if (ctxt.configured == FALSE)
          LOG_W(GTPU, "PDCP data request failed, cause: RB is not configured!\n");
        else
          LOG_W(GTPU, "PDCP data request failed\n");

        return NW_GTPV1U_FAILURE;
      }
    } else {
      LOG_W(GTPU, "Received T-PDU from gtpv1u stack for unknown teid %u size %u\n", teid, buffer_len);
    }
  }
  break;

  default: {
    LOG_E(GTPU, "Received undefined UlpApi (%02x) from gtpv1u stack!\n",
          pUlpApi->apiType);
  }
  } // end of switch

  return NW_GTPV1U_OK;
}


//-----------------------------------------------------------------------------
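/* Callback invoked with a raw UDP datagram: simply hands the buffer to the
 * nw-gtpv1u stack for decoding via nwGtpv1uProcessUdpReq().
 */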
int data_recv_callback(uint16_t  portP,
                       uint32_t  address,
                       uint8_t  *buffer,
                       uint32_t  length,
                       void     *arg_p)
{
  gtpv1u_data_t        *gtpv1u_data_p;

  if (arg_p == NULL) {
    return -1;
  }

  gtpv1u_data_p = (gtpv1u_data_t *)arg_p;

  return nwGtpv1uProcessUdpReq(gtpv1u_data_p->gtpv1u_stack,
                               buffer,
                               length,
                               portP,
                               address);
}

//int
//gtpv1u_create_tunnel_endpoint(
//    gtpv1u_data_t *gtpv1u_data_pP,
//    uint8_t        ue_idP,
//    uint8_t        rab_idP,
//    char          *sgw_ip_addr_pP,
//    uint16_t       portP)
//{
//    uint32_t                     teid;
//    uint8_t                      max_attempt = 100;
//    NwGtpv1uRcT                  rc          = NW_GTPV1U_FAILURE;
//    NwGtpv1uUlpApiT              ulp_req;
//    struct gtpv1u_ue_data_s     *new_ue_p    = NULL;
//    struct gtpv1u_bearer_s      *bearer_p    = NULL;
//    hashtable_rc_t               hash_rc     = HASH_TABLE_KEY_NOT_EXISTS;;
//
//    if (rab_idP > GTPV1U_MAX_BEARERS_PER_UE) {
//        LOG_E(GTPU, "Could not use rab_id %d > max %d\n",
//              rab_idP, GTPV1U_MAX_BEARERS_PER_UE);
//        return -1;
//    }
//
//
//    if ((hash_rc = hashtable_get(gtpv1u_data_pP->ue_mapping, (uint64_t)ue_idP, (void**)&new_ue_p)) == HASH_TABLE_OK) {
//        /* A context for this UE already exist in the tree, use it */
//        /* We check that the tunnel is not already configured */
//        if (new_ue_p->bearers[rab_idP].state != BEARER_DOWN) {
//            LOG_E(GTPU, "Cannot create new end-point over already existing tunnel\n");
//            return -1;
//        }
//    } else {
//        /* Context doesn't exist, create it */
//        if (rab_idP != 0) {
//            /* UE should first establish Default bearer before trying to setup
//             * additional bearers.
//             */
//            LOG_E(GTPU, "UE context is not known and rab_id != 0\n");
//            return -1;
//        }
//        new_ue_p = calloc(1, sizeof(struct gtpv1u_ue_data_s));
//        new_ue_p->ue_id = ue_idP;
//
//        hash_rc = hashtable_insert(gtpv1u_data_pP->ue_mapping, (uint64_t)ue_idP, new_ue_p);
//
//        if ((hash_rc != HASH_TABLE_OK) && (hash_rc != HASH_TABLE_INSERT_OVERWRITTEN_DATA)) {
//            LOG_E(GTPU, "Failed to insert new UE context\n");
//            free(new_ue_p);
//            return -1;
//        }
//    }
//
//    bearer_p = &new_ue_p->bearers[rab_idP];
//
//    /* Configure the bearer */
//    bearer_p->state       = BEARER_IN_CONFIG;
//    bearer_p->sgw_ip_addr = inet_addr(sgw_ip_addr_pP);
//    bearer_p->port        = portP;
//
//    /* Create the new stack api request */
//    memset(&ulp_req, 0, sizeof(NwGtpv1uUlpApiT));
//    ulp_req.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT;
//
//    /* Try to create new tunnel-endpoint.
//     * If teid generated is already present in the stack, just peek another random
//     * teid. This could be ok for small number of tunnel but more errors could be
//     * thrown if we reached high number of tunnels.
//     * TODO: find a solution for teid
//     */
//    do {
//        /* Request for a new random TEID */
//        teid = gtpv1u_new_teid();
//        ulp_req.apiInfo.createTunnelEndPointInfo.teid = teid;
//
//        rc = nwGtpv1uProcessUlpReq(gtpv1u_data_pP->gtpv1u_stack, &ulp_req);
//
//        if (rc == NW_GTPV1U_OK) {
////             LOG_D(GTPU, "Successfully created new tunnel endpoint for teid 0x%x\n",
////                   teid);
//            bearer_p->teid_eNB = teid;
////             gtpv1u_initial_req(gtpv1u_data_pP, teid, GTPV1U_UDP_PORT,
////                                inet_addr("192.168.56.101"));
//            LOG_I(GTPU, "Created eNB tunnel endpoint %u for ue id %u, rab id %u\n", teid, ue_idP, rab_idP);
//            return 0;
//        } else {
//            LOG_W(GTPU, "Teid %u already in use... %s\n",
//                  teid, (max_attempt > 1) ? "Trying another one" : "Last chance");
//        }
//    } while(max_attempt-- && rc != NW_GTPV1U_OK);
//
//    bearer_p->state = BEARER_DOWN;
//    LOG_I(GTPU, "Failed to created eNB tunnel endpoint %u for ue id %u, rab id %u, bearer down\n", teid, ue_idP, rab_idP);
//
//    return -1;
//}


//-----------------------------------------------------------------------------
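/* Timer wrappers handed to the nw-gtpv1u stack: one-shot or periodic timers
 * are armed through the ITTI timer API (timer_setup); on expiry the
 * TIMER_HAS_EXPIRED message received in gtpv1u_eNB_task() feeds
 * nwGtpv1uProcessTimeout().
 */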
static NwGtpv1uRcT gtpv1u_start_timer_wrapper(
  NwGtpv1uTimerMgrHandleT tmrMgrHandle,
  uint32_t                  timeoutSec,
  uint32_t                  timeoutUsec,
  uint32_t                  tmrType,
  void                   *timeoutArg,
  NwGtpv1uTimerHandleT   *hTmr)
{
  NwGtpv1uRcT rc = NW_GTPV1U_OK;
  long        timer_id;

  if (tmrType == NW_GTPV1U_TMR_TYPE_ONE_SHOT) {
    timer_setup(timeoutSec,
                timeoutUsec,
                TASK_GTPV1_U,
                INSTANCE_DEFAULT,
                TIMER_ONE_SHOT,
                timeoutArg,
                &timer_id);
  } else {
    timer_setup(timeoutSec,
                timeoutUsec,
                TASK_GTPV1_U,
                INSTANCE_DEFAULT,
                TIMER_PERIODIC,
                timeoutArg,
                &timer_id);
  }

  return rc;
}


//-----------------------------------------------------------------------------
static NwGtpv1uRcT
gtpv1u_stop_timer_wrapper(
  NwGtpv1uTimerMgrHandleT tmrMgrHandle,
  NwGtpv1uTimerHandleT hTmr)
{
  NwGtpv1uRcT rc = NW_GTPV1U_OK;

  return rc;
}


//-----------------------------------------------------------------------------
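/* Issue an NW_GTPV1U_ULP_API_INITIAL_REQ towards the given peer TEID, port and
 * IPv4 address through the nw-gtpv1u stack; returns 0 on success, -1 otherwise.
 */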
int
gtpv1u_initial_req(
  gtpv1u_data_t *gtpv1u_data_pP,
  teid_t         teidP,
  tcp_udp_port_t portP,
  uint32_t       address)
{
  NwGtpv1uUlpApiT ulp_req;
  NwGtpv1uRcT     rc = NW_GTPV1U_FAILURE;

  memset(&ulp_req, 0, sizeof(NwGtpv1uUlpApiT));

  ulp_req.apiType = NW_GTPV1U_ULP_API_INITIAL_REQ;
  ulp_req.apiInfo.initialReqInfo.teid     = teidP;
  ulp_req.apiInfo.initialReqInfo.peerPort = portP;
  ulp_req.apiInfo.initialReqInfo.peerIp   = address;

  rc = nwGtpv1uProcessUlpReq(gtpv1u_data_pP->gtpv1u_stack, &ulp_req);

  if (rc == NW_GTPV1U_OK) {
    LOG_D(GTPU, "Successfully sent initial req for teid %u\n", teidP);
  } else {
    LOG_W(GTPU, "Could not send initial req for teid %u\n", teidP);
  }

  return (rc == NW_GTPV1U_OK) ? 0 : -1;
}

//-----------------------------------------------------------------------------
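/* Per-RNTI uplink send path: look up the UE/bearer context, build a G-PDU
 * towards the S-GW TEID and push it through the nw-gtpv1u stack (the ITTI
 * handler for GTPV1U_ENB_TUNNEL_DATA_REQ in gtpv1u_eNB_task() performs
 * essentially the same steps).
 */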
int
gtpv1u_new_data_req(
  uint8_t  enb_module_idP,
  rnti_t   ue_rntiP,
  uint8_t  rab_idP,
  uint8_t *buffer_pP,
  uint32_t buf_lenP,
  uint32_t buf_offsetP
)
{
  NwGtpv1uUlpApiT          stack_req;
  NwGtpv1uRcT              rc            = NW_GTPV1U_FAILURE;
  struct gtpv1u_ue_data_s  ue;
  struct gtpv1u_ue_data_s *ue_inst_p     = NULL;
  struct gtpv1u_bearer_s  *bearer_p      = NULL;
  hashtable_rc_t           hash_rc       = HASH_TABLE_KEY_NOT_EXISTS;
  gtpv1u_data_t           *gtpv1u_data_p = NULL;

  memset(&ue, 0, sizeof(struct gtpv1u_ue_data_s));
  ue.ue_id = ue_rntiP;

  AssertFatal(enb_module_idP >=0, "Bad parameter enb module id %u\n", enb_module_idP);
  AssertFatal((rab_idP - GTPV1U_BEARER_OFFSET) < GTPV1U_MAX_BEARERS_ID, "Bad parameter rab id %u\n", rab_idP);
  AssertFatal((rab_idP - GTPV1U_BEARER_OFFSET) >= 0 , "Bad parameter rab id %u\n", rab_idP);

  gtpv1u_data_p = &gtpv1u_data_g;
  /* Check that UE context is present in ue map. */
  hash_rc = hashtable_get(gtpv1u_data_p->ue_mapping, (uint64_t)ue_rntiP, (void**)&ue_inst_p);

  if (hash_rc ==  HASH_TABLE_KEY_NOT_EXISTS ) {
    LOG_E(GTPU, "[UE %d] Trying to send data on non-existing UE context\n", ue_rntiP);
    return -1;
  }

  bearer_p = &ue_inst_p->bearers[rab_idP - GTPV1U_BEARER_OFFSET];

  /* Ensure the bearer is ready.
   * TODO: handle the cases where the bearer is in HANDOVER state.
   * In such a case packets should be placed in a FIFO.
   */
  if (bearer_p->state != BEARER_UP) {
    LOG_W(GTPU, "Trying to send data over bearer with state(%u) != BEARER_UP\n",
          bearer_p->state);
//#warning  LG: HACK WHILE WAITING FOR NAS, normally return -1

    if (bearer_p->state != BEARER_IN_CONFIG)
      return -1;
  }

  memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));

  stack_req.apiType                   = NW_GTPV1U_ULP_API_SEND_TPDU;
  stack_req.apiInfo.sendtoInfo.teid   = bearer_p->teid_sgw;
  stack_req.apiInfo.sendtoInfo.ipAddr = bearer_p->sgw_ip_addr;

  LOG_D(GTPU, "TX TO TEID %u addr 0x%x\n",bearer_p->teid_sgw, bearer_p->sgw_ip_addr);
  rc = nwGtpv1uGpduMsgNew(gtpv1u_data_p->gtpv1u_stack,
                          bearer_p->teid_sgw,
                          NW_FALSE,
                          gtpv1u_data_p->seq_num++,
                          buffer_pP,
                          buf_lenP,
                          buf_offsetP,
                          &(stack_req.apiInfo.sendtoInfo.hMsg));

  if (rc != NW_GTPV1U_OK) {
    LOG_E(GTPU, "nwGtpv1uGpduMsgNew failed: 0x%x\n", rc);
    return -1;
  }

  rc = nwGtpv1uProcessUlpReq(gtpv1u_data_p->gtpv1u_stack,
                             &stack_req);

  if (rc != NW_GTPV1U_OK) {
    LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: 0x%x\n", rc);
    return -1;
  }

  rc = nwGtpv1uMsgDelete(gtpv1u_data_p->gtpv1u_stack,
                         stack_req.apiInfo.sendtoInfo.hMsg);

  if (rc != NW_GTPV1U_OK) {
    LOG_E(GTPU, "nwGtpv1uMsgDelete failed: 0x%x\n", rc);
    return -1;
  }

  LOG_D(GTPU, "%s() return code OK\n", __FUNCTION__);
  return 0;
}

//-----------------------------------------------------------------------------
int
gtpv1u_create_s1u_tunnel(
  const instance_t                              instanceP,
  const gtpv1u_enb_create_tunnel_req_t * const  create_tunnel_req_pP,
        gtpv1u_enb_create_tunnel_resp_t * const create_tunnel_resp_pP
  )
{
  /* Create a new nw-gtpv1-u stack req using API */
  NwGtpv1uUlpApiT          stack_req;
  NwGtpv1uRcT              rc                   = NW_GTPV1U_FAILURE;
  /* Local tunnel end-point identifier */
  teid_t                   s1u_teid             = 0;
  gtpv1u_teid_data_t      *gtpv1u_teid_data_p   = NULL;
  gtpv1u_ue_data_t        *gtpv1u_ue_data_p     = NULL;
  //MessageDef              *message_p            = NULL;
  hashtable_rc_t           hash_rc              = HASH_TABLE_KEY_NOT_EXISTS;
  int                      i;
  ebi_t                    eps_bearer_id        = 0;
  //    int                      ipv4_addr            = 0;
  int                      ip_offset            = 0;
  in_addr_t                in_addr;
  int                      addrs_length_in_bytes= 0;


  MSC_LOG_RX_MESSAGE(
    MSC_GTPU_ENB,
    MSC_RRC_ENB,
    NULL,0,
    MSC_AS_TIME_FMT" CREATE_TUNNEL_REQ RNTI %"PRIx16" inst %u ntuns %u ebid %u sgw-s1u teid %u",
    0,0,create_tunnel_req_pP->rnti, instanceP,
    create_tunnel_req_pP->num_tunnels, create_tunnel_req_pP->eps_bearer_id[0],
    create_tunnel_req_pP->sgw_S1u_teid[0]);

  create_tunnel_resp_pP->rnti        = create_tunnel_req_pP->rnti;
  create_tunnel_resp_pP->status      = 0;
  create_tunnel_resp_pP->num_tunnels = 0;

  for (i = 0; i < create_tunnel_req_pP->num_tunnels; i++) {
    ip_offset               = 0;
    eps_bearer_id = create_tunnel_req_pP->eps_bearer_id[i];
    LOG_D(GTPU, "Rx GTPV1U_ENB_CREATE_TUNNEL_REQ ue rnti %x eps bearer id %u\n",
          create_tunnel_req_pP->rnti, eps_bearer_id);
    memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));

    stack_req.apiType = NW_GTPV1U_ULP_API_CREATE_TUNNEL_ENDPOINT;
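    /* Draw local S1-U TEIDs (gtpv1u_new_teid()) and retry until the stack
     * accepts one that is not already in use for a tunnel endpoint.
     */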

    do {
      s1u_teid = gtpv1u_new_teid();
      LOG_D(GTPU, "gtpv1u_create_s1u_tunnel() 0x%x %u(dec)\n", s1u_teid, s1u_teid);
      stack_req.apiInfo.createTunnelEndPointInfo.teid          = s1u_teid;
      stack_req.apiInfo.createTunnelEndPointInfo.hUlpSession   = 0;
      stack_req.apiInfo.createTunnelEndPointInfo.hStackSession = 0;

      rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req);
      LOG_D(GTPU, ".\n");
    } while (rc != NW_GTPV1U_OK);

    //-----------------------
    // PDCP->GTPV1U mapping
    //-----------------------
    hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, create_tunnel_req_pP->rnti, (void **)&gtpv1u_ue_data_p);

    if ((hash_rc == HASH_TABLE_KEY_NOT_EXISTS) || (hash_rc == HASH_TABLE_OK)) {

      if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
        gtpv1u_ue_data_p = calloc (1, sizeof(gtpv1u_ue_data_t));
        hash_rc = hashtable_insert(gtpv1u_data_g.ue_mapping, create_tunnel_req_pP->rnti, gtpv1u_ue_data_p);
        AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting ue_mapping in GTPV1U hashtable");
      }

736
      gtpv1u_ue_data_p->ue_id       = create_tunnel_req_pP->rnti;
737
      gtpv1u_ue_data_p->instance_id = 0; // TO DO
738
      memcpy(&create_tunnel_resp_pP->enb_addr.buffer,
739 740
             &gtpv1u_data_g.enb_ip_address_for_S1u_S12_S4_up,
             sizeof (in_addr_t));
741
      create_tunnel_resp_pP->enb_addr.length = sizeof (in_addr_t);
742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763

      addrs_length_in_bytes = create_tunnel_req_pP->sgw_addr[i].length / 8;
      AssertFatal((addrs_length_in_bytes == 4) ||
                  (addrs_length_in_bytes == 16) ||
                  (addrs_length_in_bytes == 20),
                  "Bad transport layer address length %d (bits) %d (bytes)",
                  create_tunnel_req_pP->sgw_addr[i].length, addrs_length_in_bytes);

      if ((addrs_length_in_bytes == 4) ||
          (addrs_length_in_bytes == 20)) {
        in_addr = *((in_addr_t*)create_tunnel_req_pP->sgw_addr[i].buffer);
        ip_offset = 4;
        gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].sgw_ip_addr = in_addr;
      }

      if ((addrs_length_in_bytes == 16) ||
          (addrs_length_in_bytes == 20)) {
        memcpy(gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].sgw_ip6_addr.s6_addr,
               &create_tunnel_req_pP->sgw_addr[i].buffer[ip_offset],
               16);
      }

      gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].state                  = BEARER_IN_CONFIG;
      gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_eNB               = s1u_teid;
      gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_eNB_stack_session = stack_req.apiInfo.createTunnelEndPointInfo.hStackSession;
      gtpv1u_ue_data_p->bearers[eps_bearer_id - GTPV1U_BEARER_OFFSET].teid_sgw               = create_tunnel_req_pP->sgw_S1u_teid[i];
      create_tunnel_resp_pP->enb_S1u_teid[i] = s1u_teid;

    } else {
      create_tunnel_resp_pP->enb_S1u_teid[i] = 0;
      create_tunnel_resp_pP->status         = 0xFF;
    }

    create_tunnel_resp_pP->eps_bearer_id[i] = eps_bearer_id;
    create_tunnel_resp_pP->num_tunnels      += 1;

    //-----------------------
    // GTPV1U->PDCP mapping
    //-----------------------
    hash_rc = hashtable_get(gtpv1u_data_g.teid_mapping, s1u_teid, (void**)&gtpv1u_teid_data_p);

    if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
      gtpv1u_teid_data_p = calloc (1, sizeof(gtpv1u_teid_data_t));
      gtpv1u_teid_data_p->enb_id        = 0; // TO DO
      gtpv1u_teid_data_p->ue_id         = create_tunnel_req_pP->rnti;
      gtpv1u_teid_data_p->eps_bearer_id = eps_bearer_id;
      hash_rc = hashtable_insert(gtpv1u_data_g.teid_mapping, s1u_teid, gtpv1u_teid_data_p);
      AssertFatal(hash_rc == HASH_TABLE_OK, "Error inserting teid mapping in GTPV1U hashtable");
    } else {
      create_tunnel_resp_pP->enb_S1u_teid[i] = 0;
      create_tunnel_resp_pP->status         = 0xFF;
    }
  }
  MSC_LOG_TX_MESSAGE(
    MSC_GTPU_ENB,
    MSC_RRC_ENB,
    NULL,0,
    "0 GTPV1U_ENB_CREATE_TUNNEL_RESP rnti %x teid %x",
    create_tunnel_resp_pP->rnti,
    s1u_teid);

  LOG_D(GTPU, "Tx GTPV1U_ENB_CREATE_TUNNEL_RESP ue rnti %x status %d\n",
        create_tunnel_req_pP->rnti,
        create_tunnel_resp_pP->status);
  return 0;
}



//-----------------------------------------------------------------------------
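/* Destroy the stack tunnel endpoints for the listed E-RABs, clear the
 * per-bearer state and both hash-table mappings, then report the outcome to
 * RRC with a GTPV1U_ENB_DELETE_TUNNEL_RESP message.
 */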
static int gtpv1u_delete_s1u_tunnel(
  const instance_t                             instanceP,
  const gtpv1u_enb_delete_tunnel_req_t * const req_pP)
{
  NwGtpv1uUlpApiT          stack_req;
  NwGtpv1uRcT              rc                   = NW_GTPV1U_FAILURE;
  MessageDef              *message_p = NULL;
  gtpv1u_ue_data_t        *gtpv1u_ue_data_p     = NULL;
  hashtable_rc_t           hash_rc              = HASH_TABLE_KEY_NOT_EXISTS;
  teid_t                   teid_eNB             = 0;
  int                      erab_index           = 0;

  message_p = itti_alloc_new_message(TASK_GTPV1_U, GTPV1U_ENB_DELETE_TUNNEL_RESP);

  GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).rnti     = req_pP->rnti;
  GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).status   = 0;


  hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, req_pP->rnti, (void**)&gtpv1u_ue_data_p);

  if (hash_rc == HASH_TABLE_OK) {

    for (erab_index = 0; erab_index < req_pP->num_erab; erab_index++) {
      teid_eNB = gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].teid_eNB;
      LOG_D(GTPU, "Rx GTPV1U_ENB_DELETE_TUNNEL user rnti %x eNB S1U teid %u eps bearer id %u\n",
            req_pP->rnti, teid_eNB, req_pP->eps_bearer_id[erab_index]);

      {
        memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));
        stack_req.apiType = NW_GTPV1U_ULP_API_DESTROY_TUNNEL_ENDPOINT;
        LOG_D(GTPU, "gtpv1u_delete_s1u_tunnel erab %u  %u\n",
              req_pP->eps_bearer_id[erab_index],
              teid_eNB);
        stack_req.apiInfo.destroyTunnelEndPointInfo.hStackSessionHandle   = gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].teid_eNB_stack_session;

        rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req);
        LOG_D(GTPU, ".\n");
      }

      if (rc != NW_GTPV1U_OK) {
        GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).status       |= 0xFF;
        LOG_E(GTPU, "NW_GTPV1U_ULP_API_DESTROY_TUNNEL_ENDPOINT failed");
      }

      //-----------------------
      // PDCP->GTPV1U mapping
      //-----------------------
      gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].state       = BEARER_DOWN;
      gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].teid_eNB    = 0;
      gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].teid_sgw    = 0;
      gtpv1u_ue_data_p->bearers[req_pP->eps_bearer_id[erab_index] - GTPV1U_BEARER_OFFSET].sgw_ip_addr = 0;
      gtpv1u_ue_data_p->num_bearers -= 1;

      if (gtpv1u_ue_data_p->num_bearers == 0) {
        hash_rc = hashtable_remove(gtpv1u_data_g.ue_mapping, req_pP->rnti);
        LOG_D(GTPU, "Removed user rnti %x, no more bearers configured\n", req_pP->rnti);
      }

      //-----------------------
      // GTPV1U->PDCP mapping
      //-----------------------
      hash_rc = hashtable_remove(gtpv1u_data_g.teid_mapping, teid_eNB);

      if (hash_rc != HASH_TABLE_OK) {
        LOG_D(GTPU, "Removing user rnti %x: eNB S1U teid %u not found\n", req_pP->rnti, teid_eNB);
      }
    }
  }// else silently do nothing


  LOG_D(GTPU, "Tx GTPV1U_ENB_DELETE_TUNNEL_RESP user rnti %x eNB S1U teid %u status %u\n",
        GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).rnti,
        GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).enb_S1u_teid,
        GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).status);

  MSC_LOG_TX_MESSAGE(
    MSC_GTPU_ENB,
    MSC_RRC_ENB,
    NULL,0,
    "0 GTPV1U_ENB_DELETE_TUNNEL_RESP rnti %x teid %x",
    GTPV1U_ENB_DELETE_TUNNEL_RESP(message_p).rnti,
    teid_eNB);

  return itti_send_msg_to_task(TASK_RRC_ENB, instanceP, message_p);
}

//-----------------------------------------------------------------------------
static int gtpv1u_eNB_init(void)
{
  int                     ret;
  NwGtpv1uRcT             rc = NW_GTPV1U_FAILURE;
  NwGtpv1uUlpEntityT      ulp;
  NwGtpv1uUdpEntityT      udp;
  NwGtpv1uLogMgrEntityT   log;
  NwGtpv1uTimerMgrEntityT tmr;
  Enb_properties_t       *enb_properties_p  = NULL;

  enb_properties_p = enb_config_get()->properties[0];


  memset(&gtpv1u_data_g, 0, sizeof(gtpv1u_data_g));

  LOG_I(GTPU, "Initializing GTPU stack %p\n",&gtpv1u_data_g);
  //gtpv1u_data_g.gtpv1u_stack;
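  /* Two lookup tables are maintained:
   *  - ue_mapping:   RNTI -> per-UE bearer contexts (PDCP -> GTP-U direction)
   *  - teid_mapping: local S1-U TEID -> (eNB id, RNTI, EPS bearer id) (GTP-U -> PDCP direction)
   */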
  /* Initialize UE hashtable */
  gtpv1u_data_g.ue_mapping      = hashtable_create (32, NULL, NULL);
  AssertFatal(gtpv1u_data_g.ue_mapping != NULL, " ERROR Initializing TASK_GTPV1_U task interface: in hashtable_create returned %p\n", gtpv1u_data_g.ue_mapping);
  gtpv1u_data_g.teid_mapping    = hashtable_create (256, NULL, NULL);
  AssertFatal(gtpv1u_data_g.teid_mapping != NULL, " ERROR Initializing TASK_GTPV1_U task interface: in hashtable_create\n");
  gtpv1u_data_g.enb_ip_address_for_S1u_S12_S4_up         = enb_properties_p->enb_ipv4_address_for_S1U;
  gtpv1u_data_g.ip_addr         = NULL;
  gtpv1u_data_g.enb_port_for_S1u_S12_S4_up = enb_properties_p->enb_port_for_S1U;
  //gtpv1u_data_g.udp_data;
  gtpv1u_data_g.seq_num         = 0;
  gtpv1u_data_g.restart_counter = 0;

  /* Initializing GTPv1-U stack */
  if ((rc = nwGtpv1uInitialize(&gtpv1u_data_g.gtpv1u_stack, GTPU_STACK_ENB)) != NW_GTPV1U_OK) {
    LOG_E(GTPU, "Failed to setup nwGtpv1u stack %x\n", rc);
    return -1;
  }

  if ((rc = nwGtpv1uSetLogLevel(gtpv1u_data_g.gtpv1u_stack,
                                NW_LOG_LEVEL_DEBG)) != NW_GTPV1U_OK) {
    LOG_E(GTPU, "Failed to setup loglevel for stack %x\n", rc);
    return -1;
  }

  /* Set the ULP API callback. Called once messages have been processed by the
   * nw-gtpv1u stack.
   */
  ulp.ulpReqCallback = gtpv1u_eNB_process_stack_req;

  if ((rc = nwGtpv1uSetUlpEntity(gtpv1u_data_g.gtpv1u_stack, &ulp)) != NW_GTPV1U_OK) {
    LOG_E(GTPU, "nwGtpv1uSetUlpEntity: %x", rc);
    return -1;
  }

  /* The nw-gtpv1u stack requires a UDP callback to send data over UDP.
   * We provide a wrapper to the UDP task.
   */
  udp.udpDataReqCallback = gtpv1u_eNB_send_udp_msg;

  if ((rc = nwGtpv1uSetUdpEntity(gtpv1u_data_g.gtpv1u_stack, &udp)) != NW_GTPV1U_OK) {
    LOG_E(GTPU, "nwGtpv1uSetUdpEntity: %x", rc);
    return -1;
  }

  log.logReqCallback = gtpv1u_eNB_log_request;

  if ((rc = nwGtpv1uSetLogMgrEntity(gtpv1u_data_g.gtpv1u_stack, &log)) != NW_GTPV1U_OK) {
    LOG_E(GTPU, "nwGtpv1uSetLogMgrEntity: %x", rc);
    return -1;
  }

  /* The timer interface is more involved: neither wrapper sends a message to
   * the timer task; both call the timer API start/stop functions directly.
   */
  tmr.tmrMgrHandle     = 0;
  tmr.tmrStartCallback = gtpv1u_start_timer_wrapper;
  tmr.tmrStopCallback  = gtpv1u_stop_timer_wrapper;

  if ((rc = nwGtpv1uSetTimerMgrEntity(gtpv1u_data_g.gtpv1u_stack, &tmr)) != NW_GTPV1U_OK) {
    LOG_E(GTPU, "nwGtpv1uSetTimerMgrEntity: %x", rc);
    return -1;
  }

#if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0

  if ((ret = gtpv1u_eNB_create_dump_socket()) < 0) {
    return -1;
  }

#endif
  ret = gtpv1u_eNB_send_init_udp(gtpv1u_data_g.enb_port_for_S1u_S12_S4_up);

  if (ret < 0) {
    return ret;
  }

  LOG_D(GTPU, "Initializing GTPV1U interface for eNB: DONE\n");
  return 0;
}


//-----------------------------------------------------------------------------
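/* Main ITTI task loop: initialise the GTP-U stack, mark the task ready, then
 * serve tunnel delete requests from RRC, tunnel data requests towards the
 * S-GW, UDP data indications from the UDP task, timer expirations and task
 * termination.
 */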
void *gtpv1u_eNB_task(void *args)
{
  int                       rc = 0;
  instance_t                instance;
  //const char               *msg_name_p;

  rc = gtpv1u_eNB_init();
  AssertFatal(rc == 0, "gtpv1u_eNB_init Failed");
  itti_mark_task_ready(TASK_GTPV1_U);
  MSC_START_USE();

  while(1) {
    /* Trying to fetch a message from the message queue.
     * If the queue is empty, this function will block till a
     * message is sent to the task.
     */
    MessageDef *received_message_p = NULL;
    itti_receive_msg(TASK_GTPV1_U, &received_message_p);
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_ENB_TASK, VCD_FUNCTION_IN);
    DevAssert(received_message_p != NULL);

    instance = ITTI_MSG_INSTANCE(received_message_p);
    //msg_name_p = ITTI_MSG_NAME(received_message_p);

    switch (ITTI_MSG_ID(received_message_p)) {

    case GTPV1U_ENB_DELETE_TUNNEL_REQ: {
      gtpv1u_delete_s1u_tunnel(instance, &received_message_p->ittiMsg.Gtpv1uDeleteTunnelReq);
    }
    break;

    // DATA COMING FROM UDP
    case UDP_DATA_IND: {
      udp_data_ind_t *udp_data_ind_p;
      udp_data_ind_p = &received_message_p->ittiMsg.udp_data_ind;
      nwGtpv1uProcessUdpReq(gtpv1u_data_g.gtpv1u_stack,
                            udp_data_ind_p->buffer,
                            udp_data_ind_p->buffer_length,
                            udp_data_ind_p->peer_port,
                            udp_data_ind_p->peer_address);
      //itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), udp_data_ind_p->buffer);
    }
    break;

    // DATA TO BE SENT TO UDP
    case GTPV1U_ENB_TUNNEL_DATA_REQ: {
      gtpv1u_enb_tunnel_data_req_t *data_req_p           = NULL;
      NwGtpv1uUlpApiT               stack_req;
      NwGtpv1uRcT                   rc                   = NW_GTPV1U_FAILURE;
      hashtable_rc_t                hash_rc              = HASH_TABLE_KEY_NOT_EXISTS;
      gtpv1u_ue_data_t             *gtpv1u_ue_data_p     = NULL;
      teid_t                        enb_s1u_teid         = 0;
      teid_t                        sgw_s1u_teid         = 0;

      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_IN);
      data_req_p = &GTPV1U_ENB_TUNNEL_DATA_REQ(received_message_p);
      //ipv4_send_data(ipv4_data_p->sd, data_ind_p->buffer, data_ind_p->length);

#if defined(GTP_DUMP_SOCKET) && GTP_DUMP_SOCKET > 0
      gtpv1u_eNB_write_dump_socket(&data_req_p->buffer[data_req_p->offset],data_req_p->length);
#endif
      memset(&stack_req, 0, sizeof(NwGtpv1uUlpApiT));

      hash_rc = hashtable_get(gtpv1u_data_g.ue_mapping, (uint64_t)data_req_p->rnti, (void**)&gtpv1u_ue_data_p);

      if (hash_rc == HASH_TABLE_KEY_NOT_EXISTS) {
        LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: while getting ue rnti %x in hashtable ue_mapping\n", data_req_p->rnti);
      } else {
        if ((data_req_p->rab_id >= GTPV1U_BEARER_OFFSET) && (data_req_p->rab_id <= max_val_DRB_Identity)) {
          enb_s1u_teid                        = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_eNB;
          sgw_s1u_teid                        = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].teid_sgw;
          stack_req.apiType                   = NW_GTPV1U_ULP_API_SEND_TPDU;
          stack_req.apiInfo.sendtoInfo.teid   = sgw_s1u_teid;
          stack_req.apiInfo.sendtoInfo.ipAddr = gtpv1u_ue_data_p->bearers[data_req_p->rab_id - GTPV1U_BEARER_OFFSET].sgw_ip_addr;

          rc = nwGtpv1uGpduMsgNew(
                 gtpv1u_data_g.gtpv1u_stack,
                 sgw_s1u_teid,
                 NW_FALSE,
                 gtpv1u_data_g.seq_num++,
                 data_req_p->buffer,
                 data_req_p->length,
                 data_req_p->offset,
                 &(stack_req.apiInfo.sendtoInfo.hMsg));

          if (rc != NW_GTPV1U_OK) {
            LOG_E(GTPU, "nwGtpv1uGpduMsgNew failed: 0x%x\n", rc);
            MSC_LOG_EVENT(MSC_GTPU_ENB,"0 Failed send G-PDU ltid %u rtid %u size %u",
                          enb_s1u_teid,sgw_s1u_teid,data_req_p->length);
            (void)enb_s1u_teid; /* avoid gcc warning "set but not used" */
          } else {
            rc = nwGtpv1uProcessUlpReq(gtpv1u_data_g.gtpv1u_stack, &stack_req);

            if (rc != NW_GTPV1U_OK) {
              LOG_E(GTPU, "nwGtpv1uProcessUlpReq failed: 0x%x\n", rc);
              MSC_LOG_EVENT(MSC_GTPU_ENB,"0 Failed send G-PDU ltid %u rtid %u size %u",
                            enb_s1u_teid,sgw_s1u_teid,data_req_p->length);
            } else {
              MSC_LOG_TX_MESSAGE(
                MSC_GTPU_ENB,
                MSC_GTPU_SGW,
                NULL,
                0,
                MSC_AS_TIME_FMT" G-PDU ltid %u rtid %u size %u",
                0,0,
                enb_s1u_teid,
                sgw_s1u_teid,
                data_req_p->length);
            }

            rc = nwGtpv1uMsgDelete(gtpv1u_data_g.gtpv1u_stack,
                                   stack_req.apiInfo.sendtoInfo.hMsg);

            if (rc != NW_GTPV1U_OK) {
              LOG_E(GTPU, "nwGtpv1uMsgDelete failed: 0x%x\n", rc);
            }
          }
        }
      }

      VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_PROCESS_TUNNEL_DATA_REQ, VCD_FUNCTION_OUT);
      /* Buffer still needed, do not free it */
      //itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), data_req_p->buffer);
    }
    break;

    case TERMINATE_MESSAGE: {
      if (gtpv1u_data_g.ue_mapping != NULL) {
        hashtable_destroy (gtpv1u_data_g.ue_mapping);
      }

      if (gtpv1u_data_g.teid_mapping != NULL) {
        hashtable_destroy (gtpv1u_data_g.teid_mapping);
      }

      itti_exit_task();
    }
    break;

    case TIMER_HAS_EXPIRED:
      nwGtpv1uProcessTimeout(&received_message_p->ittiMsg.timer_has_expired.arg);
      break;

    default: {
      LOG_E(GTPU, "Unkwnon message ID %d:%s\n",
            ITTI_MSG_ID(received_message_p),
            ITTI_MSG_NAME(received_message_p));
    }
    break;
    }

    rc = itti_free(ITTI_MSG_ORIGIN_ID(received_message_p), received_message_p);
    AssertFatal(rc == EXIT_SUCCESS, "Failed to free memory (%d)!\n", rc);
    received_message_p = NULL;
    VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_GTPV1U_ENB_TASK, VCD_FUNCTION_OUT);
  }

  return NULL;
}