- Timestamp: Feb 21, 2013 11:26:35 AM
- Location: pjproject/trunk/pjnath
- Files: 1 added, 22 edited
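Summary (as recoverable from the diffs below): the per-object mutexes (pj_mutex_t / pj_lock_t), the ad-hoc busy counters (pj_atomic_t) and the pj_stun_session_set_lock() API are replaced with a single reference-counted group lock (pj_grp_lock_t) shared by each group of related objects. pj_ice_sess_create(), pj_stun_session_create(), pj_stun_client_tsx_create() and pj_turn_session_create() gain an optional grp_lock argument; pj_stun_sock_cfg and pj_turn_sock_cfg gain a grp_lock field; timers are scheduled through pj_timer_heap_schedule_w_grp_lock() and cancelled with pj_timer_heap_cancel_if_active(); and a new concurrency test (concur_test.o, ice_conc_test) exercises destroy-during-negotiation races.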
pjproject/trunk/pjnath/build/Makefile
(r4308 → r4360)

@@ -41 +41 @@
 #
 export PJNATH_TEST_SRCDIR = ../src/pjnath-test
-export PJNATH_TEST_OBJS += ice_test.o stun.o sess_auth.o server.o \
+export PJNATH_TEST_OBJS += ice_test.o stun.o sess_auth.o server.o concur_test.o \
 	stun_sock_test.o turn_sock_test.o test.o
 export PJNATH_TEST_CFLAGS += $(_CFLAGS)
pjproject/trunk/pjnath/include/pjnath/ice_session.h
(r4196 → r4360)

@@ -613 +613 @@
     pj_pool_t           *pool;      /**< Pool instance.            */
     void                *user_data; /**< App. data.                */
-    pj_mutex_t          *mutex;     /**< Mutex.                    */
+    pj_grp_lock_t       *grp_lock;  /**< Group lock                */
     pj_ice_sess_role     role;      /**< ICE role.                 */
     pj_ice_sess_options  opt;       /**< Options                   */
@@ -731 +731 @@
  *                      generated.
  * @param local_passwd  Optional string to be used as local password.
+ * @param grp_lock      Optional group lock to be used by this session.
+ *                      If NULL, the session will create one itself.
  * @param p_ice         Pointer to receive the ICE session instance.
@@ -742 +744 @@
                                        const pj_str_t *local_ufrag,
                                        const pj_str_t *local_passwd,
+                                       pj_grp_lock_t *grp_lock,
                                        pj_ice_sess **p_ice);
pjproject/trunk/pjnath/include/pjnath/stun_session.h
(r4352 → r4360)

@@ -31 +31 @@
 #include <pjnath/stun_transaction.h>
 #include <pj/list.h>
+#include <pj/lock.h>
 #include <pj/timer.h>
@@ -385 +386 @@
  * @param cb            Session callback.
  * @param fingerprint   Enable message fingerprint for outgoing messages.
+ * @param grp_lock      Optional group lock to be used by this session.
+ *                      If NULL, the session will create one itself.
  * @param p_sess        Pointer to receive STUN session instance.
@@ -393 +396 @@
                                             const pj_stun_session_cb *cb,
                                             pj_bool_t fingerprint,
+                                            pj_grp_lock_t *grp_lock,
                                             pj_stun_session **p_sess);
@@ -430 +434 @@
  */
 PJ_DECL(void*) pj_stun_session_get_user_data(pj_stun_session *sess);
-
-/**
- * Change the lock object used by the STUN session. By default, the STUN
- * session uses a mutex to protect its internal data. If application already
- * protects access to STUN session with higher layer lock, it may disable
- * the mutex protection in the STUN session by changing the STUN session
- * lock to a NULL mutex.
- *
- * @param sess      The STUN session instance.
- * @param lock      New lock instance to be used by the STUN session.
- * @param auto_del  Specify whether STUN session should destroy this
- *                  lock instance when it's destroyed.
- */
-PJ_DECL(pj_status_t) pj_stun_session_set_lock(pj_stun_session *sess,
-                                              pj_lock_t *lock,
-                                              pj_bool_t auto_del);
 
 /**
pjproject/trunk/pjnath/include/pjnath/stun_sock.h
(r4343 → r4360)

@@ -28 +28 @@
 #include <pjlib-util/resolver.h>
 #include <pj/ioqueue.h>
+#include <pj/lock.h>
 #include <pj/sock.h>
 #include <pj/sock_qos.h>
@@ -218 +219 @@
 typedef struct pj_stun_sock_cfg
 {
+    /**
+     * The group lock to be used by the STUN socket. If NULL, the STUN socket
+     * will create one internally.
+     *
+     * Default: NULL
+     */
+    pj_grp_lock_t *grp_lock;
+
     /**
      * Packet buffer size.
pjproject/trunk/pjnath/include/pjnath/stun_transaction.h
(r4352 → r4360)

@@ -28 +28 @@
 #include <pjnath/stun_msg.h>
 #include <pjnath/stun_config.h>
+#include <pj/lock.h>
 
@@ -125 +126 @@
  *                  various settings for the transaction.
  * @param pool      Pool to be used to allocate memory from.
+ * @param grp_lock  Group lock to synchronize.
  * @param cb        Callback structure, to be used by the transaction
  *                  to send message and to notify the application about
@@ -134 +136 @@
 PJ_DECL(pj_status_t) pj_stun_client_tsx_create( pj_stun_config *cfg,
                                                 pj_pool_t *pool,
+                                                pj_grp_lock_t *grp_lock,
                                                 const pj_stun_tsx_cb *cb,
                                                 pj_stun_client_tsx **p_tsx);
@@ -160 +163 @@
 
 /**
- * Destroy a STUN client transaction immediately. This function can be
- * called at any time to stop the transaction and destroy it.
+ * Stop the client transaction.
  *
  * @param tsx       The STUN transaction.
@@ -168 +170 @@
  *                  is NULL.
  */
-PJ_DECL(pj_status_t) pj_stun_client_tsx_destroy(pj_stun_client_tsx *tsx);
+PJ_DECL(pj_status_t) pj_stun_client_tsx_stop(pj_stun_client_tsx *tsx);
pjproject/trunk/pjnath/include/pjnath/turn_session.h
(r3553 → r4360)

@@ -418 +418 @@
  * @param af        Address family of the client connection. Currently
  *                  pj_AF_INET() and pj_AF_INET6() are supported.
  * @param conn_type Connection type to the TURN server.
+ * @param grp_lock  Optional group lock object to be used by this session.
+ *                  If this value is NULL, the session will create
+ *                  a group lock internally.
  * @param cb        Callback to receive events from the TURN session.
  * @param options   Option flags, currently this value must be zero.
@@ -433 +436 @@
                                             int af,
                                             pj_turn_tp_type conn_type,
+                                            pj_grp_lock_t *grp_lock,
                                             const pj_turn_session_cb *cb,
                                             unsigned options,
pjproject/trunk/pjnath/include/pjnath/turn_sock.h
(r4343 → r4360)

@@ -109 +109 @@
 typedef struct pj_turn_sock_cfg
 {
+    /**
+     * The group lock to be used by the STUN socket. If NULL, the STUN socket
+     * will create one internally.
+     *
+     * Default: NULL
+     */
+    pj_grp_lock_t *grp_lock;
+
     /**
      * Packet buffer size.
pjproject/trunk/pjnath/src/pjnath-test/ice_test.c
(r3553 → r4360)

@@ -30 +30 @@
 #define NODELAY         0xFFFFFFFF
 #define SRV_DOMAIN      "pjsip.lab.domain"
+#define MAX_THREADS     16
 
+#define THIS_FILE       "ice_test.c"
 #define INDENT          "    "
@@ -49 +51 @@
 };
 
 /* Role    comp#   host?   stun?   turn?   flag?  ans_del snd_del des_del */
 /* Test session configuration */
 struct test_cfg
@@ -61 +63 @@
     unsigned    answer_delay;   /* Delay before sending SDP  */
-    unsigned    send_delay;     /* Delay before sending data */
-    unsigned    destroy_delay;  /* Delay before destroy()    */
+    unsigned    send_delay;     /* unused */
+    unsigned    destroy_delay;  /* unused */
 
     struct test_result expected;/* Expected result */
@@ -80 +82 @@
 };
 
+/* Session param */
+struct sess_param
+{
+    unsigned    worker_cnt;
+    unsigned    worker_timeout;
+    pj_bool_t   worker_quit;
+
+    pj_bool_t   destroy_after_create;
+    pj_bool_t   destroy_after_one_done;
+};
+
 /* The test session */
 struct test_sess
@@ -87 +100 @@
     pj_dns_resolver     *resolver;
 
+    struct sess_param   *param;
+
     test_server         *server;
+
+    pj_thread_t         *worker_threads[MAX_THREADS];
 
     unsigned             server_flag;
@@ -191 +208 @@
                        struct test_cfg *caller_cfg,
                        struct test_cfg *callee_cfg,
+                       struct sess_param *test_param,
                        struct test_sess **p_sess)
@@ -205 +223 @@
     sess->pool = pool;
     sess->stun_cfg = stun_cfg;
+    sess->param = test_param;
 
     pj_memcpy(&sess->caller.cfg, caller_cfg, sizeof(*caller_cfg));
@@ -262 +281 @@
 static void destroy_sess(struct test_sess *sess, unsigned wait_msec)
 {
+    unsigned i;
+
     if (sess->caller.ice) {
        pj_ice_strans_destroy(sess->caller.ice);
@@ -270 +291 @@
        pj_ice_strans_destroy(sess->callee.ice);
        sess->callee.ice = NULL;
     }
+
+    sess->param->worker_quit = PJ_TRUE;
+    for (i=0; i<sess->param->worker_cnt; ++i) {
+       if (sess->worker_threads[i])
+           pj_thread_join(sess->worker_threads[i]);
     }
@@ -327 +354 @@
        ept->result.nego_status = status;
        break;
+    case PJ_ICE_STRANS_OP_KEEP_ALIVE:
+       /* keep alive failed? */
+       break;
     default:
        pj_assert(!"Unknown op");
@@ -385 +415 @@
        c1 = pj_ice_strans_get_valid_pair(ept1->ice, i+1);
        if (c1 == NULL) {
-           PJ_LOG(3,("", INDENT "err: unable to get valid pair for ice1 "
+           PJ_LOG(3,(THIS_FILE, INDENT "err: unable to get valid pair for ice1 "
                      "component %d", i+1));
            return start_err - 2;
@@ -392 +422 @@
        c2 = pj_ice_strans_get_valid_pair(ept2->ice, i+1);
        if (c2 == NULL) {
-           PJ_LOG(3,("", INDENT "err: unable to get valid pair for ice2 "
+           PJ_LOG(3,(THIS_FILE, INDENT "err: unable to get valid pair for ice2 "
                      "component %d", i+1));
            return start_err - 4;
@@ -398 +428 @@
        if (pj_sockaddr_cmp(&c1->rcand->addr, &c2->lcand->addr) != 0) {
-           PJ_LOG(3,("", INDENT "err: candidate pair does not match "
+           PJ_LOG(3,(THIS_FILE, INDENT "err: candidate pair does not match "
                      "for component %d", i+1));
            return start_err - 6;
@@ -409 +439 @@
            pj_ice_strans_get_valid_pair(ept1->ice, i+1) != NULL)
        {
-           PJ_LOG(3,("", INDENT "err: ice1 shouldn't have valid pair "
+           PJ_LOG(3,(THIS_FILE, INDENT "err: ice1 shouldn't have valid pair "
                      "for component %d", i+1));
            return start_err - 8;
@@ -416 +446 @@
            pj_ice_strans_get_valid_pair(ept2->ice, i+1) != NULL)
        {
-           PJ_LOG(3,("", INDENT "err: ice2 shouldn't have valid pair "
+           PJ_LOG(3,(THIS_FILE, INDENT "err: ice2 shouldn't have valid pair "
                      "for component %d", i+1));
            return start_err - 9;
@@ -437 +467 @@
            break; \
        } \
-       if (t.sec - t0.sec > (timeout)) break; \
+       PJ_TIME_VAL_SUB(t, t0); \
+       if (PJ_TIME_VAL_MSEC(t) >= (timeout)) break; \
     } \
 }
@@ -443 +474 @@
-static int perform_test(const char *title,
-                       pj_stun_config *stun_cfg,
-                       unsigned server_flag,
-                       struct test_cfg *caller_cfg,
-                       struct test_cfg *callee_cfg)
+int worker_thread_proc(void *data)
+{
+    pj_status_t rc;
+    struct test_sess *sess = (struct test_sess *) data;
+    pj_stun_config *stun_cfg = sess->stun_cfg;
+
+    /* Wait until negotiation is complete on both endpoints */
+#define ALL_DONE    (sess->param->worker_quit || \
+                     (sess->caller.result.nego_status!=PJ_EPENDING && \
+                      sess->callee.result.nego_status!=PJ_EPENDING))
+    WAIT_UNTIL(sess->param->worker_timeout, ALL_DONE, rc);
+
+    return 0;
+}
+
+static int perform_test2(const char *title,
+                        pj_stun_config *stun_cfg,
+                        unsigned server_flag,
+                        struct test_cfg *caller_cfg,
+                        struct test_cfg *callee_cfg,
+                        struct sess_param *test_param)
 {
     pjlib_state pjlib_state;
     struct test_sess *sess;
+    unsigned i;
     int rc;
 
-    PJ_LOG(3,("", INDENT "%s", title));
+    PJ_LOG(3,(THIS_FILE, INDENT "%s", title));
 
     capture_pjlib_state(stun_cfg, &pjlib_state);
 
-    rc = create_sess(stun_cfg, server_flag, caller_cfg, callee_cfg, &sess);
+    rc = create_sess(stun_cfg, server_flag, caller_cfg, callee_cfg, test_param, &sess);
     if (rc != 0)
        return rc;
@@ -464 +511 @@
     /* Wait until both ICE transports are initialized */
-    WAIT_UNTIL(30, ALL_READY, rc);
+    WAIT_UNTIL(30000, ALL_READY, rc);
 
     if (!ALL_READY) {
-       PJ_LOG(3,("", INDENT "err: init timed-out"));
+       PJ_LOG(3,(THIS_FILE, INDENT "err: init timed-out"));
        destroy_sess(sess, 500);
        return -100;
@@ -490 +537 @@
        goto on_return;
     }
-
     /* Init ICE on caller */
     rc = pj_ice_strans_init_ice(sess->caller.ice, sess->caller.cfg.role,
@@ -508 +554 @@
        return -110;
     }
-
     /* Start ICE on callee */
     rc = start_ice(&sess->callee, &sess->caller);
@@ -515 +560 @@
        return -120;
     }
-
     /* Wait for callee's answer_delay */
     poll_events(stun_cfg, sess->callee.cfg.answer_delay, PJ_FALSE);
-
     /* Start ICE on caller */
     rc = start_ice(&sess->caller, &sess->callee);
@@ -526 +569 @@
     }
 
-    /* Wait until negotiation is complete on both endpoints */
-#define ALL_DONE    (sess->caller.result.nego_status!=PJ_EPENDING && \
-                    sess->callee.result.nego_status!=PJ_EPENDING)
-    WAIT_UNTIL(30, ALL_DONE, rc);
-
+    for (i=0; i<sess->param->worker_cnt; ++i) {
+       pj_status_t status;
+
+       status = pj_thread_create(sess->pool, "worker_thread",
+                                 worker_thread_proc, sess, 0, 0,
+                                 &sess->worker_threads[i]);
+       if (status != PJ_SUCCESS) {
+           PJ_LOG(3,(THIS_FILE, INDENT "err: create thread"));
+           destroy_sess(sess, 500);
+           return -135;
+       }
+    }
+
+    if (sess->param->destroy_after_create)
+       goto on_destroy;
+
+    if (sess->param->destroy_after_one_done) {
+       while (sess->caller.result.init_status==PJ_EPENDING &&
+              sess->callee.result.init_status==PJ_EPENDING)
+       {
+           if (sess->param->worker_cnt)
+               pj_thread_sleep(0);
+           else
+               poll_events(stun_cfg, 0, PJ_FALSE);
+       }
+       goto on_destroy;
+    }
+
+    WAIT_UNTIL(30000, ALL_DONE, rc);
     if (!ALL_DONE) {
-       PJ_LOG(3,("", INDENT "err: negotiation timed-out"));
+       PJ_LOG(3,(THIS_FILE, INDENT "err: negotiation timed-out"));
        destroy_sess(sess, 500);
        return -140;
@@ -562 +629 @@
 
     /* Looks like everything is okay */
+on_destroy:
 
     /* Destroy ICE stream transports first to let it de-allocate
@@ -579 +647 @@
 on_return:
     /* Wait.. */
-    poll_events(stun_cfg, 500, PJ_FALSE);
+    poll_events(stun_cfg, 200, PJ_FALSE);
 
     /* Now destroy everything */
@@ -592 +660 @@
     }
 
-    return 0;
+    return rc;
+}
+
+static int perform_test(const char *title,
+                       pj_stun_config *stun_cfg,
+                       unsigned server_flag,
+                       struct test_cfg *caller_cfg,
+                       struct test_cfg *callee_cfg)
+{
+    struct sess_param test_param;
+
+    pj_bzero(&test_param, sizeof(test_param));
+    return perform_test2(title, stun_cfg, server_flag, caller_cfg,
+                        callee_cfg, &test_param);
 }
@@ -681 +762 @@
        goto on_return;
     }
 
     /* Simple test first with srflx candidate */
     if (1) {
@@ -745 +826 @@
     };
 
     rc = perform_test(cfg.title, &stun_cfg, cfg.server_flag,
                      &cfg.ua1, &cfg.ua2);
     if (rc != 0)
@@ -786 +867 @@
     }
 
+
     /* STUN failure, testing TURN deallocation */
     if (1) {
@@ -793 +875 @@
           0xFFFF & (~(CREATE_STUN_SERVER)),
           /* Role comp# host? stun? turn? flag? ans_del snd_del des_del */
-          {ROLE1, 2, YES, YES, YES, 0, 0, 0, 0, {PJNATH_ESTUNTIMEDOUT, -1}},
-          {ROLE2, 2, YES, YES, YES, 0, 0, 0, 0, {PJNATH_ESTUNTIMEDOUT, -1}}
+          {ROLE1, 1, YES, YES, YES, 0, 0, 0, 0, {PJNATH_ESTUNTIMEDOUT, -1}},
+          {ROLE2, 1, YES, YES, YES, 0, 0, 0, 0, {PJNATH_ESTUNTIMEDOUT, -1}}
        };
 
        rc = perform_test(cfg.title, &stun_cfg, cfg.server_flag,
                          &cfg.ua1, &cfg.ua2);
        if (rc != 0)
@@ -819 +901 @@
     unsigned d;
 
-    PJ_LOG(3,("", " %s", cfg->title));
+    PJ_LOG(3,(THIS_FILE, " %s", cfg->title));
 
     /* For each test item, test with various answer delay */
@@ -877 +959 @@
 }
 
+int ice_one_conc_test(pj_stun_config *stun_cfg, int err_quit)
+{
+    struct sess_cfg_t {
+       const char *title;
+       unsigned    server_flag;
+       struct test_cfg ua1;
+       struct test_cfg ua2;
+    } cfg =
+    {
+       "Concurrency test",
+       0xFFFF,
+       /* Role comp# host? stun? turn? flag? ans_del snd_del des_del */
+       {ROLE1, 1, YES, YES, YES, 0, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}},
+       {ROLE2, 1, YES, YES, YES, 0, 0, 0, 0, {PJ_SUCCESS, PJ_SUCCESS}}
+    };
+    struct sess_param test_param;
+    int rc;
+
+
+    /* test a: destroy as soon as nego starts */
+    cfg.title = "    ice test a: immediate destroy";
+    pj_bzero(&test_param, sizeof(test_param));
+    test_param.worker_cnt = 4;
+    test_param.worker_timeout = 1000;
+    test_param.destroy_after_create = PJ_TRUE;
+
+    rc = perform_test2(cfg.title, stun_cfg, cfg.server_flag,
+                      &cfg.ua1, &cfg.ua2, &test_param);
+    if (rc != 0 && err_quit)
+       return rc;
+
+    /* test b: destroy as soon as one is done */
+    cfg.title = "    ice test b: destroy after 1 success";
+    test_param.destroy_after_create = PJ_FALSE;
+    test_param.destroy_after_one_done = PJ_TRUE;
+
+    rc = perform_test2(cfg.title, stun_cfg, cfg.server_flag,
+                      &cfg.ua1, &cfg.ua2, &test_param);
+    if (rc != 0 && err_quit)
+       return rc;
+
+    /* test c: normal */
+    cfg.title = "    ice test c: normal flow";
+    pj_bzero(&test_param, sizeof(test_param));
+    test_param.worker_cnt = 4;
+    test_param.worker_timeout = 1000;
+
+    rc = perform_test2(cfg.title, stun_cfg, cfg.server_flag,
+                      &cfg.ua1, &cfg.ua2, &test_param);
+    if (rc != 0 && err_quit)
+       return rc;
+
+    return 0;
+}
+
+int ice_conc_test(void)
+{
+    const int LOOP = 100;
+    pj_pool_t *pool;
+    pj_stun_config stun_cfg;
+    unsigned i;
+    int rc;
+
+    pool = pj_pool_create(mem, NULL, 512, 512, NULL);
+    rc = create_stun_config(pool, &stun_cfg);
+    if (rc != PJ_SUCCESS) {
+       pj_pool_release(pool);
+       return -7;
+    }
+
+    for (i = 0; i < LOOP; i++) {
+       PJ_LOG(3,(THIS_FILE, INDENT "Test %d of %d", i+1, LOOP));
+       rc = ice_one_conc_test(&stun_cfg, PJ_TRUE);
+       if (rc)
+           break;
+    }
+
+on_return:
+    destroy_stun_config(&stun_cfg);
+    pj_pool_release(pool);
+
+    return rc;
+}
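The worker threads added above do nothing but pump events: WAIT_UNTIL expands to a polling loop roughly like the sketch below (simplified from the test's poll_events() machinery), so several threads dispatch timer and ioqueue callbacks concurrently, which is exactly what provokes the races this changeset fixes:

    /* Sketch of one polling iteration; assumed simplification of the
     * test's poll_events() helper. */
    static void poll_once(pj_stun_config *stun_cfg)
    {
        pj_time_val timeout = {0, 10};
        pj_timer_heap_poll(stun_cfg->timer_heap, NULL);
        pj_ioqueue_poll(stun_cfg->ioqueue, &timeout);
    }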
pjproject/trunk/pjnath/src/pjnath-test/sess_auth.c
(r3553 → r4360)

@@ -248 +248 @@
     sess_cb.on_rx_request = &server_on_rx_request;
     sess_cb.on_send_msg = &server_send_msg;
-    status = pj_stun_session_create(&stun_cfg, "server", &sess_cb, PJ_FALSE, &server->sess);
+    status = pj_stun_session_create(&stun_cfg, "server", &sess_cb, PJ_FALSE, NULL, &server->sess);
     if (status != PJ_SUCCESS) {
        destroy_server();
@@ -480 +480 @@
     sess_cb.on_request_complete = &client_on_request_complete;
     sess_cb.on_send_msg = &client_send_msg;
-    status = pj_stun_session_create(&stun_cfg, "client", &sess_cb, PJ_FALSE, &client->sess);
+    status = pj_stun_session_create(&stun_cfg, "client", &sess_cb, PJ_FALSE, NULL, &client->sess);
     if (status != PJ_SUCCESS) {
        destroy_client_server();
pjproject/trunk/pjnath/src/pjnath-test/stun_sock_test.c
(r3553 → r4360)

@@ -299 +299 @@
     pj_str_t srv_addr;
     pj_time_val timeout, t;
-    int ret = 0;
+    int i, ret = 0;
     pj_status_t status;
@@ -360 +360 @@
     destroy_server(srv);
     destroy_client(client);
+    for (i=0; i<7; ++i)
+       handle_events(cfg, 50);
     return ret;
 }
@@ -374 +376 @@
     pj_str_t srv_addr;
     pj_time_val timeout, t;
-    int ret = 0;
+    int i, ret = 0;
     pj_status_t status;
@@ -427 +429 @@
     destroy_server(srv);
     destroy_client(client);
+    for (i=0; i<7; ++i)
+       handle_events(cfg, 50);
     return ret;
 }
@@ -441 +445 @@
     pj_str_t srv_addr;
     pj_time_val timeout, t;
-    int ret = 0;
+    int i, ret = 0;
     pj_status_t status;
@@ -792 +796 @@
     destroy_server(srv);
     destroy_client(client);
+    for (i=0; i<7; ++i)
+       handle_events(cfg, 50);
     return ret;
 }
pjproject/trunk/pjnath/src/pjnath-test/test.c
(r3553 → r4360)

@@ -35 +35 @@
     pj_ioqueue_t *ioqueue;
     pj_timer_heap_t *timer_heap;
+    pj_lock_t *lock;
     pj_status_t status;
@@ -50 +51 @@
     }
 
+    pj_lock_create_recursive_mutex(pool, NULL, &lock);
+    pj_timer_heap_set_lock(timer_heap, lock, PJ_TRUE);
+
     pj_stun_config_init(stun_cfg, mem, 0, ioqueue, timer_heap);
@@ -106 +110 @@
     st->timer_cnt = pj_timer_heap_count(cfg->timer_heap);
 
-    cp = (pj_caching_pool*) mem;
+    cp = (pj_caching_pool*)cfg->pf;
     st->pool_used_cnt = cp->used_count;
 }
@@ -121 +125 @@
        PJ_LOG(3,("", "  error: possibly leaking timer"));
        rc |= ERR_TIMER_LEAK;
+
+#if PJ_TIMER_DEBUG
+       pj_timer_heap_dump(cfg->timer_heap);
+#endif
     }
@@ -149 +157 @@
                     PJ_LOG_HAS_MICRO_SEC;
 
+pj_log_func *orig_log_func;
+FILE *log_file;
+
+static void test_log_func(int level, const char *data, int len)
+{
+    if (log_file) {
+       fwrite(data, len, 1, log_file);
+    }
+    if (level <= 3)
+       orig_log_func(level, data, len);
+}
+
 static int test_inner(void)
@@ -159 +179 @@
     pj_log_set_level(3);
     pj_log_set_decor(param_log_decor);
+#elif 1
+    log_file = fopen("pjnath-test.log", "wt");
+    pj_log_set_level(5);
+    orig_log_func = pj_log_get_log_func();
+    pj_log_set_log_func(&test_log_func);
 #endif
@@ -190 +215 @@
 #endif
 
+#if INCLUDE_CONCUR_TEST
+    DO_TEST(concur_test());
+#endif
+
 on_return:
+    if (log_file)
+       fclose(log_file);
     return rc;
 }
pjproject/trunk/pjnath/src/pjnath-test/test.h
(r3553 → r4360)

@@ -26 +26 @@
 #define INCLUDE_STUN_SOCK_TEST 1
 #define INCLUDE_TURN_SOCK_TEST 1
+#define INCLUDE_CONCUR_TEST    1
 
 int stun_test(void);
@@ -32 +33 @@
 int turn_sock_test(void);
 int ice_test(void);
+int concur_test(void);
 int test_main(void);
 
 extern void app_perror(const char *title, pj_status_t rc);
 extern pj_pool_factory *mem;
+
+int ice_one_conc_test(pj_stun_config *stun_cfg, int err_quit);
 
 ////////////////////////////////////
pjproject/trunk/pjnath/src/pjnath/ice_session.c
(r4357 → r4360)

@@ -98 +98 @@
 };
 
+#define THIS_FILE               "ice_session.c"
 #define CHECK_NAME_LEN          128
 #define LOG4(expr)              PJ_LOG(4,expr)
@@ -135 +136 @@
 static void on_ice_complete(pj_ice_sess *ice, pj_status_t status);
 static void ice_keep_alive(pj_ice_sess *ice, pj_bool_t send_now);
+static void ice_on_destroy(void *obj);
 static void destroy_ice(pj_ice_sess *ice,
                        pj_status_t reason);
@@ -289 +291 @@
     status = pj_stun_session_create(&ice->stun_cfg, NULL,
                                    &sess_cb, PJ_TRUE,
+                                   ice->grp_lock,
                                    &comp->stun_sess);
     if (status != PJ_SUCCESS)
@@ -333 +336 @@
                                       const pj_str_t *local_ufrag,
                                       const pj_str_t *local_passwd,
+                                      pj_grp_lock_t *grp_lock,
                                       pj_ice_sess **p_ice)
@@ -360 +364 @@
             name, ice);
 
-    status = pj_mutex_create_recursive(pool, ice->obj_name,
-                                      &ice->mutex);
-    if (status != PJ_SUCCESS) {
-       destroy_ice(ice, status);
-       return status;
+    if (grp_lock) {
+       ice->grp_lock = grp_lock;
+    } else {
+       status = pj_grp_lock_create(pool, NULL, &ice->grp_lock);
+       if (status != PJ_SUCCESS) {
+           pj_pool_release(pool);
+           return status;
+       }
     }
+
+    pj_grp_lock_add_ref(ice->grp_lock);
+    pj_grp_lock_add_handler(ice->grp_lock, pool, ice,
+                           &ice_on_destroy);
 
     pj_memcpy(&ice->cb, cb, sizeof(*cb));
@@ -445 +456 @@
 
 /*
+ * Callback to really destroy the session
+ */
+static void ice_on_destroy(void *obj)
+{
+    pj_ice_sess *ice = (pj_ice_sess*) obj;
+
+    if (ice->pool) {
+       pj_pool_t *pool = ice->pool;
+       ice->pool = NULL;
+       pj_pool_release(pool);
+    }
+    LOG4((THIS_FILE, "ICE session %p destroyed", ice));
+}
+
+/*
  * Destroy
  */
@@ -453 +479 @@
 
     if (reason == PJ_SUCCESS) {
-       LOG4((ice->obj_name, "Destroying ICE session"));
+       LOG4((ice->obj_name, "Destroying ICE session %p", ice));
+    }
+
+    pj_grp_lock_acquire(ice->grp_lock);
+
+    if (ice->is_destroying) {
+       pj_grp_lock_release(ice->grp_lock);
+       return;
     }
 
     ice->is_destroying = PJ_TRUE;
 
-    /* Let other callbacks finish */
-    if (ice->mutex) {
-       pj_mutex_lock(ice->mutex);
-       pj_mutex_unlock(ice->mutex);
-    }
-
-    if (ice->timer.id) {
-       pj_timer_heap_cancel(ice->stun_cfg.timer_heap,
-                            &ice->timer);
-       ice->timer.id = PJ_FALSE;
-    }
+    pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap,
+                                  &ice->timer, PJ_FALSE);
 
     for (i=0; i<ice->comp_cnt; ++i) {
@@ -477 +501 @@
     }
 
-    if (ice->clist.timer.id) {
-       pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->clist.timer);
-       ice->clist.timer.id = PJ_FALSE;
-    }
-
-    if (ice->mutex) {
-       pj_mutex_destroy(ice->mutex);
-       ice->mutex = NULL;
-    }
-
-    if (ice->pool) {
-       pj_pool_t *pool = ice->pool;
-       ice->pool = NULL;
-       pj_pool_release(pool);
-    }
+    pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap,
+                                  &ice->clist.timer,
+                                  PJ_FALSE);
+
+    pj_grp_lock_dec_ref(ice->grp_lock);
+    pj_grp_lock_release(ice->grp_lock);
 }
@@ -710 +725 @@
     PJ_ASSERT_RETURN(comp_id <= ice->comp_cnt, PJ_EINVAL);
 
-    pj_mutex_lock(ice->mutex);
+    pj_grp_lock_acquire(ice->grp_lock);
 
     if (ice->lcand_cnt >= PJ_ARRAY_SIZE(ice->lcand)) {
@@ -750 +765 @@
 
 on_error:
-    pj_mutex_unlock(ice->mutex);
+    pj_grp_lock_release(ice->grp_lock);
     return status;
 }
@@ -767 +782 @@
     *cand_id = -1;
 
-    pj_mutex_lock(ice->mutex);
+    pj_grp_lock_acquire(ice->grp_lock);
 
     /* First find in valid list if we have nominated pair */
@@ -775 +790 @@
        if (check->lcand->comp_id == comp_id) {
            *cand_id = GET_LCAND_ID(check->lcand);
-           pj_mutex_unlock(ice->mutex);
+           pj_grp_lock_release(ice->grp_lock);
            return PJ_SUCCESS;
        }
@@ -787 +802 @@
     {
        *cand_id = GET_LCAND_ID(lcand);
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJ_SUCCESS;
     }
@@ -800 +815 @@
     {
        *cand_id = GET_LCAND_ID(lcand);
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJ_SUCCESS;
     }
@@ -812 +827 @@
     {
        *cand_id = GET_LCAND_ID(lcand);
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJ_SUCCESS;
     }
@@ -818 +833 @@
 
     /* Still no candidate is found! :( */
-    pj_mutex_unlock(ice->mutex);
+    pj_grp_lock_release(ice->grp_lock);
 
     pj_assert(!"Should have a candidate by now");
@@ -1128 +1143 @@
     pj_ice_sess *ice = (pj_ice_sess*) te->user_data;
     enum timer_type type = (enum timer_type)te->id;
-    pj_bool_t has_mutex = PJ_TRUE;
 
     PJ_UNUSED_ARG(th);
 
-    pj_mutex_lock(ice->mutex);
+    pj_grp_lock_acquire(ice->grp_lock);
 
     te->id = TIMER_NONE;
+
+    if (ice->is_destroying) {
+       /* Stray timer, could happen when destroy is invoked while callback
+        * is pending. */
+       pj_grp_lock_release(ice->grp_lock);
+       return;
+    }
 
     switch (type) {
@@ -1158 +1179 @@
            ice_status = ice->ice_status;
            on_ice_complete = ice->cb.on_ice_complete;
-           has_mutex = PJ_FALSE;
-           pj_mutex_unlock(ice->mutex);
 
            /* Notify app about ICE completion*/
@@ -1177 +1196 @@
     }
 
-    if (has_mutex)
-       pj_mutex_unlock(ice->mutex);
+    pj_grp_lock_release(ice->grp_lock);
 }
@@ -1236 +1254 @@
        pj_time_val_normalize(&delay);
 
-       ice->timer.id = TIMER_KEEP_ALIVE;
-       pj_timer_heap_schedule(ice->stun_cfg.timer_heap, &ice->timer, &delay);
+       pj_timer_heap_schedule_w_grp_lock(ice->stun_cfg.timer_heap,
+                                         &ice->timer, &delay,
+                                         TIMER_KEEP_ALIVE,
+                                         ice->grp_lock);
 
     } else {
@@ -1251 +1271 @@
     ice->ice_status = status;
 
-    if (ice->timer.id != TIMER_NONE) {
-       pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->timer);
-       ice->timer.id = TIMER_NONE;
-    }
+    pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap, &ice->timer,
+                                  TIMER_NONE);
 
     /* Log message */
@@ -1267 +1285 @@
        pj_time_val delay = {0, 0};
 
-       ice->timer.id = TIMER_COMPLETION_CALLBACK;
-       pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
-                              &ice->timer, &delay);
+       pj_timer_heap_schedule_w_grp_lock(ice->stun_cfg.timer_heap,
+                                         &ice->timer, &delay,
+                                         TIMER_COMPLETION_CALLBACK,
+                                         ice->grp_lock);
     }
 }
@@ -1497 +1516 @@
                pj_time_val_normalize(&delay);
 
-               ice->timer.id = TIMER_CONTROLLED_WAIT_NOM;
-               pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
-                                      &ice->timer,
-                                      &delay);
+               pj_timer_heap_schedule_w_grp_lock(
+                                  ice->stun_cfg.timer_heap,
+                                  &ice->timer, &delay,
+                                  TIMER_CONTROLLED_WAIT_NOM,
+                                  ice->grp_lock);
 
                LOG5((ice->obj_name,
@@ -1576 +1596 @@
          ice->opt.nominated_check_delay));
 
-    if (ice->timer.id != TIMER_NONE) {
-       pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->timer);
-       ice->timer.id = TIMER_NONE;
-    }
+    pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap, &ice->timer,
+                                  TIMER_NONE);
 
     /* All components have valid pair. Let connectivity checks run for
@@ -1588 +1606 @@
     pj_time_val_normalize(&delay);
 
-    ice->timer.id = TIMER_START_NOMINATED_CHECK;
-    pj_timer_heap_schedule(ice->stun_cfg.timer_heap, &ice->timer, &delay);
+    pj_timer_heap_schedule_w_grp_lock(ice->stun_cfg.timer_heap,
+                                     &ice->timer, &delay,
+                                     TIMER_START_NOMINATED_CHECK,
+                                     ice->grp_lock);
     return PJ_FALSE;
 }
@@ -1619 +1639 @@
                    PJ_ETOOMANY);
 
-    pj_mutex_lock(ice->mutex);
+    pj_grp_lock_acquire(ice->grp_lock);
 
     /* Save credentials */
@@ -1667 +1687 @@
 
     if (clist->count >= PJ_ICE_MAX_CHECKS) {
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJ_ETOOMANY;
     }
@@ -1695 +1715 @@
     if (clist->count == 0) {
        LOG4((ice->obj_name, "Error: no checklist can be created"));
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJ_ENOTFOUND;
     }
@@ -1705 +1725 @@
     status = prune_checklist(ice, clist);
     if (status != PJ_SUCCESS) {
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return status;
     }
@@ -1732 +1752 @@
     dump_checklist("Checklist created:", ice, clist);
 
-    pj_mutex_unlock(ice->mutex);
+    pj_grp_lock_release(ice->grp_lock);
 
     return PJ_SUCCESS;
@@ -1851 +1871 @@
     clist = td->clist;
 
-    if (ice->is_destroying)
-       return PJ_SUCCESS;
-
-    pj_mutex_lock(ice->mutex);
+    pj_grp_lock_acquire(ice->grp_lock);
 
     if (ice->is_destroying) {
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJ_SUCCESS;
     }
@@ -1879 +1896 @@
            status = perform_check(ice, clist, i, ice->is_nominating);
            if (status != PJ_SUCCESS) {
-               pj_mutex_unlock(ice->mutex);
+               pj_grp_lock_release(ice->grp_lock);
                pj_log_pop_indent();
                return status;
            }
@@ -1899 +1916 @@
            status = perform_check(ice, clist, i, ice->is_nominating);
            if (status != PJ_SUCCESS) {
-               pj_mutex_unlock(ice->mutex);
+               pj_grp_lock_release(ice->grp_lock);
                pj_log_pop_indent();
                return status;
            }
@@ -1916 +1933 @@
        pj_time_val timeout = {0, PJ_ICE_TA_VAL};
 
-       te->id = PJ_TRUE;
        pj_time_val_normalize(&timeout);
-       pj_timer_heap_schedule(th, te, &timeout);
-    }
-
-    pj_mutex_unlock(ice->mutex);
+       pj_timer_heap_schedule_w_grp_lock(th, te, &timeout, PJ_TRUE,
+                                         ice->grp_lock);
+    }
+
+    pj_grp_lock_release(ice->grp_lock);
     pj_log_pop_indent();
     return PJ_SUCCESS;
@@ -1941 +1958 @@
     /* Stop our timer if it's active */
     if (ice->timer.id == TIMER_START_NOMINATED_CHECK) {
-       pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->timer);
-       ice->timer.id = TIMER_NONE;
+       pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap, &ice->timer,
+                                      TIMER_NONE);
     }
@@ -1970 +1987 @@
 
     /* And (re)start the periodic check */
-    if (ice->clist.timer.id) {
-       pj_timer_heap_cancel(ice->stun_cfg.timer_heap, &ice->clist.timer);
-       ice->clist.timer.id = PJ_FALSE;
-    }
-
-    ice->clist.timer.id = PJ_TRUE;
+    pj_timer_heap_cancel_if_active(ice->stun_cfg.timer_heap,
+                                  &ice->clist.timer, PJ_FALSE);
+
     delay.sec = delay.msec = 0;
-    status = pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
-                                   &ice->clist.timer, &delay);
-    if (status != PJ_SUCCESS) {
-       ice->clist.timer.id = PJ_FALSE;
-    } else {
+    status = pj_timer_heap_schedule_w_grp_lock(ice->stun_cfg.timer_heap,
+                                              &ice->clist.timer, &delay,
+                                              PJ_TRUE,
+                                              ice->grp_lock);
+    if (status == PJ_SUCCESS) {
        LOG5((ice->obj_name, "Periodic timer rescheduled.."));
     }
@@ -2031 +2045 @@
 
     /* Lock session */
-    pj_mutex_lock(ice->mutex);
+    pj_grp_lock_acquire(ice->grp_lock);
 
     LOG4((ice->obj_name, "Starting ICE check.."));
@@ -2061 +2075 @@
     if (i == clist->count) {
        pj_assert(!"Unable to find checklist for component 1");
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        pj_log_pop_indent();
        return PJNATH_EICEINCOMPID;
     }
@@ -2115 +2129 @@
      * return start_periodic_check(ice->stun_cfg.timer_heap, &clist->timer);
      */
-    clist->timer.id = PJ_TRUE;
     delay.sec = delay.msec = 0;
-    status = pj_timer_heap_schedule(ice->stun_cfg.timer_heap,
-                                   &clist->timer, &delay);
+    status = pj_timer_heap_schedule_w_grp_lock(ice->stun_cfg.timer_heap,
+                                              &clist->timer, &delay,
+                                              PJ_TRUE, ice->grp_lock);
     if (status != PJ_SUCCESS) {
        clist->timer.id = PJ_FALSE;
     }
 
-    pj_mutex_unlock(ice->mutex);
+    pj_grp_lock_release(ice->grp_lock);
     pj_log_pop_indent();
     return status;
@@ -2144 +2158 @@
     pj_ice_sess *ice = sd->ice;
     pj_ice_msg_data *msg_data = (pj_ice_msg_data*) token;
+    pj_status_t status;
 
-    return (*ice->cb.on_tx_pkt)(ice, sd->comp_id, msg_data->transport_id,
-                               pkt, pkt_size, dst_addr, addr_len);
+    pj_grp_lock_acquire(ice->grp_lock);
+
+    if (ice->is_destroying) {
+       /* Stray retransmit timer that could happen while
+        * we're being destroyed */
+       pj_grp_lock_release(ice->grp_lock);
+       return PJ_EINVALIDOP;
+    }
+
+    status = (*ice->cb.on_tx_pkt)(ice, sd->comp_id, msg_data->transport_id,
+                                 pkt, pkt_size, dst_addr, addr_len);
+
+    pj_grp_lock_release(ice->grp_lock);
+    return status;
 }
@@ -2181 +2208 @@
     check->tdata = NULL;
 
-    pj_mutex_lock(ice->mutex);
+    pj_grp_lock_acquire(ice->grp_lock);
+
+    if (ice->is_destroying) {
+       /* Not sure if this is possible but just in case */
+       pj_grp_lock_release(ice->grp_lock);
+       return;
+    }
 
     /* Init lcand to NULL. lcand will be found from the mapped address
@@ -2232 +2265 @@
                     check->nominated || ice->is_nominating);
        pj_log_pop_indent();
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return;
     }
@@ -2247 +2280 @@
        on_check_complete(ice, check);
        pj_log_pop_indent();
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return;
     }
@@ -2271 +2304 @@
            on_check_complete(ice, check);
            pj_log_pop_indent();
-           pj_mutex_unlock(ice->mutex);
+           pj_grp_lock_release(ice->grp_lock);
            return;
        }
@@ -2304 +2337 @@
                           PJNATH_ESTUNNOMAPPEDADDR);
        on_check_complete(ice, check);
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return;
     }
@@ -2352 +2385 @@
                           status);
        on_check_complete(ice, check);
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return;
     }
@@ -2412 +2445 @@
     if (on_check_complete(ice, check)) {
        /* ICE complete! */
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return;
     }
 
-    pj_mutex_unlock(ice->mutex);
+    pj_grp_lock_release(ice->grp_lock);
 }
@@ -2457 +2490 @@
     ice = sd->ice;
 
-    pj_mutex_lock(ice->mutex);
+    pj_grp_lock_acquire(ice->grp_lock);
+
+    if (ice->is_destroying) {
+       pj_grp_lock_release(ice->grp_lock);
+       return PJ_EINVALIDOP;
+    }
 
     /*
@@ -2472 +2510 @@
     if (prio_attr == NULL) {
        LOG5((ice->obj_name, "Received Binding request with no PRIORITY"));
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJ_SUCCESS;
     }
@@ -2517 +2555 @@
                                    NULL, token, PJ_TRUE,
                                    src_addr, src_addr_len);
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJ_SUCCESS;
     }
@@ -2529 +2567 @@
                                    NULL, token, PJ_TRUE,
                                    src_addr, src_addr_len);
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJ_SUCCESS;
     } else {
@@ -2544 +2582 @@
     status = pj_stun_session_create_res(sess, rdata, 0, NULL, &tdata);
     if (status != PJ_SUCCESS) {
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return status;
     }
@@ -2596 +2634 @@
     }
 
-    pj_mutex_unlock(ice->mutex);
+    pj_grp_lock_release(ice->grp_lock);
     return PJ_SUCCESS;
 }
@@ -2885 +2923 @@
     }
 
-    pj_mutex_lock(ice->mutex);
+    pj_grp_lock_acquire(ice->grp_lock);
+
+    if (ice->is_destroying) {
+       pj_grp_lock_release(ice->grp_lock);
+       return PJ_EINVALIDOP;
+    }
 
     comp = find_comp(ice, comp_id);
     if (comp == NULL) {
        status = PJNATH_EICEINCOMPID;
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        goto on_return;
     }
@@ -2896 +2939 @@
     if (comp->valid_check == NULL) {
        status = PJNATH_EICEINPROGRESS;
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        goto on_return;
     }
@@ -2905 +2948 @@
 
     /* Release the mutex now to avoid deadlock (see ticket #1451). */
-    pj_mutex_unlock(ice->mutex);
+    pj_grp_lock_release(ice->grp_lock);
+
+    PJ_RACE_ME(5);
 
     status = (*ice->cb.on_tx_pkt)(ice, comp_id, transport_id,
@@ -2932 +2977 @@
     PJ_ASSERT_RETURN(ice, PJ_EINVAL);
 
-    pj_mutex_lock(ice->mutex);
+    pj_grp_lock_acquire(ice->grp_lock);
+
+    if (ice->is_destroying) {
+       pj_grp_lock_release(ice->grp_lock);
+       return PJ_EINVALIDOP;
+    }
 
     comp = find_comp(ice, comp_id);
     if (comp == NULL) {
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJNATH_EICEINCOMPID;
     }
@@ -2949 +2999 @@
     if (msg_data == NULL) {
        pj_assert(!"Invalid transport ID");
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
        return PJ_EINVAL;
     }
@@ -2969 +3019 @@
                     ice->tmp.errmsg));
        }
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
     } else {
        /* Not a STUN packet. Call application's callback instead, but release
         * the mutex now or otherwise we may get deadlock.
        */
-       pj_mutex_unlock(ice->mutex);
+       pj_grp_lock_release(ice->grp_lock);
+
+       PJ_RACE_ME(5);
 
        (*ice->cb.on_rx_data)(ice, comp_id, transport_id, pkt, pkt_size,
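All of the edits above follow one lifetime pattern, condensed here as a hypothetical sketch (my_obj_t is illustrative; ice_session.c does the same thing in pj_ice_sess_create(), destroy_ice() and ice_on_destroy()):

    #include <pj/lock.h>
    #include <pj/pool.h>

    typedef struct my_obj_t {
        pj_pool_t     *pool;
        pj_grp_lock_t *grp_lock;
        pj_bool_t      is_destroying;
    } my_obj_t;

    /* Runs only when the last group-lock reference is released. */
    static void my_obj_on_destroy(void *arg)
    {
        my_obj_t *obj = (my_obj_t*)arg;
        pj_pool_release(obj->pool);   /* now safe: no user remains */
    }

    static pj_status_t my_obj_init(pj_pool_t *pool, my_obj_t *obj)
    {
        pj_status_t status = pj_grp_lock_create(pool, NULL, &obj->grp_lock);
        if (status != PJ_SUCCESS)
            return status;
        obj->pool = pool;
        pj_grp_lock_add_ref(obj->grp_lock);            /* owner's ref */
        pj_grp_lock_add_handler(obj->grp_lock, pool, obj,
                                &my_obj_on_destroy);
        return PJ_SUCCESS;
    }

    static void my_obj_destroy(my_obj_t *obj)
    {
        pj_grp_lock_acquire(obj->grp_lock);
        if (obj->is_destroying) {          /* destroy-once guard */
            pj_grp_lock_release(obj->grp_lock);
            return;
        }
        obj->is_destroying = PJ_TRUE;
        pj_grp_lock_dec_ref(obj->grp_lock);  /* may fire the handler */
        pj_grp_lock_release(obj->grp_lock);
    }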
pjproject/trunk/pjnath/src/pjnath/ice_strans.c
(r4314 → r4360)

@@ -127 +127 @@
 
 /* Forward decls */
+static void ice_st_on_destroy(void *obj);
 static void destroy_ice_st(pj_ice_strans *ice_st);
 #define ice_st_perror(ice_st,msg,rc) pjnath_perror(ice_st->obj_name,msg,rc)
 static void sess_init_update(pj_ice_strans *ice_st);
-static void sess_add_ref(pj_ice_strans *ice_st);
-static pj_bool_t sess_dec_ref(pj_ice_strans *ice_st);
 
 /**
@@ -173 +171 @@
     pj_ice_strans_cfg    cfg;       /**< Configuration.          */
     pj_ice_strans_cb     cb;        /**< Application callback.   */
-    pj_lock_t           *init_lock; /**< Initialization mutex.   */
+    pj_grp_lock_t       *grp_lock;  /**< Group lock.             */
 
     pj_ice_strans_state  state;     /**< Session state.          */
@@ -184 +182 @@
     pj_timer_entry       ka_timer;  /**< STUN keep-alive timer.  */
 
-    pj_atomic_t         *busy_cnt;  /**< To prevent destroy      */
     pj_bool_t            destroy_req;/**< Destroy has been called? */
     pj_bool_t            cb_called; /**< Init error callback called?*/
@@ -552 +549 @@
     pj_log_push_indent();
 
-    pj_ice_strans_cfg_copy(pool, &ice_st->cfg, cfg);
-    pj_memcpy(&ice_st->cb, cb, sizeof(*cb));
-
-    status = pj_atomic_create(pool, 0, &ice_st->busy_cnt);
+    status = pj_grp_lock_create(pool, NULL, &ice_st->grp_lock);
     if (status != PJ_SUCCESS) {
-       destroy_ice_st(ice_st);
-       return status;
-    }
-
-    status = pj_lock_create_recursive_mutex(pool, ice_st->obj_name,
-                                           &ice_st->init_lock);
-    if (status != PJ_SUCCESS) {
-       destroy_ice_st(ice_st);
+       pj_pool_release(pool);
        pj_log_pop_indent();
        return status;
     }
+
+    pj_grp_lock_add_ref(ice_st->grp_lock);
+    pj_grp_lock_add_handler(ice_st->grp_lock, pool, ice_st,
+                           &ice_st_on_destroy);
+
+    pj_ice_strans_cfg_copy(pool, &ice_st->cfg, cfg);
+    ice_st->cfg.stun.cfg.grp_lock = ice_st->grp_lock;
+    ice_st->cfg.turn.cfg.grp_lock = ice_st->grp_lock;
+    pj_memcpy(&ice_st->cb, cb, sizeof(*cb));
 
     ice_st->comp_cnt = comp_cnt;
@@ -579 +575 @@
      * called before we finish initialization.
      */
-    pj_lock_acquire(ice_st->init_lock);
+    pj_grp_lock_acquire(ice_st->grp_lock);
 
     for (i=0; i<comp_cnt; ++i) {
        status = create_comp(ice_st, i+1);
        if (status != PJ_SUCCESS) {
-           pj_lock_release(ice_st->init_lock);
+           pj_grp_lock_release(ice_st->grp_lock);
            destroy_ice_st(ice_st);
            pj_log_pop_indent();
@@ -592 +588 @@
 
     /* Done with initialization */
-    pj_lock_release(ice_st->init_lock);
-
-    PJ_LOG(4,(ice_st->obj_name, "ICE stream transport created"));
+    pj_grp_lock_release(ice_st->grp_lock);
+
+    PJ_LOG(4,(ice_st->obj_name, "ICE stream transport %p created", ice_st));
 
     *p_ice_st = ice_st;
@@ -606 +602 @@
 }
 
+/* REALLY destroy ICE */
+static void ice_st_on_destroy(void *obj)
+{
+    pj_ice_strans *ice_st = (pj_ice_strans*)obj;
+
+    PJ_LOG(4,(ice_st->obj_name, "ICE stream transport %p destroyed", obj));
+
+    /* Done */
+    pj_pool_release(ice_st->pool);
+}
+
 /* Destroy ICE */
 static void destroy_ice_st(pj_ice_strans *ice_st)
 {
     unsigned i;
 
-    PJ_LOG(5,(ice_st->obj_name, "ICE stream transport destroying.."));
+    PJ_LOG(5,(ice_st->obj_name, "ICE stream transport %p destroy request..",
+             ice_st));
     pj_log_push_indent();
+
+    pj_grp_lock_acquire(ice_st->grp_lock);
+
+    if (ice_st->destroy_req) {
+       pj_grp_lock_release(ice_st->grp_lock);
+       return;
+    }
+
+    ice_st->destroy_req = PJ_TRUE;
 
     /* Destroy ICE if we have ICE */
@@ -624 +641 @@
        if (ice_st->comp[i]) {
            if (ice_st->comp[i]->stun_sock) {
-               pj_stun_sock_set_user_data(ice_st->comp[i]->stun_sock, NULL);
                pj_stun_sock_destroy(ice_st->comp[i]->stun_sock);
                ice_st->comp[i]->stun_sock = NULL;
            }
            if (ice_st->comp[i]->turn_sock) {
-               pj_turn_sock_set_user_data(ice_st->comp[i]->turn_sock, NULL);
                pj_turn_sock_destroy(ice_st->comp[i]->turn_sock);
                ice_st->comp[i]->turn_sock = NULL;
@@ -635 +650 @@
            }
        }
     }
-    ice_st->comp_cnt = 0;
-
-    /* Destroy mutex */
-    if (ice_st->init_lock) {
-       pj_lock_acquire(ice_st->init_lock);
-       pj_lock_release(ice_st->init_lock);
-       pj_lock_destroy(ice_st->init_lock);
-       ice_st->init_lock = NULL;
-    }
-
-    /* Destroy reference counter */
-    if (ice_st->busy_cnt) {
-       pj_assert(pj_atomic_get(ice_st->busy_cnt)==0);
-       pj_atomic_destroy(ice_st->busy_cnt);
-       ice_st->busy_cnt = NULL;
-    }
-
-    PJ_LOG(4,(ice_st->obj_name, "ICE stream transport destroyed"));
-
-    /* Done */
-    pj_pool_release(ice_st->pool);
+
+    pj_grp_lock_dec_ref(ice_st->grp_lock);
+    pj_grp_lock_release(ice_st->grp_lock);
 
     pj_log_pop_indent();
 }
@@ -740 +738 @@
 PJ_DEF(pj_status_t) pj_ice_strans_destroy(pj_ice_strans *ice_st)
 {
-    PJ_ASSERT_RETURN(ice_st, PJ_EINVAL);
-
-    sess_add_ref(ice_st);
-    ice_st->destroy_req = PJ_TRUE;
-    if (sess_dec_ref(ice_st)) {
-       PJ_LOG(5,(ice_st->obj_name,
-                 "ICE strans object is busy, will destroy later"));
-       return PJ_EPENDING;
-    }
-
+    destroy_ice_st(ice_st);
     return PJ_SUCCESS;
 }
 
-
-/*
- * Increment busy counter.
- */
-static void sess_add_ref(pj_ice_strans *ice_st)
-{
-    pj_atomic_inc(ice_st->busy_cnt);
-}
-
-/*
- * Decrement busy counter. If the counter has reached zero and destroy
- * has been requested, destroy the object and return FALSE.
- */
-static pj_bool_t sess_dec_ref(pj_ice_strans *ice_st)
-{
-    int count = pj_atomic_dec_and_get(ice_st->busy_cnt);
-    pj_assert(count >= 0);
-    if (count==0 && ice_st->destroy_req) {
-       destroy_ice_st(ice_st);
-       return PJ_FALSE;
-    } else {
-       return PJ_TRUE;
-    }
-}
 
 /*
@@ -841 +806 @@
     status = pj_ice_sess_create(&ice_st->cfg.stun_cfg, ice_st->obj_name, role,
                                ice_st->comp_cnt, &ice_cb,
-                               local_ufrag, local_passwd, &ice_st->ice);
+                               local_ufrag, local_passwd,
+                               ice_st->grp_lock,
+                               &ice_st->ice);
     if (status != PJ_SUCCESS)
        return status;
@@ -1256 +1223 @@
     unsigned msec;
 
-    sess_add_ref(ice_st);
+    pj_grp_lock_add_ref(ice_st->grp_lock);
 
     pj_gettimeofday(&t);
@@ -1338 +1305 @@
     }
 
-    sess_dec_ref(ice_st);
+    pj_grp_lock_dec_ref(ice_st->grp_lock);
 }
@@ -1427 +1394 @@
     ice_st = comp->ice_st;
 
-    sess_add_ref(ice_st);
+    pj_grp_lock_add_ref(ice_st->grp_lock);
 
     if (ice_st->ice == NULL) {
@@ -1452 +1419 @@
     }
 
-    return sess_dec_ref(ice_st);
+    return pj_grp_lock_dec_ref(ice_st->grp_lock) ? PJ_FALSE : PJ_TRUE;
 }
@@ -1483 +1450 @@
     ice_st = comp->ice_st;
 
-    sess_add_ref(ice_st);
+    pj_grp_lock_add_ref(ice_st->grp_lock);
 
     /* Wait until initialization completes */
-    pj_lock_acquire(ice_st->init_lock);
+    pj_grp_lock_acquire(ice_st->grp_lock);
 
     /* Find the srflx cancidate */
@@ -1496 +1463 @@
     }
 
-    pj_lock_release(ice_st->init_lock);
+    pj_grp_lock_release(ice_st->grp_lock);
 
     /* It is possible that we don't have srflx candidate even though this
@@ -1503 +1470 @@
      */
     if (cand == NULL) {
-       return sess_dec_ref(ice_st);
+       return pj_grp_lock_dec_ref(ice_st->grp_lock) ? PJ_FALSE : PJ_TRUE;
     }
@@ -1619 +1586 @@
     }
 
-    return sess_dec_ref(ice_st);
+    return pj_grp_lock_dec_ref(ice_st->grp_lock)? PJ_FALSE : PJ_TRUE;
 }
@@ -1638 +1605 @@
     }
 
-    sess_add_ref(comp->ice_st);
+    pj_grp_lock_add_ref(comp->ice_st->grp_lock);
 
     if (comp->ice_st->ice == NULL) {
@@ -1665 +1632 @@
     }
 
-    sess_dec_ref(comp->ice_st);
+    pj_grp_lock_dec_ref(comp->ice_st->grp_lock);
 }
@@ -1687 +1654 @@
     pj_log_push_indent();
 
-    sess_add_ref(comp->ice_st);
+    pj_grp_lock_add_ref(comp->ice_st->grp_lock);
 
     if (new_state == PJ_TURN_STATE_READY) {
@@ -1701 +1668 @@
 
        /* Wait until initialization completes */
-       pj_lock_acquire(comp->ice_st->init_lock);
+       pj_grp_lock_acquire(comp->ice_st->grp_lock);
 
        /* Find relayed candidate in the component */
@@ -1712 +1679 @@
        pj_assert(cand != NULL);
 
-       pj_lock_release(comp->ice_st->init_lock);
+       pj_grp_lock_release(comp->ice_st->grp_lock);
 
        /* Update candidate */
@@ -1745 +1712 @@
        comp->turn_sock = NULL;
 
-       /* Set session to fail if we're still initializing */
-       if (comp->ice_st->state < PJ_ICE_STRANS_STATE_READY) {
-           sess_fail(comp->ice_st, PJ_ICE_STRANS_OP_INIT,
-                     "TURN allocation failed", info.last_status);
-       } else if (comp->turn_err_cnt > 1) {
-           sess_fail(comp->ice_st, PJ_ICE_STRANS_OP_KEEP_ALIVE,
-                     "TURN refresh failed", info.last_status);
-       } else {
-           PJ_PERROR(4,(comp->ice_st->obj_name, info.last_status,
-                     "Comp %d: TURN allocation failed, retrying",
-                     comp->comp_id));
-           add_update_turn(comp->ice_st, comp);
-       }
-    }
-
-    sess_dec_ref(comp->ice_st);
+       /* Set session to fail on error. last_status PJ_SUCCESS means normal
+        * deallocation, which should not trigger sess_fail as it may have
+        * been initiated by ICE destroy
+        */
+       if (info.last_status != PJ_SUCCESS) {
+           if (comp->ice_st->state < PJ_ICE_STRANS_STATE_READY) {
+               sess_fail(comp->ice_st, PJ_ICE_STRANS_OP_INIT,
+                         "TURN allocation failed", info.last_status);
+           } else if (comp->turn_err_cnt > 1) {
+               sess_fail(comp->ice_st, PJ_ICE_STRANS_OP_KEEP_ALIVE,
+                         "TURN refresh failed", info.last_status);
+           } else {
+               PJ_PERROR(4,(comp->ice_st->obj_name, info.last_status,
+                         "Comp %d: TURN allocation failed, retrying",
+                         comp->comp_id));
+               add_update_turn(comp->ice_st, comp);
+           }
+       }
+    }
+
+    pj_grp_lock_dec_ref(comp->ice_st->grp_lock);
 
     pj_log_pop_indent();
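sess_add_ref()/sess_dec_ref() and the busy counter are gone; as the hunks above show, callbacks now hold a plain group-lock reference for their duration, roughly like this sketch of the in-file pattern:

    /* Sketch: pj_grp_lock_dec_ref() returns nonzero (PJ_EGONE) when it
     * released the last reference, i.e. the object has just been
     * destroyed, so the callback reports whether the object survived. */
    static pj_bool_t on_some_event(pj_ice_strans *ice_st)
    {
        pj_grp_lock_add_ref(ice_st->grp_lock);

        /* ... invoke app callbacks; they are free to call
         * pj_ice_strans_destroy() here ... */

        return pj_grp_lock_dec_ref(ice_st->grp_lock) ? PJ_FALSE : PJ_TRUE;
    }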
pjproject/trunk/pjnath/src/pjnath/nat_detect.c
(r3553 → r4360)

@@ -308 +308 @@
     sess_cb.on_send_msg = &on_send_msg;
     status = pj_stun_session_create(stun_cfg, pool->obj_name, &sess_cb,
-                                   PJ_FALSE, &sess->stun_sess);
+                                   PJ_FALSE, NULL, &sess->stun_sess);
     if (status != PJ_SUCCESS)
        goto on_error;
pjproject/trunk/pjnath/src/pjnath/stun_session.c
(r4352 → r4360)

@@ -26 +26 @@
     pj_stun_config     *cfg;
     pj_pool_t          *pool;
-    pj_lock_t          *lock;
-    pj_bool_t           delete_lock;
+    pj_grp_lock_t      *grp_lock;
     pj_stun_session_cb  cb;
     void               *user_data;
-
-    pj_atomic_t        *busy;
-    pj_bool_t           destroy_request;
+    pj_bool_t           is_destroying;
 
     pj_bool_t           use_fingerprint;
@@ -56 +53 @@
 
 #define SNAME(s_)              ((s_)->pool->obj_name)
-
-#if PJ_LOG_MAX_LEVEL >= 5
+#define THIS_FILE              "stun_session.c"
+
+#if 1
 #   define TRACE_(expr)        PJ_LOG(5,expr)
 #else
@@ -63 +61 @@
 #endif
 
-#define LOG_ERR_(sess,title,rc) pjnath_perror(sess->pool->obj_name,title,rc)
+#define LOG_ERR_(sess,title,rc) PJ_PERROR(3,(sess->pool->obj_name,rc,title))
 
 #define TDATA_POOL_SIZE            PJNATH_POOL_LEN_STUN_TDATA
@@ -78 +76 @@
                                  pj_size_t pkt_size);
 static void stun_tsx_on_destroy(pj_stun_client_tsx *tsx);
+static void stun_sess_on_destroy(void *comp);
 
 static pj_stun_tsx_cb tsx_cb =
@@ -149 +148 @@
 
     tdata = (pj_stun_tx_data*) pj_stun_client_tsx_get_data(tsx);
-    tsx_erase(tdata->sess, tdata);
-
-    pj_stun_client_tsx_destroy(tsx);
-    pj_pool_release(tdata->pool);
+    pj_stun_client_tsx_stop(tsx);
+    if (tdata) {
+       tsx_erase(tdata->sess, tdata);
+       pj_pool_release(tdata->pool);
+    }
+
+    TRACE_((THIS_FILE, "STUN transaction %p destroyed", tsx));
 }
@@ -157 +159 @@
 static void destroy_tdata(pj_stun_tx_data *tdata, pj_bool_t force)
 {
+    TRACE_((THIS_FILE, "tdata %p destroy request, force=%d, tsx=%p", tdata,
+           force, tdata->client_tsx));
+
     if (tdata->res_timer.id != PJ_FALSE) {
-       pj_timer_heap_cancel(tdata->sess->cfg->timer_heap,
-                            &tdata->res_timer);
-       tdata->res_timer.id = PJ_FALSE;
+       pj_timer_heap_cancel_if_active(tdata->sess->cfg->timer_heap,
+                                      &tdata->res_timer, PJ_FALSE);
        pj_list_erase(tdata);
     }
 
     if (force) {
+       pj_list_erase(tdata);
        if (tdata->client_tsx) {
-           tsx_erase(tdata->sess, tdata);
-           pj_stun_client_tsx_destroy(tdata->client_tsx);
+           pj_stun_client_tsx_stop(tdata->client_tsx);
+           pj_stun_client_tsx_set_data(tdata->client_tsx, NULL);
        }
        pj_pool_release(tdata->pool);
@@ -173 +178 @@
     } else {
        if (tdata->client_tsx) {
-           pj_time_val delay = {2, 0};
+           /* "Probably" this is to absorb retransmission */
+           pj_time_val delay = {0, 300};
            pj_stun_client_tsx_schedule_destroy(tdata->client_tsx, &delay);
 
@@ -207 +213 @@
 
     pj_list_erase(tdata);
-    pj_stun_msg_destroy_tdata(tdata->sess, tdata);
+    destroy_tdata(tdata, PJ_FALSE);
 }
@@ -420 +426 @@
 
     /* Lock the session and prevent user from destroying us in the callback */
-    pj_atomic_inc(sess->busy);
-    pj_lock_acquire(sess->lock);
+    pj_grp_lock_acquire(sess->grp_lock);
+    if (sess->is_destroying) {
+       pj_stun_msg_destroy_tdata(sess, tdata);
+       pj_grp_lock_release(sess->grp_lock);
+       return;
+    }
 
     /* Handle authentication challenge */
@@ -435 +445 @@
      * from the pending list too.
      */
-    pj_stun_msg_destroy_tdata(sess, tdata);
+    if (status == PJNATH_ESTUNTIMEDOUT)
+       destroy_tdata(tdata, PJ_TRUE);
+    else
+       destroy_tdata(tdata, PJ_FALSE);
     tdata = NULL;
 
-    pj_lock_release(sess->lock);
-
-    if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
-       pj_stun_session_destroy(sess);
-       return;
-    }
+    pj_grp_lock_release(sess->grp_lock);
 }
@@ -458 +466 @@
 
     /* Lock the session and prevent user from destroying us in the callback */
-    pj_atomic_inc(sess->busy);
-    pj_lock_acquire(sess->lock);
+    pj_grp_lock_acquire(sess->grp_lock);
+
+    if (sess->is_destroying) {
+       /* Stray timer */
+       pj_grp_lock_release(sess->grp_lock);
+       return PJ_EINVALIDOP;
+    }
 
     status = sess->cb.on_send_msg(tdata->sess, tdata->token, stun_pkt,
                                  pkt_size, tdata->dst_addr,
                                  tdata->addr_len);
-    pj_lock_release(sess->lock);
-
-    if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
-       pj_stun_session_destroy(sess);
-       return PJNATH_ESTUNDESTROYED;
-    } else {
-       return status;
-    }
+    if (pj_grp_lock_release(sess->grp_lock))
+       return PJ_EGONE;
+
+    return status;
 }
@@ -480 +489 @@
                                            const pj_stun_session_cb *cb,
                                            pj_bool_t fingerprint,
+                                           pj_grp_lock_t *grp_lock,
                                            pj_stun_session **p_sess)
@@ -502 +512 @@
     sess->log_flag = 0xFFFF;
 
+    if (grp_lock) {
+       sess->grp_lock = grp_lock;
+    } else {
+       status = pj_grp_lock_create(pool, NULL, &sess->grp_lock);
+       if (status != PJ_SUCCESS) {
+           pj_pool_release(pool);
+           return status;
+       }
+    }
+
+    pj_grp_lock_add_ref(sess->grp_lock);
+    pj_grp_lock_add_handler(sess->grp_lock, pool, sess,
+                           &stun_sess_on_destroy);
+
     pj_stun_session_set_software_name(sess, &cfg->software_name);
 
     sess->rx_pool = pj_pool_create(sess->cfg->pf, name,
                                   PJNATH_POOL_LEN_STUN_TDATA,
                                   PJNATH_POOL_INC_STUN_TDATA, NULL);
@@ -511 +535 @@
     pj_list_init(&sess->cached_response_list);
 
-    status = pj_lock_create_recursive_mutex(pool, name, &sess->lock);
-    if (status != PJ_SUCCESS) {
-       pj_pool_release(pool);
-       return status;
-    }
-    sess->delete_lock = PJ_TRUE;
-
-    status = pj_atomic_create(pool, 0, &sess->busy);
-    if (status != PJ_SUCCESS) {
-       pj_lock_destroy(sess->lock);
-       pj_pool_release(pool);
-       return status;
-    }
-
     *p_sess = sess;
@@ -530 +540 @@
 }
 
-PJ_DEF(pj_status_t) pj_stun_session_destroy(pj_stun_session *sess)
-{
-    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
-
-    pj_lock_acquire(sess->lock);
-
-    /* Can't destroy if we're in a callback */
-    sess->destroy_request = PJ_TRUE;
-    if (pj_atomic_get(sess->busy)) {
-       pj_lock_release(sess->lock);
-       return PJ_EPENDING;
-    }
+static void stun_sess_on_destroy(void *comp)
+{
+    pj_stun_session *sess = (pj_stun_session*)comp;
 
     while (!pj_list_empty(&sess->pending_request_list)) {
@@ -552 +553 @@
        destroy_tdata(tdata, PJ_TRUE);
     }
-    pj_lock_release(sess->lock);
-
-    if (sess->delete_lock) {
-       pj_lock_destroy(sess->lock);
-    }
 
     if (sess->rx_pool) {
@@ -565 +561 @@
     pj_pool_release(sess->pool);
 
+    TRACE_((THIS_FILE, "STUN session %p destroyed", sess));
+}
+
+PJ_DEF(pj_status_t) pj_stun_session_destroy(pj_stun_session *sess)
+{
+    pj_stun_tx_data *tdata;
+
+    PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+
+    TRACE_((SNAME(sess), "STUN session %p destroy request, ref_cnt=%d",
+           sess, pj_grp_lock_get_ref(sess->grp_lock)));
+
+    pj_grp_lock_acquire(sess->grp_lock);
+
+    if (sess->is_destroying) {
+       /* Prevent from decrementing the ref counter more than once */
+       pj_grp_lock_release(sess->grp_lock);
+       return PJ_EINVALIDOP;
+    }
+
+    sess->is_destroying = PJ_TRUE;
+
+    /* We need to stop transactions and cached response because they are
+     * holding the group lock's reference counter while retransmitting.
+     */
+    tdata = sess->pending_request_list.next;
+    while (tdata != &sess->pending_request_list) {
+       if (tdata->client_tsx)
+           pj_stun_client_tsx_stop(tdata->client_tsx);
+       tdata = tdata->next;
+    }
+
+    tdata = sess->cached_response_list.next;
+    while (tdata != &sess->cached_response_list) {
+       pj_timer_heap_cancel_if_active(tdata->sess->cfg->timer_heap,
+                                      &tdata->res_timer, PJ_FALSE);
+       tdata = tdata->next;
+    }
+
+    pj_grp_lock_dec_ref(sess->grp_lock);
+    pj_grp_lock_release(sess->grp_lock);
     return PJ_SUCCESS;
 }
@@ -573 +610 @@
 {
     PJ_ASSERT_RETURN(sess, PJ_EINVAL);
-    pj_lock_acquire(sess->lock);
+    pj_grp_lock_acquire(sess->grp_lock);
     sess->user_data = user_data;
-    pj_lock_release(sess->lock);
+    pj_grp_lock_release(sess->grp_lock);
     return PJ_SUCCESS;
 }
@@ -585 +622 @@
 }
 
-PJ_DEF(pj_status_t) pj_stun_session_set_lock( pj_stun_session *sess,
-                                             pj_lock_t *lock,
-                                             pj_bool_t auto_del)
-{
-    pj_lock_t *old_lock = sess->lock;
-    pj_bool_t old_del;
-
-    PJ_ASSERT_RETURN(sess && lock, PJ_EINVAL);
-
-    pj_lock_acquire(old_lock);
-    sess->lock = lock;
-    old_del = sess->delete_lock;
-    sess->delete_lock = auto_del;
-    pj_lock_release(old_lock);
-
-    if (old_lock)
-       pj_lock_destroy(old_lock);
-
-    return PJ_SUCCESS;
-}
-
 PJ_DEF(pj_status_t) pj_stun_session_set_software_name(pj_stun_session *sess,
                                                      const pj_str_t *sw)
 {
     PJ_ASSERT_RETURN(sess, PJ_EINVAL);
+    pj_grp_lock_acquire(sess->grp_lock);
     if (sw && sw->slen)
        pj_strdup(sess->pool, &sess->srv_name, sw);
     else
        sess->srv_name.slen = 0;
+    pj_grp_lock_release(sess->grp_lock);
     return PJ_SUCCESS;
 }
@@ -623 +641 @@
     PJ_ASSERT_RETURN(sess, PJ_EINVAL);
 
+    pj_grp_lock_acquire(sess->grp_lock);
     sess->auth_type = auth_type;
     if (cred) {
@@ -630 +649 @@
        pj_bzero(&sess->cred, sizeof(sess->cred));
     }
+    pj_grp_lock_release(sess->grp_lock);
 
     return PJ_SUCCESS;
@@ -706 +726 @@
     PJ_ASSERT_RETURN(sess && p_tdata, PJ_EINVAL);
 
+    pj_grp_lock_acquire(sess->grp_lock);
+    if (sess->is_destroying) {
+       pj_grp_lock_release(sess->grp_lock);
+       return PJ_EINVALIDOP;
+    }
+
     status = create_tdata(sess, &tdata);
     if (status != PJ_SUCCESS)
-       return status;
+       goto on_error;
 
     /* Create STUN message */
     status = pj_stun_msg_create(tdata->pool, method, magic,
                                tsx_id, &tdata->msg);
-    if (status != PJ_SUCCESS) {
-       pj_pool_release(tdata->pool);
-       return status;
-    }
+    if (status != PJ_SUCCESS)
+       goto on_error;
 
     /* copy the request's transaction ID as the transaction key. */
@@ -732 +756 @@
        /* MUST put authentication in request */
        status = get_auth(sess, tdata);
-       if (status != PJ_SUCCESS) {
-           pj_pool_release(tdata->pool);
-           return status;
-       }
+       if (status != PJ_SUCCESS)
+           goto on_error;
 
     } else if (sess->auth_type == PJ_STUN_AUTH_LONG_TERM) {
@@ -743 +765 @@
        if (sess->next_nonce.slen != 0) {
            status = get_auth(sess, tdata);
-           if (status != PJ_SUCCESS) {
-               pj_pool_release(tdata->pool);
-               return status;
-           }
+           if (status != PJ_SUCCESS)
+               goto on_error;
            tdata->auth_info.nonce = sess->next_nonce;
            tdata->auth_info.realm = sess->server_realm;
@@ -753 +773 @@
     } else {
        pj_assert(!"Invalid authentication type");
-       pj_pool_release(tdata->pool);
-       return PJ_EBUG;
-    }
-
-    *p_tdata = tdata;
-    return PJ_SUCCESS;
+       status = PJ_EBUG;
+       goto on_error;
+    }
+
+    *p_tdata = tdata;
+    pj_grp_lock_release(sess->grp_lock);
+    return PJ_SUCCESS;
+
+on_error:
+    if (tdata)
+       pj_pool_release(tdata->pool);
+    pj_grp_lock_release(sess->grp_lock);
+    return status;
 }
@@ -770 +797 @@
     PJ_ASSERT_RETURN(sess && p_tdata, PJ_EINVAL);
 
+    pj_grp_lock_acquire(sess->grp_lock);
+    if (sess->is_destroying) {
+       pj_grp_lock_release(sess->grp_lock);
+       return PJ_EINVALIDOP;
+    }
+
     status = create_tdata(sess, &tdata);
-    if (status != PJ_SUCCESS)
+    if (status != PJ_SUCCESS) {
+       pj_grp_lock_release(sess->grp_lock);
        return status;
+    }
 
     /* Create STUN message */
@@ -780 +815 @@
     if (status != PJ_SUCCESS) {
        pj_pool_release(tdata->pool);
+       pj_grp_lock_release(sess->grp_lock);
        return status;
     }
 
     *p_tdata = tdata;
+
+    pj_grp_lock_release(sess->grp_lock);
     return PJ_SUCCESS;
 }
@@ -799 +837 @@
     pj_stun_tx_data *tdata = NULL;
 
+    pj_grp_lock_acquire(sess->grp_lock);
+    if (sess->is_destroying) {
+       pj_grp_lock_release(sess->grp_lock);
+       return PJ_EINVALIDOP;
+    }
+
     status = create_tdata(sess, &tdata);
-    if (status != PJ_SUCCESS)
+    if (status != PJ_SUCCESS) {
+       pj_grp_lock_release(sess->grp_lock);
        return status;
+    }
 
     /* Create STUN response message */
@@ -808 +854 @@
     if (status != PJ_SUCCESS) {
        pj_pool_release(tdata->pool);
+       pj_grp_lock_release(sess->grp_lock);
        return status;
     }
@@ -821 +868 @@
 
     *p_tdata = tdata;
+
+    pj_grp_lock_release(sess->grp_lock);
 
     return PJ_SUCCESS;
@@ -868 +917 @@
     PJ_ASSERT_RETURN(sess && addr_len && server && tdata, PJ_EINVAL);
 
+    /* Lock the session and prevent user from destroying us in the callback */
+    pj_grp_lock_acquire(sess->grp_lock);
+    if (sess->is_destroying) {
+       pj_grp_lock_release(sess->grp_lock);
+       return PJ_EINVALIDOP;
+    }
+
     pj_log_push_indent();
@@ -876 +932 @@
     tdata->token = token;
     tdata->retransmit = retransmit;
-
-    /* Lock the session and prevent user from destroying us in the callback */
-    pj_atomic_inc(sess->busy);
-    pj_lock_acquire(sess->lock);
 
     /* Apply options */
@@ -910 +962 @@
 
     /* Create STUN client transaction */
     status = pj_stun_client_tsx_create(sess->cfg, tdata->pool,
+                                      sess->grp_lock,
                                       &tsx_cb, &tdata->client_tsx);
     PJ_ASSERT_RETURN(status==PJ_SUCCESS, status);
@@ -940 +993 @@
 
        pj_memset(&tdata->res_timer, 0, sizeof(tdata->res_timer));
-       pj_timer_entry_init(&tdata->res_timer, PJ_TRUE, tdata,
+       pj_timer_entry_init(&tdata->res_timer, PJ_FALSE, tdata,
                            &on_cache_timeout);
 
        timeout.msec = sess->cfg->res_cache_msec % 1000;
 
-       status = pj_timer_heap_schedule(sess->cfg->timer_heap,
-                                       &tdata->res_timer,
-                                       &timeout);
+       status = pj_timer_heap_schedule_w_grp_lock(sess->cfg->timer_heap,
+                                                  &tdata->res_timer,
+                                                  &timeout, PJ_TRUE,
+                                                  sess->grp_lock);
        if (status != PJ_SUCCESS) {
-           tdata->res_timer.id = PJ_FALSE;
            pj_stun_msg_destroy_tdata(sess, tdata);
            LOG_ERR_(sess, "Error scheduling response timer", status);
@@ -976 +1029 @@
 
 on_return:
-    pj_lock_release(sess->lock);
-
     pj_log_pop_indent();
 
-    /* Check if application has called destroy() in the callback */
-    if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
-       pj_stun_session_destroy(sess);
-       return PJNATH_ESTUNDESTROYED;
-    }
+    if (pj_grp_lock_release(sess->grp_lock))
+       return PJ_EGONE;
 
     return status;
@@ -1006 +1054 @@
     pj_stun_tx_data *tdata;
 
+    pj_grp_lock_acquire(sess->grp_lock);
+    if (sess->is_destroying) {
+       pj_grp_lock_release(sess->grp_lock);
+       return PJ_EINVALIDOP;
+    }
+
     status = pj_stun_session_create_res(sess, rdata, code,
                                        (errmsg?pj_cstr(&reason,errmsg):NULL),
                                        &tdata);
-    if (status != PJ_SUCCESS)
+    if (status != PJ_SUCCESS) {
+       pj_grp_lock_release(sess->grp_lock);
        return status;
-
-    return pj_stun_session_send_msg(sess, token, cache, PJ_FALSE,
-                                   dst_addr, addr_len, tdata);
+    }
+
+    status = pj_stun_session_send_msg(sess, token, cache, PJ_FALSE,
+                                     dst_addr, addr_len, tdata);
+
+    pj_grp_lock_release(sess->grp_lock);
+    return status;
 }
@@ -1030 +1089 @@
 
     /* Lock the session and prevent user from destroying us in the callback */
-    pj_atomic_inc(sess->busy);
-    pj_lock_acquire(sess->lock);
+    pj_grp_lock_acquire(sess->grp_lock);
+    if (sess->is_destroying) {
+       pj_grp_lock_release(sess->grp_lock);
+       return PJ_EINVALIDOP;
+    }
 
     if (notify) {
@@ -1041 +1103 @@
     pj_stun_msg_destroy_tdata(sess, tdata);
 
-    pj_lock_release(sess->lock);
-
-    if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
-       pj_stun_session_destroy(sess);
-       return PJNATH_ESTUNDESTROYED;
-    }
+    pj_grp_lock_release(sess->grp_lock);
 
     return PJ_SUCCESS;
@@ -1064 +1121 @@
 
     /* Lock the session and prevent user from destroying us in the callback */
-    pj_atomic_inc(sess->busy);
-    pj_lock_acquire(sess->lock);
+    pj_grp_lock_acquire(sess->grp_lock);
+    if (sess->is_destroying) {
+       pj_grp_lock_release(sess->grp_lock);
+       return PJ_EINVALIDOP;
+    }
 
     status = pj_stun_client_tsx_retransmit(tdata->client_tsx, mod_count);
 
-    pj_lock_release(sess->lock);
-
-    if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) {
-       pj_stun_session_destroy(sess);
-       return PJNATH_ESTUNDESTROYED;
-    }
+    pj_grp_lock_release(sess->grp_lock);
 
     return status;
@@ -1362 +1417 @@
     PJ_ASSERT_RETURN(sess && packet && pkt_size, PJ_EINVAL);
 
+    /* Lock the session and prevent user from destroying us in the callback */
+    pj_grp_lock_acquire(sess->grp_lock);
+
+    if (sess->is_destroying) {
+       pj_grp_lock_release(sess->grp_lock);
+       return PJ_EINVALIDOP;
+    }
+
     pj_log_push_indent();
-
-    /* Lock the session and prevent user from destroying us in the callback
*/1367 pj_atomic_inc(sess->busy);1368 pj_lock_acquire(sess->lock);1369 1428 1370 1429 /* Reset pool */ … … 1419 1478 1420 1479 on_return: 1421 pj_lock_release(sess->lock);1422 1423 1480 pj_log_pop_indent(); 1424 1481 1425 /* If we've received destroy request while we're on the callback, 1426 * destroy the session now. 1427 */ 1428 if (pj_atomic_dec_and_get(sess->busy)==0 && sess->destroy_request) { 1429 pj_stun_session_destroy(sess); 1430 return PJNATH_ESTUNDESTROYED; 1431 } 1482 if (pj_grp_lock_release(sess->grp_lock)) 1483 return PJ_EGONE; 1432 1484 1433 1485 return status; -
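The stun_session.c changes above replace the busy counter and destroy_request flag with a single pj_grp_lock_t plus an is_destroying guard, and they add the new grp_lock argument to pj_stun_session_create(). A minimal sketch of the new creation call (the helper name and the "stunsess" pool name are hypothetical; passing NULL makes the session create its own group lock, as the pjturn-srv call sites at the end of this changeset do):

    #include <pjlib.h>
    #include <pjnath.h>

    static pj_status_t my_create_sess(pj_stun_config *cfg,
                                      const pj_stun_session_cb *cb,
                                      pj_stun_session **p_sess)
    {
        /* NULL grp_lock: the session allocates and owns one itself */
        return pj_stun_session_create(cfg, "stunsess", cb, PJ_FALSE,
                                      NULL /* grp_lock */, p_sess);
    }

Note the new return convention visible above as well: where the old code returned PJNATH_ESTUNDESTROYED after detecting a destroy request from inside a callback, the patched code returns PJ_EGONE when pj_grp_lock_release() reports that the last reference was dropped.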
pjproject/trunk/pjnath/src/pjnath/stun_sock.c
r4344 r4360 29 29 #include <pj/ip_helper.h> 30 30 #include <pj/log.h> 31 #include <pj/os.h> 31 32 #include <pj/pool.h> 32 33 #include <pj/rand.h> 33 34 35 #if 1 36 # define TRACE_(x) PJ_LOG(5,x) 37 #else 38 # define TRACE_(x) 39 #endif 34 40 35 41 enum { MAX_BIND_RETRY = 100 }; … … 40 46 pj_pool_t *pool; /* Pool */ 41 47 void *user_data; /* Application user data */ 42 48 pj_bool_t is_destroying; /* Destroy already called */ 43 49 int af; /* Address family */ 44 50 pj_stun_config stun_cfg; /* STUN config (ioqueue etc)*/ … … 59 65 pj_uint16_t tsx_id[6]; /* .. to match STUN msg */ 60 66 pj_stun_session *stun_sess; /* STUN session */ 61 67 pj_grp_lock_t *grp_lock; /* Session group lock */ 62 68 }; 63 69 … … 65 71 * Prototypes for static functions 66 72 */ 73 74 /* Destructor for group lock */ 75 static void stun_sock_destructor(void *obj); 67 76 68 77 /* This callback is called by the STUN session to send packet */ … … 203 212 stun_sock->ka_interval = PJ_STUN_KEEP_ALIVE_SEC; 204 213 214 if (cfg && cfg->grp_lock) { 215 stun_sock->grp_lock = cfg->grp_lock; 216 } else { 217 status = pj_grp_lock_create(pool, NULL, &stun_sock->grp_lock); 218 if (status != PJ_SUCCESS) { 219 pj_pool_release(pool); 220 return status; 221 } 222 } 223 224 pj_grp_lock_add_ref(stun_sock->grp_lock); 225 pj_grp_lock_add_handler(stun_sock->grp_lock, pool, stun_sock, 226 &stun_sock_destructor); 227 205 228 /* Create socket and bind socket */ 206 229 status = pj_sock_socket(af, pj_SOCK_DGRAM(), 0, &stun_sock->sock_fd); … … 253 276 254 277 pj_activesock_cfg_default(&activesock_cfg); 278 activesock_cfg.grp_lock = stun_sock->grp_lock; 255 279 activesock_cfg.async_cnt = cfg->async_cnt; 256 280 activesock_cfg.concurrency = 0; … … 291 315 stun_sock->obj_name, 292 316 &sess_cb, PJ_FALSE, 317 stun_sock->grp_lock, 293 318 &stun_sock->stun_sess); 294 319 if (status != PJ_SUCCESS) … … 333 358 PJ_ASSERT_RETURN(stun_sock && domain && default_port, PJ_EINVAL); 334 359 360 pj_grp_lock_acquire(stun_sock->grp_lock); 361 335 362 /* Check whether the domain contains IP address */ 336 363 stun_sock->srv_addr.addr.sa_family = (pj_uint16_t)stun_sock->af; … … 361 388 362 389 /* Processing will resume when the DNS SRV callback is called */ 363 return status;364 390 365 391 } else { … … 379 405 380 406 /* Start sending Binding request */ 381 return get_mapped_addr(stun_sock); 382 } 383 } 384 385 /* Destroy */ 386 PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock) 387 { 407 status = get_mapped_addr(stun_sock); 408 } 409 410 pj_grp_lock_release(stun_sock->grp_lock); 411 return status; 412 } 413 414 /* Destructor */ 415 static void stun_sock_destructor(void *obj) 416 { 417 pj_stun_sock *stun_sock = (pj_stun_sock*)obj; 418 388 419 if (stun_sock->q) { 389 420 pj_dns_srv_cancel_query(stun_sock->q, PJ_FALSE); … … 391 422 } 392 423 393 if (stun_sock->stun_sess) { 394 pj_stun_session_set_user_data(stun_sock->stun_sess, NULL); 395 } 396 397 /* Destroy the active socket first just in case we'll get 398 * stray callback. 
399 */ 400 if (stun_sock->active_sock != NULL) { 401 pj_activesock_t *asock = stun_sock->active_sock; 402 stun_sock->active_sock = NULL; 403 stun_sock->sock_fd = PJ_INVALID_SOCKET; 404 pj_activesock_set_user_data(asock, NULL); 405 pj_activesock_close(asock); 406 } else if (stun_sock->sock_fd != PJ_INVALID_SOCKET) { 407 pj_sock_close(stun_sock->sock_fd); 408 stun_sock->sock_fd = PJ_INVALID_SOCKET; 409 } 410 411 if (stun_sock->ka_timer.id != 0) { 412 pj_timer_heap_cancel(stun_sock->stun_cfg.timer_heap, 413 &stun_sock->ka_timer); 414 stun_sock->ka_timer.id = 0; 415 } 416 424 /* 417 425 if (stun_sock->stun_sess) { 418 426 pj_stun_session_destroy(stun_sock->stun_sess); 419 427 stun_sock->stun_sess = NULL; 420 428 } 429 */ 421 430 422 431 if (stun_sock->pool) { … … 426 435 } 427 436 437 TRACE_(("", "STUN sock %p destroyed", stun_sock)); 438 439 } 440 441 /* Destroy */ 442 PJ_DEF(pj_status_t) pj_stun_sock_destroy(pj_stun_sock *stun_sock) 443 { 444 TRACE_((stun_sock->obj_name, "STUN sock %p request, ref_cnt=%d", 445 stun_sock, pj_grp_lock_get_ref(stun_sock->grp_lock))); 446 447 pj_grp_lock_acquire(stun_sock->grp_lock); 448 if (stun_sock->is_destroying) { 449 /* Destroy already called */ 450 pj_grp_lock_release(stun_sock->grp_lock); 451 return PJ_EINVALIDOP; 452 } 453 454 stun_sock->is_destroying = PJ_TRUE; 455 pj_timer_heap_cancel_if_active(stun_sock->stun_cfg.timer_heap, 456 &stun_sock->ka_timer, 0); 457 458 if (stun_sock->active_sock != NULL) { 459 stun_sock->sock_fd = PJ_INVALID_SOCKET; 460 pj_activesock_close(stun_sock->active_sock); 461 } else if (stun_sock->sock_fd != PJ_INVALID_SOCKET) { 462 pj_sock_close(stun_sock->sock_fd); 463 stun_sock->sock_fd = PJ_INVALID_SOCKET; 464 } 465 466 if (stun_sock->stun_sess) { 467 pj_stun_session_destroy(stun_sock->stun_sess); 468 } 469 pj_grp_lock_dec_ref(stun_sock->grp_lock); 470 pj_grp_lock_release(stun_sock->grp_lock); 428 471 return PJ_SUCCESS; 429 472 } … … 469 512 pj_stun_sock *stun_sock = (pj_stun_sock*) user_data; 470 513 514 pj_grp_lock_acquire(stun_sock->grp_lock); 515 471 516 /* Clear query */ 472 517 stun_sock->q = NULL; … … 475 520 if (status != PJ_SUCCESS) { 476 521 sess_fail(stun_sock, PJ_STUN_SOCK_DNS_OP, status); 522 pj_grp_lock_release(stun_sock->grp_lock); 477 523 return; 478 524 } … … 491 537 /* Start sending Binding request */ 492 538 get_mapped_addr(stun_sock); 539 540 pj_grp_lock_release(stun_sock->grp_lock); 493 541 } 494 542 … … 534 582 PJ_ASSERT_RETURN(stun_sock && info, PJ_EINVAL); 535 583 584 pj_grp_lock_acquire(stun_sock->grp_lock); 585 536 586 /* Copy STUN server address and mapped address */ 537 587 pj_memcpy(&info->srv_addr, &stun_sock->srv_addr, … … 544 594 status = pj_sock_getsockname(stun_sock->sock_fd, &info->bound_addr, 545 595 &addr_len); 546 if (status != PJ_SUCCESS) 596 if (status != PJ_SUCCESS) { 597 pj_grp_lock_release(stun_sock->grp_lock); 547 598 return status; 599 } 548 600 549 601 /* If socket is bound to a specific interface, then only put that … … 561 613 /* Get the default address */ 562 614 status = pj_gethostip(stun_sock->af, &def_addr); 563 if (status != PJ_SUCCESS) 615 if (status != PJ_SUCCESS) { 616 pj_grp_lock_release(stun_sock->grp_lock); 564 617 return status; 618 } 565 619 566 620 pj_sockaddr_set_port(&def_addr, port); … … 570 624 status = pj_enum_ip_interface(stun_sock->af, &info->alias_cnt, 571 625 info->aliases); 572 if (status != PJ_SUCCESS) 626 if (status != PJ_SUCCESS) { 627 pj_grp_lock_release(stun_sock->grp_lock); 573 628 return status; 629 } 574 630 575 631 /* Set the port number for each 
address. … … 591 647 } 592 648 649 pj_grp_lock_release(stun_sock->grp_lock); 593 650 return PJ_SUCCESS; 594 651 } … … 604 661 { 605 662 pj_ssize_t size; 663 pj_status_t status; 664 606 665 PJ_ASSERT_RETURN(stun_sock && pkt && dst_addr && addr_len, PJ_EINVAL); 607 666 667 pj_grp_lock_acquire(stun_sock->grp_lock); 668 669 if (!stun_sock->active_sock) { 670 /* We have been shutdown, but this callback may still get called 671 * by retransmit timer. 672 */ 673 pj_grp_lock_release(stun_sock->grp_lock); 674 return PJ_EINVALIDOP; 675 } 676 608 677 if (send_key==NULL) 609 678 send_key = &stun_sock->send_key; 610 679 611 680 size = pkt_len; 612 return pj_activesock_sendto(stun_sock->active_sock, send_key, 613 pkt, &size, flag, dst_addr, addr_len); 681 status = pj_activesock_sendto(stun_sock->active_sock, send_key, 682 pkt, &size, flag, dst_addr, addr_len); 683 684 pj_grp_lock_release(stun_sock->grp_lock); 685 return status; 614 686 } 615 687 … … 626 698 627 699 stun_sock = (pj_stun_sock *) pj_stun_session_get_user_data(sess); 628 if (!stun_sock || !stun_sock->active_sock) 700 if (!stun_sock || !stun_sock->active_sock) { 701 /* We have been shutdown, but this callback may still get called 702 * by retransmit timer. 703 */ 629 704 return PJ_EINVALIDOP; 705 } 630 706 631 707 pj_assert(token==INTERNAL_MSG_TOKEN); … … 633 709 634 710 size = pkt_size; 635 return pj_activesock_sendto(stun_sock->active_sock, 711 return pj_activesock_sendto(stun_sock->active_sock, 636 712 &stun_sock->int_send_key, 637 713 pkt, &size, 0, dst_addr, addr_len); … … 727 803 static void start_ka_timer(pj_stun_sock *stun_sock) 728 804 { 729 if (stun_sock->ka_timer.id != 0) { 730 pj_timer_heap_cancel(stun_sock->stun_cfg.timer_heap, 731 &stun_sock->ka_timer); 732 stun_sock->ka_timer.id = 0; 733 } 805 pj_timer_heap_cancel_if_active(stun_sock->stun_cfg.timer_heap, 806 &stun_sock->ka_timer, 0); 734 807 735 808 pj_assert(stun_sock->ka_interval != 0); 736 if (stun_sock->ka_interval > 0 ) {809 if (stun_sock->ka_interval > 0 && !stun_sock->is_destroying) { 737 810 pj_time_val delay; 738 811 … … 740 813 delay.msec = 0; 741 814 742 if (pj_timer_heap_schedule(stun_sock->stun_cfg.timer_heap, 743 &stun_sock->ka_timer, 744 &delay) == PJ_SUCCESS) 745 { 746 stun_sock->ka_timer.id = PJ_TRUE; 747 } 815 pj_timer_heap_schedule_w_grp_lock(stun_sock->stun_cfg.timer_heap, 816 &stun_sock->ka_timer, 817 &delay, PJ_TRUE, 818 stun_sock->grp_lock); 748 819 } 749 820 } … … 757 828 758 829 PJ_UNUSED_ARG(th); 830 pj_grp_lock_acquire(stun_sock->grp_lock); 759 831 760 832 /* Time to send STUN Binding request */ 761 if (get_mapped_addr(stun_sock) != PJ_SUCCESS) 833 if (get_mapped_addr(stun_sock) != PJ_SUCCESS) { 834 pj_grp_lock_release(stun_sock->grp_lock); 762 835 return; 836 } 763 837 764 838 /* Next keep-alive timer will be scheduled once the request 765 839 * is complete. 766 840 */ 841 pj_grp_lock_release(stun_sock->grp_lock); 767 842 } 768 843 … … 788 863 return PJ_TRUE; 789 864 } 865 866 pj_grp_lock_acquire(stun_sock->grp_lock); 790 867 791 868 /* Check that this is STUN message */ … … 824 901 PJ_STUN_IS_DATAGRAM, NULL, NULL, 825 902 src_addr, addr_len); 826 return status!=PJNATH_ESTUNDESTROYED ? PJ_TRUE : PJ_FALSE; 903 904 status = pj_grp_lock_release(stun_sock->grp_lock); 905 906 return status!=PJ_EGONE ? 
PJ_TRUE : PJ_FALSE; 827 907 828 908 process_app_data: … … 832 912 ret = (*stun_sock->cb.on_rx_data)(stun_sock, data, size, 833 913 src_addr, addr_len); 834 return ret; 835 } 836 837 return PJ_TRUE; 914 status = pj_grp_lock_release(stun_sock->grp_lock); 915 return status!=PJ_EGONE ? PJ_TRUE : PJ_FALSE; 916 } 917 918 status = pj_grp_lock_release(stun_sock->grp_lock); 919 return status!=PJ_EGONE ? PJ_TRUE : PJ_FALSE; 838 920 } 839 921 … … 857 939 if (stun_sock->cb.on_data_sent) { 858 940 pj_bool_t ret; 941 942 pj_grp_lock_acquire(stun_sock->grp_lock); 859 943 860 944 /* If app gives NULL send_key in sendto() function, then give … … 867 951 ret = (*stun_sock->cb.on_data_sent)(stun_sock, send_key, sent); 868 952 953 pj_grp_lock_release(stun_sock->grp_lock); 869 954 return ret; 870 955 } -
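stun_sock.c now routes its active socket, keep-alive timer and embedded STUN session through one group lock, taken from the new pj_stun_sock_cfg.grp_lock field when the caller provides it. A sketch of sharing an owner's lock with the socket, assuming the pre-existing pj_stun_sock_cfg_default() and pj_stun_sock_create() signatures (the helper and its parameters are hypothetical):

    #include <pjlib.h>
    #include <pjnath.h>

    static pj_status_t my_init_stun_sock(pj_stun_config *stun_cfg,
                                         pj_grp_lock_t *owner_lock,
                                         const pj_stun_sock_cb *cb,
                                         void *user_data,
                                         pj_stun_sock **p_sock)
    {
        pj_stun_sock_cfg cfg;

        pj_stun_sock_cfg_default(&cfg);
        /* Share the owner's lock; leaving this NULL (the default)
         * lets the socket create one internally.
         */
        cfg.grp_lock = owner_lock;

        return pj_stun_sock_create(stun_cfg, "stunsock", pj_AF_INET(),
                                   cb, &cfg, user_data, p_sock);
    }

With a shared lock, pj_stun_sock_destroy() above only flags is_destroying, cancels the keep-alive timer and drops its reference; stun_sock_destructor() runs once every timer and pending callback has released the lock too.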
pjproject/trunk/pjnath/src/pjnath/stun_transaction.c
r4352 r4360 27 27 28 28 29 #define THIS_FILE "stun_transaction.c" 30 #define TIMER_INACTIVE 0 29 31 #define TIMER_ACTIVE 1 30 32 … … 35 37 pj_stun_tsx_cb cb; 36 38 void *user_data; 39 pj_grp_lock_t *grp_lock; 37 40 38 41 pj_bool_t complete; … … 52 55 53 56 57 #if 1 58 # define TRACE_(expr) PJ_LOG(5,expr) 59 #else 60 # define TRACE_(expr) 61 #endif 62 63 54 64 static void retransmit_timer_callback(pj_timer_heap_t *timer_heap, 55 65 pj_timer_entry *timer); … … 57 67 pj_timer_entry *timer); 58 68 59 #define stun_perror(tsx,msg,rc) pjnath_perror(tsx->obj_name, msg, rc)60 61 69 /* 62 70 * Create a STUN client transaction. … … 64 72 PJ_DEF(pj_status_t) pj_stun_client_tsx_create(pj_stun_config *cfg, 65 73 pj_pool_t *pool, 74 pj_grp_lock_t *grp_lock, 66 75 const pj_stun_tsx_cb *cb, 67 76 pj_stun_client_tsx **p_tsx) … … 75 84 tsx->rto_msec = cfg->rto_msec; 76 85 tsx->timer_heap = cfg->timer_heap; 86 tsx->grp_lock = grp_lock; 77 87 pj_memcpy(&tsx->cb, cb, sizeof(*cb)); 78 88 … … 83 93 tsx->destroy_timer.user_data = tsx; 84 94 85 pj_ansi_snprintf(tsx->obj_name, sizeof(tsx->obj_name), " stuntsx%p", tsx);95 pj_ansi_snprintf(tsx->obj_name, sizeof(tsx->obj_name), "utsx%p", tsx); 86 96 87 97 *p_tsx = tsx; … … 101 111 PJ_ASSERT_RETURN(tsx->cb.on_destroy, PJ_EINVAL); 102 112 113 pj_grp_lock_acquire(tsx->grp_lock); 114 103 115 /* Cancel previously registered timer */ 104 if (tsx->destroy_timer.id != 0) { 105 pj_timer_heap_cancel(tsx->timer_heap, &tsx->destroy_timer); 106 tsx->destroy_timer.id = 0; 107 } 116 pj_timer_heap_cancel_if_active(tsx->timer_heap, &tsx->destroy_timer, 117 TIMER_INACTIVE); 108 118 109 119 /* Stop retransmission, just in case */ 110 if (tsx->retransmit_timer.id != 0) {111 pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer);112 tsx->retransmit_timer.id = 0; 113 }114 115 status = pj_timer_heap_schedule(tsx->timer_heap,116 &tsx->destroy_timer, delay); 117 if (status != PJ_SUCCESS) 120 pj_timer_heap_cancel_if_active(tsx->timer_heap, &tsx->retransmit_timer, 121 TIMER_INACTIVE); 122 123 status = pj_timer_heap_schedule_w_grp_lock(tsx->timer_heap, 124 &tsx->destroy_timer, delay, 125 TIMER_ACTIVE, tsx->grp_lock); 126 if (status != PJ_SUCCESS) { 127 pj_grp_lock_release(tsx->grp_lock); 118 128 return status; 119 120 tsx->destroy_timer.id = TIMER_ACTIVE; 129 } 130 121 131 tsx->cb.on_complete = NULL; 122 132 133 pj_grp_lock_release(tsx->grp_lock); 134 135 TRACE_((tsx->obj_name, "STUN transaction %p schedule destroy", tsx)); 136 123 137 return PJ_SUCCESS; 124 138 } … … 128 142 * Destroy transaction immediately. 129 143 */ 130 PJ_DEF(pj_status_t) pj_stun_client_tsx_ destroy(pj_stun_client_tsx *tsx)144 PJ_DEF(pj_status_t) pj_stun_client_tsx_stop(pj_stun_client_tsx *tsx) 131 145 { 132 146 PJ_ASSERT_RETURN(tsx, PJ_EINVAL); 133 147 134 if (tsx->retransmit_timer.id != 0) { 135 pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer); 136 tsx->retransmit_timer.id = 0; 137 } 138 if (tsx->destroy_timer.id != 0) { 139 pj_timer_heap_cancel(tsx->timer_heap, &tsx->destroy_timer); 140 tsx->destroy_timer.id = 0; 141 } 142 143 PJ_LOG(5,(tsx->obj_name, "STUN client transaction destroyed")); 148 /* Don't call grp_lock_acquire() because we might be called on 149 * group lock's destructor. 
150 */ 151 pj_timer_heap_cancel_if_active(tsx->timer_heap, &tsx->retransmit_timer, 152 TIMER_INACTIVE); 153 pj_timer_heap_cancel_if_active(tsx->timer_heap, &tsx->destroy_timer, 154 TIMER_INACTIVE); 155 156 PJ_LOG(5,(tsx->obj_name, "STUN client transaction %p stopped, ref_cnt=%d", 157 tsx, pj_grp_lock_get_ref(tsx->grp_lock))); 158 144 159 return PJ_SUCCESS; 145 160 } … … 186 201 pj_status_t status; 187 202 188 PJ_ASSERT_RETURN(tsx->retransmit_timer.id == 0||203 PJ_ASSERT_RETURN(tsx->retransmit_timer.id == TIMER_INACTIVE || 189 204 !tsx->require_retransmit, PJ_EBUSY); 190 205 … … 212 227 * cancel transmission). 213 228 */; 214 status = pj_timer_heap_schedule(tsx->timer_heap, 215 &tsx->retransmit_timer, 216 &tsx->retransmit_time); 229 status = pj_timer_heap_schedule_w_grp_lock(tsx->timer_heap, 230 &tsx->retransmit_timer, 231 &tsx->retransmit_time, 232 TIMER_ACTIVE, 233 tsx->grp_lock); 217 234 if (status != PJ_SUCCESS) { 218 tsx->retransmit_timer.id = 0;235 tsx->retransmit_timer.id = TIMER_INACTIVE; 219 236 return status; 220 237 } 221 tsx->retransmit_timer.id = TIMER_ACTIVE;222 238 } 223 239 … … 236 252 /* We've been destroyed, don't access the object. */ 237 253 } else if (status != PJ_SUCCESS) { 238 if ( tsx->retransmit_timer.id != 0 &&mod_count) {239 pj_timer_heap_cancel(tsx->timer_heap,240 &tsx->retransmit_timer);241 tsx->retransmit_timer.id = 0;242 } 243 stun_perror(tsx, "STUN error sending message", status);254 if (mod_count) { 255 pj_timer_heap_cancel_if_active( tsx->timer_heap, 256 &tsx->retransmit_timer, 257 TIMER_INACTIVE); 258 } 259 PJ_PERROR(4, (tsx->obj_name, status, "STUN error sending message")); 244 260 } 245 261 … … 261 277 PJ_ASSERT_RETURN(tsx && pkt && pkt_len, PJ_EINVAL); 262 278 PJ_ASSERT_RETURN(tsx->retransmit_timer.id == 0, PJ_EBUSY); 279 280 pj_grp_lock_acquire(tsx->grp_lock); 263 281 264 282 /* Encode message */ … … 287 305 * cancel transmission). 
288 306 */; 289 status = pj_timer_heap_schedule(tsx->timer_heap, 290 &tsx->retransmit_timer, 291 &tsx->retransmit_time); 307 status = pj_timer_heap_schedule_w_grp_lock(tsx->timer_heap, 308 &tsx->retransmit_timer, 309 &tsx->retransmit_time, 310 TIMER_ACTIVE, 311 tsx->grp_lock); 292 312 if (status != PJ_SUCCESS) { 293 tsx->retransmit_timer.id = 0; 313 tsx->retransmit_timer.id = TIMER_INACTIVE; 314 pj_grp_lock_release(tsx->grp_lock); 294 315 return status; 295 316 } 296 tsx->retransmit_timer.id = TIMER_ACTIVE;297 317 } 298 318 … … 300 320 status = tsx_transmit_msg(tsx, PJ_TRUE); 301 321 if (status != PJ_SUCCESS) { 302 if (tsx->retransmit_timer.id != 0) { 303 pj_timer_heap_cancel(tsx->timer_heap, 304 &tsx->retransmit_timer); 305 tsx->retransmit_timer.id = 0; 306 } 322 pj_timer_heap_cancel_if_active(tsx->timer_heap, 323 &tsx->retransmit_timer, 324 TIMER_INACTIVE); 325 pj_grp_lock_release(tsx->grp_lock); 307 326 return status; 308 327 } 309 328 329 pj_grp_lock_release(tsx->grp_lock); 310 330 return PJ_SUCCESS; 311 331 } … … 320 340 321 341 PJ_UNUSED_ARG(timer_heap); 342 pj_grp_lock_acquire(tsx->grp_lock); 322 343 323 344 if (tsx->transmit_count >= PJ_STUN_MAX_TRANSMIT_COUNT) { … … 332 353 } 333 354 } 355 pj_grp_lock_release(tsx->grp_lock); 334 356 /* We might have been destroyed, don't try to access the object */ 335 357 pj_log_pop_indent(); … … 339 361 tsx->retransmit_timer.id = 0; 340 362 status = tsx_transmit_msg(tsx, PJ_TRUE); 341 if (status == PJNATH_ESTUNDESTROYED) { 342 /* We've been destroyed, don't try to access the object */ 343 } else if (status != PJ_SUCCESS) { 363 if (status != PJ_SUCCESS) { 344 364 tsx->retransmit_timer.id = 0; 345 365 if (!tsx->complete) { … … 349 369 } 350 370 } 351 /* We might have been destroyed, don't try to access the object */ 352 } 371 } 372 373 pj_grp_lock_release(tsx->grp_lock); 374 /* We might have been destroyed, don't try to access the object */ 353 375 } 354 376 … … 363 385 } 364 386 365 if (tsx->retransmit_timer.id != 0) { 366 pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer); 367 tsx->retransmit_timer.id = 0; 368 } 387 pj_timer_heap_cancel_if_active(tsx->timer_heap, &tsx->retransmit_timer, 388 TIMER_INACTIVE); 369 389 370 390 return tsx_transmit_msg(tsx, mod_count); … … 380 400 381 401 tsx->destroy_timer.id = PJ_FALSE; 402 382 403 tsx->cb.on_destroy(tsx); 383 404 /* Don't access transaction after this */ … … 409 430 * We can cancel retransmit timer now. 410 431 */ 411 if (tsx->retransmit_timer.id) { 412 pj_timer_heap_cancel(tsx->timer_heap, &tsx->retransmit_timer); 413 tsx->retransmit_timer.id = 0; 414 } 432 pj_timer_heap_cancel_if_active(tsx->timer_heap, &tsx->retransmit_timer, 433 TIMER_INACTIVE); 415 434 416 435 /* Find STUN error code attribute */ -
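Two pjlib timer helpers carry most of the transaction rewrite: pj_timer_heap_cancel_if_active(), which collapses the old "if (timer.id) { cancel; timer.id = 0; }" sequence into one idempotent call, and pj_timer_heap_schedule_w_grp_lock(), which makes a scheduled entry hold a group-lock reference so that a late-firing callback can never touch a freed transaction. They compose into a restart idiom roughly like this sketch (the function name is hypothetical; the calls match the usage in the diff):

    #include <pjlib.h>

    enum { TIMER_INACTIVE = 0, TIMER_ACTIVE = 1 };

    static pj_status_t my_restart_timer(pj_timer_heap_t *th,
                                        pj_timer_entry *entry,
                                        const pj_time_val *delay,
                                        pj_grp_lock_t *grp_lock)
    {
        /* No-op when the entry is not scheduled; otherwise cancels it
         * and resets entry->id to TIMER_INACTIVE.
         */
        pj_timer_heap_cancel_if_active(th, entry, TIMER_INACTIVE);

        /* Schedule, set entry->id to TIMER_ACTIVE, and hold a group
         * lock reference until the timer fires or is cancelled.
         */
        return pj_timer_heap_schedule_w_grp_lock(th, entry, delay,
                                                 TIMER_ACTIVE, grp_lock);
    }

This is also why pj_stun_client_tsx_stop() above may run from the group lock's own destructor: cancelling needs no pj_grp_lock_acquire() and takes no extra reference.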
pjproject/trunk/pjnath/src/pjnath/turn_session.c
r4201 r4360 113 113 void *user_data; 114 114 pj_stun_config stun_cfg; 115 116 pj_lock_t *lock; 115 pj_bool_t is_destroying; 116 117 pj_grp_lock_t *grp_lock; 117 118 int busy; 118 119 … … 162 163 static void sess_shutdown(pj_turn_session *sess, 163 164 pj_status_t status); 165 static void turn_sess_on_destroy(void *comp); 164 166 static void do_destroy(pj_turn_session *sess); 165 167 static void send_refresh(pj_turn_session *sess, int lifetime); … … 237 239 int af, 238 240 pj_turn_tp_type conn_type, 241 pj_grp_lock_t *grp_lock, 239 242 const pj_turn_session_cb *cb, 240 243 unsigned options, … … 245 248 pj_turn_session *sess; 246 249 pj_stun_session_cb stun_cb; 247 pj_lock_t *null_lock;248 250 pj_status_t status; 249 251 … … 282 284 283 285 /* Session lock */ 284 status = pj_lock_create_recursive_mutex(pool, sess->obj_name, 285 &sess->lock); 286 if (status != PJ_SUCCESS) { 287 do_destroy(sess); 288 return status; 289 } 286 if (grp_lock) { 287 sess->grp_lock = grp_lock; 288 } else { 289 status = pj_grp_lock_create(pool, NULL, &sess->grp_lock); 290 if (status != PJ_SUCCESS) { 291 pj_pool_release(pool); 292 return status; 293 } 294 } 295 296 pj_grp_lock_add_ref(sess->grp_lock); 297 pj_grp_lock_add_handler(sess->grp_lock, pool, sess, 298 &turn_sess_on_destroy); 290 299 291 300 /* Timer */ … … 298 307 stun_cb.on_rx_indication = &stun_on_rx_indication; 299 308 status = pj_stun_session_create(&sess->stun_cfg, sess->obj_name, &stun_cb, 300 PJ_FALSE, &sess->stun);309 PJ_FALSE, sess->grp_lock, &sess->stun); 301 310 if (status != PJ_SUCCESS) { 302 311 do_destroy(sess); … … 307 316 pj_stun_session_set_user_data(sess->stun, sess); 308 317 309 /* Replace mutex in STUN session with a NULL mutex, since access to310 * STUN session is serialized.311 */312 status = pj_lock_create_null_mutex(pool, name, &null_lock);313 if (status != PJ_SUCCESS) {314 do_destroy(sess);315 return status;316 }317 pj_stun_session_set_lock(sess->stun, null_lock, PJ_TRUE);318 319 318 /* Done */ 320 319 … … 326 325 327 326 328 /* Destroy */ 329 static void do_destroy(pj_turn_session *sess) 330 { 331 /* Lock session */ 332 if (sess->lock) { 333 pj_lock_acquire(sess->lock); 334 } 335 336 /* Cancel pending timer, if any */ 337 if (sess->timer.id != TIMER_NONE) { 338 pj_timer_heap_cancel(sess->timer_heap, &sess->timer); 339 sess->timer.id = TIMER_NONE; 340 } 341 342 /* Destroy STUN session */ 343 if (sess->stun) { 344 pj_stun_session_destroy(sess->stun); 345 sess->stun = NULL; 346 } 347 348 /* Destroy lock */ 349 if (sess->lock) { 350 pj_lock_release(sess->lock); 351 pj_lock_destroy(sess->lock); 352 sess->lock = NULL; 353 } 327 static void turn_sess_on_destroy(void *comp) 328 { 329 pj_turn_session *sess = (pj_turn_session*) comp; 354 330 355 331 /* Destroy pool */ … … 362 338 pj_pool_release(pool); 363 339 } 340 } 341 342 /* Destroy */ 343 static void do_destroy(pj_turn_session *sess) 344 { 345 PJ_LOG(4,(sess->obj_name, "TURN session destroy request, ref_cnt=%d", 346 pj_grp_lock_get_ref(sess->grp_lock))); 347 348 pj_grp_lock_acquire(sess->grp_lock); 349 if (sess->is_destroying) { 350 pj_grp_lock_release(sess->grp_lock); 351 return; 352 } 353 354 sess->is_destroying = PJ_TRUE; 355 pj_timer_heap_cancel_if_active(sess->timer_heap, &sess->timer, TIMER_NONE); 356 pj_stun_session_destroy(sess->stun); 357 358 pj_grp_lock_dec_ref(sess->grp_lock); 359 pj_grp_lock_release(sess->grp_lock); 364 360 } 365 361 … … 438 434 set_state(sess, PJ_TURN_STATE_DESTROYING); 439 435 440 if (sess->timer.id != TIMER_NONE) { 441 pj_timer_heap_cancel(sess->timer_heap, 
&sess->timer); 442 sess->timer.id = TIMER_NONE; 443 } 444 445 sess->timer.id = TIMER_DESTROY; 446 pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay); 436 pj_timer_heap_cancel_if_active(sess->timer_heap, &sess->timer, 437 TIMER_NONE); 438 pj_timer_heap_schedule_w_grp_lock(sess->timer_heap, &sess->timer, 439 &delay, TIMER_DESTROY, 440 sess->grp_lock); 447 441 } 448 442 } … … 456 450 PJ_ASSERT_RETURN(sess, PJ_EINVAL); 457 451 458 pj_ lock_acquire(sess->lock);452 pj_grp_lock_acquire(sess->grp_lock); 459 453 460 454 sess_shutdown(sess, PJ_SUCCESS); 461 455 462 pj_ lock_release(sess->lock);456 pj_grp_lock_release(sess->grp_lock); 463 457 464 458 return PJ_SUCCESS; … … 554 548 pj_status_t status; 555 549 556 pj_ lock_acquire(sess->lock);550 pj_grp_lock_acquire(sess->grp_lock); 557 551 status = pj_stun_session_set_software_name(sess->stun, sw); 558 pj_ lock_release(sess->lock);552 pj_grp_lock_release(sess->grp_lock); 559 553 560 554 return status; … … 577 571 PJ_ASSERT_RETURN(sess->state == PJ_TURN_STATE_NULL, PJ_EINVALIDOP); 578 572 579 pj_ lock_acquire(sess->lock);573 pj_grp_lock_acquire(sess->grp_lock); 580 574 581 575 /* See if "domain" contains just IP address */ … … 677 671 678 672 on_return: 679 pj_ lock_release(sess->lock);673 pj_grp_lock_release(sess->grp_lock); 680 674 return status; 681 675 } … … 691 685 PJ_ASSERT_RETURN(sess->stun, PJ_EINVALIDOP); 692 686 693 pj_ lock_acquire(sess->lock);687 pj_grp_lock_acquire(sess->grp_lock); 694 688 695 689 pj_stun_session_set_credential(sess->stun, PJ_STUN_AUTH_LONG_TERM, cred); 696 690 697 pj_ lock_release(sess->lock);691 pj_grp_lock_release(sess->grp_lock); 698 692 699 693 return PJ_SUCCESS; … … 716 710 PJ_EINVALIDOP); 717 711 718 pj_ lock_acquire(sess->lock);712 pj_grp_lock_acquire(sess->grp_lock); 719 713 720 714 if (param && param != &sess->alloc_param) … … 727 721 state_names[sess->state])); 728 722 729 pj_ lock_release(sess->lock);723 pj_grp_lock_release(sess->grp_lock); 730 724 return PJ_SUCCESS; 731 725 … … 739 733 PJ_STUN_MAGIC, NULL, &tdata); 740 734 if (status != PJ_SUCCESS) { 741 pj_ lock_release(sess->lock);735 pj_grp_lock_release(sess->grp_lock); 742 736 return status; 743 737 } … … 779 773 } 780 774 781 pj_ lock_release(sess->lock);775 pj_grp_lock_release(sess->grp_lock); 782 776 return status; 783 777 } … … 800 794 PJ_ASSERT_RETURN(sess && addr_cnt && addr, PJ_EINVAL); 801 795 802 pj_ lock_acquire(sess->lock);796 pj_grp_lock_acquire(sess->grp_lock); 803 797 804 798 /* Create a bare CreatePermission request */ … … 807 801 PJ_STUN_MAGIC, NULL, &tdata); 808 802 if (status != PJ_SUCCESS) { 809 pj_ lock_release(sess->lock);803 pj_grp_lock_release(sess->grp_lock); 810 804 return status; 811 805 } … … 858 852 } 859 853 860 pj_ lock_release(sess->lock);854 pj_grp_lock_release(sess->grp_lock); 861 855 return PJ_SUCCESS; 862 856 … … 875 869 invalidate_perm(sess, perm); 876 870 } 877 pj_ lock_release(sess->lock);871 pj_grp_lock_release(sess->grp_lock); 878 872 return status; 879 873 } … … 946 940 947 941 /* Lock session now */ 948 pj_ lock_acquire(sess->lock);942 pj_grp_lock_acquire(sess->grp_lock); 949 943 950 944 /* Lookup permission first */ … … 961 955 0); 962 956 if (status != PJ_SUCCESS) { 963 pj_ lock_release(sess->lock);957 pj_grp_lock_release(sess->grp_lock); 964 958 return status; 965 959 } … … 1036 1030 1037 1031 on_return: 1038 pj_ lock_release(sess->lock);1032 pj_grp_lock_release(sess->grp_lock); 1039 1033 return status; 1040 1034 } … … 1056 1050 PJ_ASSERT_RETURN(sess->state == PJ_TURN_STATE_READY, PJ_EINVALIDOP); 
1057 1051 1058 pj_ lock_acquire(sess->lock);1052 pj_grp_lock_acquire(sess->grp_lock); 1059 1053 1060 1054 /* Create blank ChannelBind request */ … … 1099 1093 1100 1094 on_return: 1101 pj_ lock_release(sess->lock);1095 pj_grp_lock_release(sess->grp_lock); 1102 1096 return status; 1103 1097 } … … 1122 1116 1123 1117 /* Start locking the session */ 1124 pj_ lock_acquire(sess->lock);1118 pj_grp_lock_acquire(sess->grp_lock); 1125 1119 1126 1120 is_datagram = (sess->conn_type==PJ_TURN_TP_UDP); … … 1194 1188 1195 1189 on_return: 1196 pj_ lock_release(sess->lock);1190 pj_grp_lock_release(sess->grp_lock); 1197 1191 return status; 1198 1192 } … … 1386 1380 /* Cancel existing keep-alive timer, if any */ 1387 1381 pj_assert(sess->timer.id != TIMER_DESTROY); 1388 1389 if (sess->timer.id != TIMER_NONE) { 1390 pj_timer_heap_cancel(sess->timer_heap, &sess->timer); 1391 sess->timer.id = TIMER_NONE; 1382 if (sess->timer.id == TIMER_KEEP_ALIVE) { 1383 pj_timer_heap_cancel_if_active(sess->timer_heap, &sess->timer, 1384 TIMER_NONE); 1392 1385 } 1393 1386 1394 1387 /* Start keep-alive timer once allocation succeeds */ 1395 timeout.sec = sess->ka_interval; 1396 timeout.msec = 0; 1397 1398 sess->timer.id = TIMER_KEEP_ALIVE; 1399 pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &timeout); 1400 1401 set_state(sess, PJ_TURN_STATE_READY); 1388 if (sess->state < PJ_TURN_STATE_DEALLOCATING) { 1389 timeout.sec = sess->ka_interval; 1390 timeout.msec = 0; 1391 1392 pj_timer_heap_schedule_w_grp_lock(sess->timer_heap, &sess->timer, 1393 &timeout, TIMER_KEEP_ALIVE, 1394 sess->grp_lock); 1395 1396 set_state(sess, PJ_TURN_STATE_READY); 1397 } 1402 1398 } 1403 1399 … … 1949 1945 PJ_UNUSED_ARG(th); 1950 1946 1951 pj_ lock_acquire(sess->lock);1947 pj_grp_lock_acquire(sess->grp_lock); 1952 1948 1953 1949 eid = (enum timer_id_t) e->id; … … 2026 2022 delay.msec = 0; 2027 2023 2028 sess->timer.id = TIMER_KEEP_ALIVE; 2029 pj_timer_heap_schedule(sess->timer_heap, &sess->timer, &delay); 2030 } 2031 2032 pj_lock_release(sess->lock); 2024 pj_timer_heap_schedule_w_grp_lock(sess->timer_heap, &sess->timer, 2025 &delay, TIMER_KEEP_ALIVE, 2026 sess->grp_lock); 2027 } 2033 2028 2034 2029 } else if (eid == TIMER_DESTROY) { 2035 2030 /* Time to destroy */ 2036 pj_lock_release(sess->lock);2037 2031 do_destroy(sess); 2038 2032 } else { 2039 2033 pj_assert(!"Unknown timer event"); 2040 pj_lock_release(sess->lock); 2041 } 2042 } 2043 2034 } 2035 2036 pj_grp_lock_release(sess->grp_lock); 2037 } 2038 -
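Every public entry point in turn_session.c now follows the same guard: acquire the group lock, refuse the call if destruction has started, do the work, release. Condensed into a generic sketch (struct my_obj and its fields are hypothetical; the PJ_EGONE check mirrors the patched STUN session code):

    #include <pjlib.h>

    struct my_obj {
        pj_grp_lock_t *grp_lock;
        pj_bool_t      is_destroying;
    };

    static pj_status_t my_obj_do_work(struct my_obj *obj)
    {
        pj_grp_lock_acquire(obj->grp_lock);

        if (obj->is_destroying) {
            /* Destruction already requested; reject late calls */
            pj_grp_lock_release(obj->grp_lock);
            return PJ_EINVALIDOP;
        }

        /* ... real work; user callbacks may run here and may even
         * request destruction ...
         */

        /* If a callback dropped the last reference, release() tears
         * the object down and reports PJ_EGONE.
         */
        if (pj_grp_lock_release(obj->grp_lock) == PJ_EGONE)
            return PJ_EGONE;

        return PJ_SUCCESS;
    }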
pjproject/trunk/pjnath/src/pjnath/turn_sock.c
r4343 r4360 47 47 void *user_data; 48 48 49 pj_lock_t *lock; 49 pj_bool_t is_destroying; 50 pj_grp_lock_t *grp_lock; 50 51 51 52 pj_turn_alloc_param alloc_param; … … 53 54 pj_turn_sock_cfg setting; 54 55 55 pj_bool_t destroy_request;56 56 pj_timer_entry timer; 57 57 … … 94 94 95 95 96 static void turn_sock_on_destroy(void *comp); 96 97 static void destroy(pj_turn_sock *turn_sock); 97 98 static void timer_cb(pj_timer_heap_t *th, pj_timer_entry *e); … … 169 170 } 170 171 171 /* Create lock */ 172 status = pj_lock_create_recursive_mutex(pool, turn_sock->obj_name, 173 &turn_sock->lock); 174 if (status != PJ_SUCCESS) { 175 destroy(turn_sock); 176 return status; 177 } 172 /* Session lock */ 173 if (setting && setting->grp_lock) { 174 turn_sock->grp_lock = setting->grp_lock; 175 } else { 176 status = pj_grp_lock_create(pool, NULL, &turn_sock->grp_lock); 177 if (status != PJ_SUCCESS) { 178 pj_pool_release(pool); 179 return status; 180 } 181 } 182 183 pj_grp_lock_add_ref(turn_sock->grp_lock); 184 pj_grp_lock_add_handler(turn_sock->grp_lock, pool, turn_sock, 185 &turn_sock_on_destroy); 178 186 179 187 /* Init timer */ … … 187 195 sess_cb.on_state = &turn_on_state; 188 196 status = pj_turn_session_create(cfg, pool->obj_name, af, conn_type, 189 &sess_cb, 0, turn_sock, &turn_sock->sess); 197 turn_sock->grp_lock, &sess_cb, 0, 198 turn_sock, &turn_sock->sess); 190 199 if (status != PJ_SUCCESS) { 191 200 destroy(turn_sock); … … 204 213 * Destroy. 205 214 */ 206 static void destroy(pj_turn_sock *turn_sock) 207 { 208 if (turn_sock->lock) { 209 pj_lock_acquire(turn_sock->lock); 210 } 211 212 if (turn_sock->sess) { 213 pj_turn_session_set_user_data(turn_sock->sess, NULL); 214 pj_turn_session_shutdown(turn_sock->sess); 215 turn_sock->sess = NULL; 216 } 217 218 if (turn_sock->active_sock) { 219 pj_activesock_set_user_data(turn_sock->active_sock, NULL); 220 pj_activesock_close(turn_sock->active_sock); 221 turn_sock->active_sock = NULL; 222 } 223 224 if (turn_sock->lock) { 225 pj_lock_release(turn_sock->lock); 226 pj_lock_destroy(turn_sock->lock); 227 turn_sock->lock = NULL; 228 } 215 static void turn_sock_on_destroy(void *comp) 216 { 217 pj_turn_sock *turn_sock = (pj_turn_sock*) comp; 229 218 230 219 if (turn_sock->pool) { 231 220 pj_pool_t *pool = turn_sock->pool; 221 PJ_LOG(4,(turn_sock->obj_name, "TURN socket destroyed")); 232 222 turn_sock->pool = NULL; 233 223 pj_pool_release(pool); … … 235 225 } 236 226 227 static void destroy(pj_turn_sock *turn_sock) 228 { 229 PJ_LOG(4,(turn_sock->obj_name, "TURN socket destroy request, ref_cnt=%d", 230 pj_grp_lock_get_ref(turn_sock->grp_lock))); 231 232 pj_grp_lock_acquire(turn_sock->grp_lock); 233 if (turn_sock->is_destroying) { 234 pj_grp_lock_release(turn_sock->grp_lock); 235 return; 236 } 237 238 turn_sock->is_destroying = PJ_TRUE; 239 if (turn_sock->sess) 240 pj_turn_session_shutdown(turn_sock->sess); 241 if (turn_sock->active_sock) 242 pj_activesock_close(turn_sock->active_sock); 243 pj_grp_lock_dec_ref(turn_sock->grp_lock); 244 pj_grp_lock_release(turn_sock->grp_lock); 245 } 237 246 238 247 PJ_DEF(void) pj_turn_sock_destroy(pj_turn_sock *turn_sock) 239 248 { 240 pj_lock_acquire(turn_sock->lock); 241 turn_sock->destroy_request = PJ_TRUE; 249 pj_grp_lock_acquire(turn_sock->grp_lock); 250 if (turn_sock->is_destroying) { 251 pj_grp_lock_release(turn_sock->grp_lock); 252 return; 253 } 242 254 243 255 if (turn_sock->sess) { … … 247 259 * destroy ourselves. 
248 260 */ 249 pj_lock_release(turn_sock->lock);250 261 } else { 251 pj_lock_release(turn_sock->lock);252 262 destroy(turn_sock); 253 263 } 254 264 265 pj_grp_lock_release(turn_sock->grp_lock); 255 266 } 256 267 … … 268 279 switch (eid) { 269 280 case TIMER_DESTROY: 270 PJ_LOG(5,(turn_sock->obj_name, "Destroying TURN"));271 281 destroy(turn_sock); 272 282 break; … … 338 348 PJ_DEF(pj_status_t) pj_turn_sock_lock(pj_turn_sock *turn_sock) 339 349 { 340 return pj_ lock_acquire(turn_sock->lock);350 return pj_grp_lock_acquire(turn_sock->grp_lock); 341 351 } 342 352 … … 346 356 PJ_DEF(pj_status_t) pj_turn_sock_unlock(pj_turn_sock *turn_sock) 347 357 { 348 return pj_ lock_release(turn_sock->lock);358 return pj_grp_lock_release(turn_sock->grp_lock); 349 359 } 350 360 … … 381 391 PJ_ASSERT_RETURN(turn_sock && domain, PJ_EINVAL); 382 392 PJ_ASSERT_RETURN(turn_sock->sess, PJ_EINVALIDOP); 393 394 pj_grp_lock_acquire(turn_sock->grp_lock); 383 395 384 396 /* Copy alloc param. We will call session_alloc() only after the … … 396 408 if (status != PJ_SUCCESS) { 397 409 sess_fail(turn_sock, "Error setting credential", status); 410 pj_grp_lock_release(turn_sock->grp_lock); 398 411 return status; 399 412 } … … 405 418 if (status != PJ_SUCCESS) { 406 419 sess_fail(turn_sock, "Error setting TURN server", status); 420 pj_grp_lock_release(turn_sock->grp_lock); 407 421 return status; 408 422 } … … 411 425 * to RESOLVED state. 412 426 */ 413 427 pj_grp_lock_release(turn_sock->grp_lock); 414 428 return PJ_SUCCESS; 415 429 } … … 473 487 return PJ_FALSE; 474 488 489 pj_grp_lock_acquire(turn_sock->grp_lock); 490 475 491 /* TURN session may have already been destroyed here. 476 492 * See ticket #1557 (http://trac.pjsip.org/repos/ticket/1557). … … 478 494 if (!turn_sock->sess) { 479 495 sess_fail(turn_sock, "TURN session already destroyed", status); 496 pj_grp_lock_release(turn_sock->grp_lock); 480 497 return PJ_FALSE; 481 498 } … … 483 500 if (status != PJ_SUCCESS) { 484 501 sess_fail(turn_sock, "TCP connect() error", status); 502 pj_grp_lock_release(turn_sock->grp_lock); 485 503 return PJ_FALSE; 486 504 } … … 501 519 if (status != PJ_SUCCESS) { 502 520 sess_fail(turn_sock, "Error sending ALLOCATE", status); 521 pj_grp_lock_release(turn_sock->grp_lock); 503 522 return PJ_FALSE; 504 523 } 505 524 525 pj_grp_lock_release(turn_sock->grp_lock); 506 526 return PJ_TRUE; 507 527 } … … 563 583 564 584 turn_sock = (pj_turn_sock*) pj_activesock_get_user_data(asock); 565 pj_ lock_acquire(turn_sock->lock);566 567 if (status == PJ_SUCCESS && turn_sock->sess ) {585 pj_grp_lock_acquire(turn_sock->grp_lock); 586 587 if (status == PJ_SUCCESS && turn_sock->sess && !turn_sock->is_destroying) { 568 588 /* Report incoming packet to TURN session, repeat while we have 569 589 * "packet" in the buffer (required for stream-oriented transports) … … 615 635 616 636 on_return: 617 pj_ lock_release(turn_sock->lock);637 pj_grp_lock_release(turn_sock->grp_lock); 618 638 619 639 return ret; … … 635 655 pj_status_t status; 636 656 637 if (turn_sock == NULL ) {657 if (turn_sock == NULL || turn_sock->is_destroying) { 638 658 /* We've been destroyed */ 639 659 // https://trac.pjsip.org/repos/ticket/1316 … … 681 701 pj_turn_sock *turn_sock = (pj_turn_sock*) 682 702 pj_turn_session_get_user_data(sess); 683 if (turn_sock == NULL ) {703 if (turn_sock == NULL || turn_sock->is_destroying) { 684 704 /* We've been destroyed */ 685 705 return; … … 730 750 int sock_type; 731 751 pj_sock_t sock; 752 pj_activesock_cfg asock_cfg; 732 753 pj_activesock_cb asock_cb; 733 
754 pj_sockaddr bound_addr, *cfg_bind_addr; … … 791 812 792 813 /* Create active socket */ 814 pj_activesock_cfg_default(&asock_cfg); 815 asock_cfg.grp_lock = turn_sock->grp_lock; 816 793 817 pj_bzero(&asock_cb, sizeof(asock_cb)); 794 818 asock_cb.on_data_read = &on_data_read; 795 819 asock_cb.on_connect_complete = &on_connect_complete; 796 820 status = pj_activesock_create(turn_sock->pool, sock, 797 sock_type, NULL,821 sock_type, &asock_cfg, 798 822 turn_sock->cfg.ioqueue, &asock_cb, 799 823 turn_sock, … … 836 860 pj_turn_session_set_user_data(sess, NULL); 837 861 838 if (turn_sock->timer.id) { 839 pj_timer_heap_cancel(turn_sock->cfg.timer_heap, &turn_sock->timer); 840 turn_sock->timer.id = 0; 841 } 842 843 turn_sock->timer.id = TIMER_DESTROY; 844 pj_timer_heap_schedule(turn_sock->cfg.timer_heap, &turn_sock->timer, 845 &delay); 846 } 847 } 848 849 862 pj_timer_heap_cancel_if_active(turn_sock->cfg.timer_heap, 863 &turn_sock->timer, 0); 864 pj_timer_heap_schedule_w_grp_lock(turn_sock->cfg.timer_heap, 865 &turn_sock->timer, 866 &delay, TIMER_DESTROY, 867 turn_sock->grp_lock); 868 } 869 } 870 871 -
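turn_sock.c completes the pattern used by every component in this changeset: take one self-reference and register a destructor handler at create time, have destroy() merely flag the object, stop its timers and drop that reference, and do the real cleanup only when the count reaches zero. Roughly, as a standalone sketch (struct my_sock and its helpers are hypothetical):

    #include <pjlib.h>

    struct my_sock {
        pj_pool_t     *pool;
        pj_grp_lock_t *grp_lock;
        pj_bool_t      is_destroying;
    };

    /* Runs only when the group lock's reference count hits zero */
    static void my_sock_on_destroy(void *member)
    {
        struct my_sock *obj = (struct my_sock*)member;
        pj_pool_release(obj->pool);          /* final cleanup only */
    }

    static pj_status_t my_sock_create(pj_pool_t *pool,
                                      struct my_sock **p_obj)
    {
        struct my_sock *obj = PJ_POOL_ZALLOC_T(pool, struct my_sock);
        pj_status_t status;

        obj->pool = pool;
        status = pj_grp_lock_create(pool, NULL, &obj->grp_lock);
        if (status != PJ_SUCCESS)
            return status;

        pj_grp_lock_add_ref(obj->grp_lock);  /* our self-reference */
        pj_grp_lock_add_handler(obj->grp_lock, pool, obj,
                                &my_sock_on_destroy);
        *p_obj = obj;
        return PJ_SUCCESS;
    }

    static void my_sock_destroy(struct my_sock *obj)
    {
        pj_grp_lock_acquire(obj->grp_lock);
        if (obj->is_destroying) {
            pj_grp_lock_release(obj->grp_lock);
            return;                          /* destroy already called */
        }
        obj->is_destroying = PJ_TRUE;
        /* cancel timers / close sockets here so that they release
         * their own references
         */
        pj_grp_lock_dec_ref(obj->grp_lock);  /* may not be the last */
        pj_grp_lock_release(obj->grp_lock);  /* destructor fires at 0 */
    }

The pjturn-srv changes that close this changeset are the mechanical side of the same API move: existing call sites simply pass NULL for the new parameter and keep their old, self-locking behavior.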
pjproject/trunk/pjnath/src/pjturn-srv/allocation.c
r3553 r4360 339 339 sess_cb.on_rx_indication = &stun_on_rx_indication; 340 340 status = pj_stun_session_create(&srv->core.stun_cfg, alloc->obj_name, 341 &sess_cb, PJ_FALSE, &alloc->sess);341 &sess_cb, PJ_FALSE, NULL, &alloc->sess); 342 342 if (status != PJ_SUCCESS) { 343 343 goto on_error; -
pjproject/trunk/pjnath/src/pjturn-srv/server.c
r3553 r4360 156 156 157 157 status = pj_stun_session_create(&srv->core.stun_cfg, srv->obj_name, 158 &sess_cb, PJ_FALSE, &srv->core.stun_sess); 158 &sess_cb, PJ_FALSE, NULL, 159 &srv->core.stun_sess); 159 160 if (status != PJ_SUCCESS) { 160 161 goto on_error;