- Timestamp: Mar 5, 2013 11:59:54 AM (12 years ago)
- Location: pjproject/trunk
- Files: 12 edited
Legend: in the hunks below, lines marked '-' were removed and lines marked '+' were added; unmarked lines are unchanged context, and '...' marks elided code.
pjproject/trunk/pjsip-apps/src/samples/stateful_proxy.c
(diff r3553 → r4420)

     pjsip_tx_data *cancel;

-    pj_mutex_lock(uas_data->uac_tsx->mutex);
+    pj_grp_lock_acquire(uas_data->uac_tsx->grp_lock);

     pjsip_endpt_create_cancel(global.endpt, uas_data->uac_tsx->last_tx,
                               ...
     pjsip_endpt_send_request(global.endpt, cancel, -1, NULL, NULL);

-    pj_mutex_unlock(uas_data->uac_tsx->mutex);
+    pj_grp_lock_release(uas_data->uac_tsx->grp_lock);
 }

 /* Unlock UAS tsx because it is locked in find_tsx() */
-pj_mutex_unlock(invite_uas->mutex);
+pj_grp_lock_release(invite_uas->grp_lock);
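This pattern recurs throughout the changeset: callers that used to lock or unlock a transaction's private mutex now acquire or release its group lock, and pjsip_tsx_layer_find_tsx() with lock=PJ_TRUE still returns with that lock held. A minimal caller-side sketch (the helper name and key are illustrative, not part of this changeset):

    #include <pjsip.h>

    static void poke_tsx(const pj_str_t *tsx_key)
    {
        /* With lock=PJ_TRUE, find_tsx() returns with the group lock
         * already held, mirroring the old tsx->mutex behavior. */
        pjsip_transaction *tsx = pjsip_tsx_layer_find_tsx(tsx_key, PJ_TRUE);
        if (tsx) {
            /* ... inspect or drive the transaction here ... */
            pj_grp_lock_release(tsx->grp_lock); /* was pj_mutex_unlock(tsx->mutex) */
        }
    }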
pjproject/trunk/pjsip/include/pjsip/sip_transaction.h
(diff r3553 → r4420)

     pjsip_module   *tsx_user;    /**< Transaction user.      */
     pjsip_endpoint *endpt;       /**< Endpoint instance.     */
-    pj_mutex_t     *mutex;       /**< Mutex for this tsx.    */
+    pj_bool_t       terminating; /**< terminate() was called */
+    pj_grp_lock_t  *grp_lock;    /**< Transaction grp lock.  */
     pj_mutex_t     *mutex_b;     /**< Second mutex to avoid
                                       deadlock. It is used to
                                       ...
...
 /**
+ * Variant of pjsip_tsx_create_uac() with additional parameter to specify
+ * the group lock to use. Group lock can be used to synchronize locking
+ * among several objects to prevent deadlock, and to synchronize the
+ * lifetime of objects sharing the same group lock.
+ *
+ * See pjsip_tsx_create_uac() for general info about this function.
+ *
+ * @param tsx_user  Module to be registered as transaction user of the new
+ *                  transaction, which will receive notification from the
+ *                  transaction via on_tsx_state() callback.
+ * @param tdata     The outgoing request message.
+ * @param grp_lock  Optional group lock to use by this transaction. If
+ *                  the value is NULL, the transaction will create its
+ *                  own group lock.
+ * @param p_tsx     On return will contain the new transaction instance.
+ *
+ * @return          PJ_SUCCESS if successful.
+ */
+PJ_DECL(pj_status_t) pjsip_tsx_create_uac2(pjsip_module *tsx_user,
+                                           pjsip_tx_data *tdata,
+                                           pj_grp_lock_t *grp_lock,
+                                           pjsip_transaction **p_tsx);
+
+/**
  * Create, initialize, and register a new transaction as UAS from the
  * specified incoming request in \c rdata. After calling this function,
  ...
                                          pjsip_transaction **p_tsx );

+/**
+ * Variant of pjsip_tsx_create_uas() with additional parameter to specify
+ * the group lock to use. Group lock can be used to synchronize locking
+ * among several objects to prevent deadlock, and to synchronize the
+ * lifetime of objects sharing the same group lock.
+ *
+ * See pjsip_tsx_create_uas() for general info about this function.
+ *
+ * @param tsx_user  Module to be registered as transaction user of the new
+ *                  transaction, which will receive notification from the
+ *                  transaction via on_tsx_state() callback.
+ * @param rdata     The received incoming request.
+ * @param grp_lock  Optional group lock to use by this transaction. If
+ *                  the value is NULL, the transaction will create its
+ *                  own group lock.
+ * @param p_tsx     On return will contain the new transaction instance.
+ *
+ * @return          PJ_SUCCESS if successful.
+ */
+PJ_DECL(pj_status_t) pjsip_tsx_create_uas2(pjsip_module *tsx_user,
+                                           pjsip_rx_data *rdata,
+                                           pj_grp_lock_t *grp_lock,
+                                           pjsip_transaction **p_tsx );
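The new grp_lock parameter is what lets a transaction share one lock, and one lifetime, with the object that owns it (a dialog, for example). A sketch of how a caller might use pjsip_tsx_create_uac2(); the helper and its arguments are illustrative, not part of this changeset:

    #include <pjsip.h>
    #include <pjlib.h>

    static pj_status_t create_tsx_shared(pjsip_module *user,
                                         pjsip_tx_data *tdata,
                                         pj_pool_t *pool,
                                         pjsip_transaction **p_tsx)
    {
        pj_grp_lock_t *grp_lock;
        pj_status_t status;

        status = pj_grp_lock_create(pool, NULL, &grp_lock);
        if (status != PJ_SUCCESS)
            return status;

        /* The owner keeps its own reference for lifetime control;
         * the transaction adds another one internally. */
        pj_grp_lock_add_ref(grp_lock);

        /* A non-NULL grp_lock makes the transaction adopt this lock
         * instead of creating its own. */
        return pjsip_tsx_create_uac2(user, tdata, grp_lock, p_tsx);
    }

The owner later calls pj_grp_lock_dec_ref() when it is done; the lock and everything registered on it are destroyed only when the last reference drops.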
pjproject/trunk/pjsip/src/pjsip-ua/sip_inv.c
(diff r4367 → r4420)

     if (invite_tsx)
-        pj_mutex_unlock(invite_tsx->mutex);
+        pj_grp_lock_release(invite_tsx->grp_lock);
 }
pjproject/trunk/pjsip/src/pjsip/sip_transaction.c
(diff r4208 → r4420)

 #include <pj/pool.h>
 #include <pj/os.h>
+#include <pj/rand.h>
 #include <pj/string.h>
 #include <pj/assert.h>
...
-/* Thread Local Storage ID for transaction lock */
-static long pjsip_tsx_lock_tls_id;
-
 /* Transaction state names */
 static const char *state_str[] =
...
     TSX_HAS_RESOLVED_SERVER = 16,
 };
-
-/* Transaction lock. */
-typedef struct tsx_lock_data {
-    struct tsx_lock_data *prev;
-    pjsip_transaction    *tsx;
-    int                   is_alive;
-} tsx_lock_data;

 /* Timer timeout value constants */
...
 /* Prototypes. */
-static void lock_tsx(pjsip_transaction *tsx, struct tsx_lock_data *lck);
-static pj_status_t unlock_tsx( pjsip_transaction *tsx,
-                               struct tsx_lock_data *lck);
 static pj_status_t tsx_on_state_null( pjsip_transaction *tsx,
                                       pjsip_event *event);
...
 static pj_status_t tsx_create( pjsip_module *tsx_user,
+                               pj_grp_lock_t *grp_lock,
                                pjsip_transaction **p_tsx);
-static pj_status_t tsx_destroy( pjsip_transaction *tsx );
+static void tsx_on_destroy(void *arg);
+static pj_status_t tsx_shutdown( pjsip_transaction *tsx );
 static void tsx_resched_retransmission( pjsip_transaction *tsx );
 static pj_status_t tsx_retransmit( pjsip_transaction *tsx, int resched);
...
(in the transaction-key creation function:)
 {
 #define SEPARATOR   '$'
-    char *key, *p, *end;
+    char *key, *p;
     int len;
     pj_size_t len_required;
-    pjsip_uri *req_uri;
     pj_str_t *host;
...
     host = &rdata->msg_info.via->sent_by.host;
-    req_uri = (pjsip_uri*)rdata->msg_info.msg->line.req.uri;
...
            16;                /* Separator+Allowance. */
     key = p = (char*) pj_pool_alloc(pool, len_required);
-    end = p + len_required;

     /* Add role. */
...
(in pjsip_tsx_layer_init_module():)
     timeout_timer_val = td_timer_val;

-    /* Initialize TLS ID for transaction lock. */
-    status = pj_thread_local_alloc(&pjsip_tsx_lock_tls_id);
-    if (status != PJ_SUCCESS)
-        return status;
-
-    pj_thread_local_set(pjsip_tsx_lock_tls_id, NULL);
-
     /*
      * Initialize transaction layer structure.
...
-    /* Create mutex. */
+    /* Create group lock. */
     status = pj_mutex_create_recursive(pool, "tsxlayer", &mod_tsx_layer.mutex);
     if (status != PJ_SUCCESS) {
...
(in mod_tsx_layer_find_tsx():)
     PJ_TODO(FIX_RACE_CONDITION_HERE);
+    PJ_RACE_ME(5);
+
     if (tsx && lock)
-        pj_mutex_lock(tsx->mutex);
+        pj_grp_lock_acquire(tsx->grp_lock);

     return tsx;
...
(in the transaction layer destroy path:)
             pjsip_tsx_terminate(tsx, PJSIP_SC_SERVICE_UNAVAILABLE);
             mod_tsx_layer_unregister_tsx(tsx);
-            tsx_destroy(tsx);
+            tsx_shutdown(tsx);
         }
         it = next;
...
     /* Release pool. */
     pjsip_endpt_release_pool(mod_tsx_layer.endpt, mod_tsx_layer.pool);
-
-    /* Free TLS */
-    pj_thread_local_free(pjsip_tsx_lock_tls_id);

     /* Mark as unregistered. */
...
(in both mod_tsx_layer_on_rx_request() and mod_tsx_layer_on_rx_response():)
     PJ_TODO(FIX_RACE_CONDITION_HERE);
+    PJ_RACE_ME(5);

     /* Pass the message to the transaction. */
...
-/*
- * Lock transaction and set the value of Thread Local Storage.
- */
-static void lock_tsx(pjsip_transaction *tsx, struct tsx_lock_data *lck)
-{
-    struct tsx_lock_data *prev_data;
-
-    pj_mutex_lock(tsx->mutex);
-    prev_data = (struct tsx_lock_data *)
-                pj_thread_local_get(pjsip_tsx_lock_tls_id);
-    lck->prev = prev_data;
-    lck->tsx = tsx;
-    lck->is_alive = 1;
-    pj_thread_local_set(pjsip_tsx_lock_tls_id, lck);
-}
-
-/*
- * Unlock transaction.
- * This will selectively unlock the mutex ONLY IF the transaction has not been
- * destroyed. The function knows whether the transaction has been destroyed
- * because when transaction is destroyed the is_alive flag for the transaction
- * will be set to zero.
- */
-static pj_status_t unlock_tsx( pjsip_transaction *tsx,
-                               struct tsx_lock_data *lck)
-{
-    pj_assert( (void*)pj_thread_local_get(pjsip_tsx_lock_tls_id) == lck);
-    pj_assert( lck->tsx == tsx );
-    pj_thread_local_set(pjsip_tsx_lock_tls_id, lck->prev);
-    if (lck->is_alive)
-        pj_mutex_unlock(tsx->mutex);
-
-    return lck->is_alive ? PJ_SUCCESS : PJSIP_ETSXDESTROYED;
-}
-
 /* Lock transaction for accessing the timeout timer only. */
 static void lock_timer(pjsip_transaction *tsx)
...
 static pj_status_t tsx_create( pjsip_module *tsx_user,
+                               pj_grp_lock_t *grp_lock,
                                pjsip_transaction **p_tsx)
...
     tsx->timeout_timer.cb = &tsx_timer_callback;

-    status = pj_mutex_create_recursive(pool, tsx->obj_name, &tsx->mutex);
-    if (status != PJ_SUCCESS) {
-        pjsip_endpt_release_pool(mod_tsx_layer.endpt, pool);
-        return status;
-    }
+    if (grp_lock) {
+        tsx->grp_lock = grp_lock;
+    } else {
+        status = pj_grp_lock_create(pool, NULL, &tsx->grp_lock);
+        if (status != PJ_SUCCESS) {
+            pjsip_endpt_release_pool(mod_tsx_layer.endpt, pool);
+            return status;
+        }
+    }
+
+    pj_grp_lock_add_ref(tsx->grp_lock);
+    pj_grp_lock_add_handler(tsx->grp_lock, tsx->pool, tsx, &tsx_on_destroy);

     status = pj_mutex_create_simple(pool, tsx->obj_name, &tsx->mutex_b);
     if (status != PJ_SUCCESS) {
-        pj_mutex_destroy(tsx->mutex);
-        pjsip_endpt_release_pool(mod_tsx_layer.endpt, pool);
+        tsx_shutdown(tsx);
         return status;
     }
...
-/* Destroy transaction. */
-static pj_status_t tsx_destroy( pjsip_transaction *tsx )
-{
-    struct tsx_lock_data *lck;
-
+/* Really destroy transaction, when grp_lock reference is zero */
+static void tsx_on_destroy( void *arg )
+{
+    pjsip_transaction *tsx = (pjsip_transaction*)arg;
+
+    PJ_LOG(5,(tsx->obj_name, "Transaction destroyed!"));
+
+    pj_mutex_destroy(tsx->mutex_b);
+    pjsip_endpt_release_pool(tsx->endpt, tsx->pool);
+}
+
+/* Shutdown transaction. */
+static pj_status_t tsx_shutdown( pjsip_transaction *tsx )
+{
     /* Release the transport */
     tsx_update_transport(tsx, NULL);

-    /* Decrement reference counter in transport selector */
-    pjsip_tpselector_dec_ref(&tsx->tp_sel);
+    /* Decrement reference counter in transport selector, only if
+     * we haven't been called before */
+    if (!tsx->terminating) {
+        pjsip_tpselector_dec_ref(&tsx->tp_sel);
+    }

     /* Free last transmitted message. */
...
     /* Clear some pending flags. */
     tsx->transport_flag &= ~(TSX_HAS_PENDING_RESCHED | TSX_HAS_PENDING_SEND);

     /* Refuse to destroy transaction if it has pending resolving. */
...
         PJ_LOG(4,(tsx->obj_name, "Will destroy later because transport is "
                   "in progress"));
-        return PJ_EBUSY;
-    }
-
-    /* Clear TLS, so that mutex will not be unlocked */
-    lck = (struct tsx_lock_data*) pj_thread_local_get(pjsip_tsx_lock_tls_id);
-    while (lck) {
-        if (lck->tsx == tsx) {
-            lck->is_alive = 0;
-        }
-        lck = lck->prev;
-    }
-
-    pj_mutex_destroy(tsx->mutex_b);
-    pj_mutex_destroy(tsx->mutex);
-
-    PJ_LOG(5,(tsx->obj_name, "Transaction destroyed!"));
-
-    pjsip_endpt_release_pool(tsx->endpt, tsx->pool);
+    }
+
+    if (!tsx->terminating) {
+        tsx->terminating = PJ_TRUE;
+        pj_grp_lock_dec_ref(tsx->grp_lock);
+    }
+
+    /* No access to tsx after this, it may have been destroyed */

     return PJ_SUCCESS;
 }
...
(tsx_timer_callback():)
     pjsip_event event;
     pjsip_transaction *tsx = (pjsip_transaction*) entry->user_data;
-    struct tsx_lock_data lck;

     PJ_UNUSED_ARG(theap);
...
     /* Dispatch event to transaction. */
-    lock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
     (*tsx->state_handler)(tsx, &event);
-    unlock_tsx(tsx, &lck);
+    pj_grp_lock_release(tsx->grp_lock);

     pj_log_pop_indent();
...
(tsx_set_state(), on reaching the DESTROYED state:)
     /* Destroy transaction. */
-    tsx_destroy(tsx);
+    tsx_shutdown(tsx);
 }
...
 PJ_DEF(pj_status_t) pjsip_tsx_create_uac( pjsip_module *tsx_user,
                                           pjsip_tx_data *tdata,
                                           pjsip_transaction **p_tsx)
+{
+    return pjsip_tsx_create_uac2(tsx_user, tdata, NULL, p_tsx);
+}
+
+PJ_DEF(pj_status_t) pjsip_tsx_create_uac2(pjsip_module *tsx_user,
+                                          pjsip_tx_data *tdata,
+                                          pj_grp_lock_t *grp_lock,
+                                          pjsip_transaction **p_tsx)
 {
...
     pjsip_via_hdr *via;
     pjsip_host_info dst_info;
-    struct tsx_lock_data lck;
     pj_status_t status;
...
     /* Create transaction instance. */
-    status = tsx_create( tsx_user, &tsx);
+    status = tsx_create( tsx_user, grp_lock, &tsx);
     if (status != PJ_SUCCESS)
         return status;

     /* Lock transaction. */
-    lock_tsx(tsx, &lck);
+    pj_grp_lock_acquire(tsx->grp_lock);
...
     status = pjsip_get_request_dest(tdata, &dst_info);
     if (status != PJ_SUCCESS) {
-        unlock_tsx(tsx, &lck);
-        tsx_destroy(tsx);
+        pj_grp_lock_release(tsx->grp_lock);
+        tsx_shutdown(tsx);
         return status;
     }
...
(the same release-then-shutdown change is applied to the branch_param
failure path, and the final "Unlock transaction and return" now uses
pj_grp_lock_release().)
...
 PJ_DEF(pj_status_t) pjsip_tsx_create_uas( pjsip_module *tsx_user,
                                           pjsip_rx_data *rdata,
                                           pjsip_transaction **p_tsx)
+{
+    return pjsip_tsx_create_uas2(tsx_user, rdata, NULL, p_tsx);
+}
+
+PJ_DEF(pj_status_t) pjsip_tsx_create_uas2(pjsip_module *tsx_user,
+                                          pjsip_rx_data *rdata,
+                                          pj_grp_lock_t *grp_lock,
+                                          pjsip_transaction **p_tsx)
 {

pjsip_tsx_create_uas2() receives the same treatment as the UAC variant:
the lck local is removed, tsx_create() is called with grp_lock, the
transaction is locked with pj_grp_lock_acquire(), and every error path
(tsx_user registration, pjsip_get_response_addr(), transaction key
registration) now does pj_grp_lock_release() + tsx_shutdown() instead of
unlock_tsx() + tsx_destroy().

(in the timeout-timer handling:)
-    /* Note: must not call lock_tsx() as that would introduce deadlock.
-     * See #1121.
+    /* Note: must not call pj_grp_lock_acquire(tsx->grp_lock) as
+     * that would introduce deadlock. See #1121.
      */
     lock_timer(tsx);
...
(send_msg_callback() now takes the group lock before resetting the
pending-transmit state, instead of after:)
+    pj_grp_lock_acquire(tsx->grp_lock);
+
     /* Reset */
     tdata->mod_data[mod_tsx_layer.mod.id] = NULL;
     tsx->pending_tx = NULL;
-
-    lock_tsx(tsx, &lck);

     if (sent > 0) {
...
     } else {
-        char errmsg[PJ_ERR_MSG_SIZE];
-
-        PJ_LOG(2,(tsx->obj_name,
-                  "Temporary failure in sending %s, "
-                  "will try next server. Err=%d (%s)",
-                  pjsip_tx_data_get_info(send_state->tdata), -sent,
-                  pj_strerror(-sent, errmsg, sizeof(errmsg)).ptr));
+        PJ_PERROR(2,(tsx->obj_name, -sent,
+                     "Temporary failure in sending %s, "
+                     "will try next server",
+                     pjsip_tx_data_get_info(send_state->tdata)));

         /* Reset retransmission count */
...
(tsx_send_msg():)
     if (status != PJ_SUCCESS) {
-        char errmsg[PJ_ERR_MSG_SIZE];
-
-        PJ_LOG(2,(tsx->obj_name,
-                  "Error sending %s: Err=%d (%s)",
-                  pjsip_tx_data_get_info(tdata), status,
-                  pj_strerror(status, errmsg, sizeof(errmsg)).ptr));
+        PJ_PERROR(2,(tsx->obj_name, status,
+                     "Error sending %s",
+                     pjsip_tx_data_get_info(tdata)));

         /* On error, release transport to force using full transport
...
The remaining hunks are the same mechanical migration: the
struct tsx_lock_data lck locals disappear, and lock_tsx(tsx, &lck) /
unlock_tsx(tsx, &lck) become pj_grp_lock_acquire(tsx->grp_lock) /
pj_grp_lock_release(tsx->grp_lock) in pjsip_tsx_set_transport(),
pjsip_tsx_terminate(), pjsip_tsx_stop_retransmit(), pjsip_tsx_send_msg(),
pjsip_tsx_recv_msg() (which also drops its unused status variable),
transport_callback(), tsx_tp_state_callback() and tsx_resend().
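The heart of the change is the split of tsx_destroy() into tsx_shutdown(), which drops the transaction's group-lock reference, and tsx_on_destroy(), which the group lock invokes only when the last reference is gone. The same pattern, reduced to a generic object, looks roughly like this (my_obj and its functions are illustrative, not PJSIP API):

    #include <pjlib.h>

    typedef struct my_obj {
        pj_pool_t     *pool;
        pj_grp_lock_t *grp_lock;
    } my_obj;

    /* Runs only when the last group-lock reference is released. */
    static void my_obj_on_destroy(void *arg)
    {
        my_obj *obj = (my_obj*)arg;
        pj_pool_release(obj->pool);   /* the pool owns the object itself */
    }

    static pj_status_t my_obj_create(pj_pool_t *pool, my_obj **p_obj)
    {
        my_obj *obj = PJ_POOL_ZALLOC_T(pool, my_obj);
        pj_status_t status;

        obj->pool = pool;
        status = pj_grp_lock_create(pool, NULL, &obj->grp_lock);
        if (status != PJ_SUCCESS)
            return status;

        /* One reference for ourselves, plus a destroy handler. */
        pj_grp_lock_add_ref(obj->grp_lock);
        pj_grp_lock_add_handler(obj->grp_lock, pool, obj,
                                &my_obj_on_destroy);
        *p_obj = obj;
        return PJ_SUCCESS;
    }

    static void my_obj_shutdown(my_obj *obj)
    {
        /* Like tsx_shutdown(): drop our reference. Destruction happens
         * now, or later if other parties still hold references. */
        pj_grp_lock_dec_ref(obj->grp_lock);
    }

This is also why tsx_shutdown() must not touch the transaction after pj_grp_lock_dec_ref(): the destroy handler may already have released the pool.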
pjproject/trunk/pjsip/src/pjsip/sip_ua_layer.c
(diff r4208 → r4420)

     if (tsx) {
         dlg = (pjsip_dialog*) tsx->mod_data[mod_ua.mod.id];
-        pj_mutex_unlock(tsx->mutex);
+        pj_grp_lock_release(tsx->grp_lock);

         /* Dlg may be NULL on some extreme condition
pjproject/trunk/pjsip/src/test/regc_test.c
(diff r4094 → r4420)

         return -210;
     }
-    if (client_result.code != client_cfg->code) {
+    if (client_result.code != client_cfg->code &&
+        client_cfg->code != 502 && client_cfg->code != 503 &&
+        client_result.code != 502 && client_result.code != 503)
+    {
         PJ_LOG(3,(THIS_FILE, "    error: expecting code=%d, got code=%d",
                   client_cfg->code, client_result.code));
pjproject/trunk/pjsip/src/test/test.c
(diff r3553 → r4420)

 pjsip_endpoint *endpt;
+pj_caching_pool caching_pool;
 int log_level = 3;
 int param_log_decor = PJ_LOG_HAS_NEWLINE | PJ_LOG_HAS_TIME |
-                      PJ_LOG_HAS_MICRO_SEC;
+                      PJ_LOG_HAS_MICRO_SEC | PJ_LOG_HAS_INDENT;

 static pj_oshandle_t fd_report;
...
 {
     pj_status_t rc;
-    pj_caching_pool caching_pool;
     const char *filename;
     unsigned tsx_test_cnt=0;
...
 #endif

+    /*
+     * Better be last because it recreates the endpt
+     */
+#if INCLUDE_TSX_DESTROY_TEST
+    DO_TEST(tsx_destroy_test());
+#endif

 on_return:
pjproject/trunk/pjsip/src/test/test.h
(diff r3553 → r4420)

 extern pjsip_endpoint *endpt;
+extern pj_caching_pool caching_pool;

 #define TEST_UDP_PORT 15060
...
 #define INCLUDE_RESOLVE_TEST     INCLUDE_TRANSPORT_GROUP
 #define INCLUDE_TSX_TEST         INCLUDE_TSX_GROUP
+#define INCLUDE_TSX_DESTROY_TEST INCLUDE_TSX_GROUP
 #define INCLUDE_INV_OA_TEST      INCLUDE_INV_GROUP
 #define INCLUDE_REGC_TEST        INCLUDE_REGC_GROUP
...
 int txdata_test(void);
 int tsx_bench(void);
+int tsx_destroy_test(void);
 int transport_udp_test(void);
 int transport_loop_test(void);
pjproject/trunk/pjsip/src/test/tsx_basic_test.c
(diff r3553 → r4420)

         return -40;
     }
-    pj_mutex_unlock(tsx->mutex);
+    pj_grp_lock_release(tsx->grp_lock);
 }
...
     return 0;
 }
+
+/**************************************************************************/
+
+struct tsx_test_state
+{
+    int pool_cnt;
+};
+
+static void save_tsx_test_state(struct tsx_test_state *st)
+{
+    st->pool_cnt = caching_pool.used_count;
+}
+
+static pj_status_t check_tsx_test_state(struct tsx_test_state *st)
+{
+    if (caching_pool.used_count > st->pool_cnt)
+        return -1;
+
+    return 0;
+}
+
+static void destroy_endpt()
+{
+    pjsip_endpt_destroy(endpt);
+    endpt = NULL;
+}
+
+static pj_status_t init_endpt()
+{
+    pj_str_t ns = { "10.187.27.172", 13};  /* just a random, unreachable IP */
+    pj_dns_resolver *resolver;
+    pj_status_t rc;
+
+    rc = pjsip_endpt_create(&caching_pool.factory, "endpt", &endpt);
+    if (rc != PJ_SUCCESS) {
+        app_perror("pjsip_endpt_create", rc);
+        return rc;
+    }
+
+    /* Start transaction layer module. */
+    rc = pjsip_tsx_layer_init_module(endpt);
+    if (rc != PJ_SUCCESS) {
+        app_perror("tsx_layer_init", rc);
+        return rc;
+    }
+
+    rc = pjsip_udp_transport_start(endpt, NULL, NULL, 1, NULL);
+    if (rc != PJ_SUCCESS) {
+        app_perror("udp init", rc);
+        return rc;
+    }
+
+    rc = pjsip_tcp_transport_start(endpt, NULL, 1, NULL);
+    if (rc != PJ_SUCCESS) {
+        app_perror("tcp init", rc);
+        return rc;
+    }
+
+    rc = pjsip_endpt_create_resolver(endpt, &resolver);
+    if (rc != PJ_SUCCESS) {
+        app_perror("create resolver", rc);
+        return rc;
+    }
+
+    pj_dns_resolver_set_ns(resolver, 1, &ns, NULL);
+
+    rc = pjsip_endpt_set_resolver(endpt, resolver);
+    if (rc != PJ_SUCCESS) {
+        app_perror("set resolver", rc);
+        return rc;
+    }
+
+    return PJ_SUCCESS;
+}
+
+static int tsx_create_and_send_req(void *arg)
+{
+    pj_str_t dst_uri = pj_str((char*)arg);
+    pj_str_t from_uri = pj_str((char*)"<sip:user@host>");
+    pjsip_tx_data *tdata;
+    pj_status_t status;
+
+    status = pjsip_endpt_create_request(endpt, &pjsip_options_method,
+                                        &dst_uri, &from_uri, &dst_uri,
+                                        NULL, NULL, -1, NULL,
+                                        &tdata);
+    if (status != PJ_SUCCESS)
+        return status;
+
+    status = pjsip_endpt_send_request(endpt, tdata, -1, NULL, NULL);
+    if (status != PJ_SUCCESS)
+        return status;
+
+    return PJ_SUCCESS;
+}
+
+int tsx_destroy_test()
+{
+    struct tsx_test_state state;
+    struct test_desc
+    {
+        const char *title;
+        int (*func)(void*);
+        void *arg;
+        int sleep_before_unload;
+        int sleep_after_unload;
+    } test_entries[] =
+    {
+        { "normal unable to resolve",
+          &tsx_create_and_send_req,
+          "sip:user@somehost",
+          10000, 1 },
+        { "resolve and destroy, wait",
+          &tsx_create_and_send_req,
+          "sip:user@somehost",
+          1, 10000 },
+        { "tcp connect and destroy",
+          &tsx_create_and_send_req,
+          "sip:user@10.125.36.63:58517;transport=tcp",
+          60000, 1000 },
+        { "tcp connect and destroy",
+          &tsx_create_and_send_req,
+          "sip:user@10.125.36.63:58517;transport=tcp",
+          1, 60000 },
+    };
+    int rc;
+    unsigned i;
+    const int INDENT = 2;
+
+    pj_log_add_indent(INDENT);
+    destroy_endpt();
+
+    for (i=0; i<PJ_ARRAY_SIZE(test_entries); ++i) {
+        struct test_desc *td = &test_entries[i];
+
+        PJ_LOG(3,(THIS_FILE, "%s", td->title));
+
+        pj_log_add_indent(INDENT);
+        save_tsx_test_state(&state);
+
+        rc = init_endpt();
+        if (rc != PJ_SUCCESS) {
+            pj_log_add_indent(-INDENT*2);
+            return -10;
+        }
+
+        rc = td->func(td->arg);
+        if (rc != PJ_SUCCESS) {
+            pj_log_add_indent(-INDENT*2);
+            return -20;
+        }
+
+        flush_events(td->sleep_before_unload);
+        pjsip_tsx_layer_destroy();
+        flush_events(td->sleep_after_unload);
+        destroy_endpt();
+
+        rc = check_tsx_test_state(&state);
+        if (rc != PJ_SUCCESS) {
+            init_endpt();
+            pj_log_add_indent(-INDENT*2);
+            return -30;
+        }
+
+        pj_log_add_indent(-INDENT);
+    }
+
+    init_endpt();
+
+    pj_log_add_indent(-INDENT);
+    return 0;
+}
pjproject/trunk/pjsip/src/test/tsx_bench.c
(diff r3553 → r4420)

     for (i=0; i<working_set; ++i) {
         if (tsx[i]) {
+            pj_timer_heap_t *th;
+
             pjsip_tsx_terminate(tsx[i], 601);
             tsx[i] = NULL;
+
+            th = pjsip_endpt_get_timer_heap(endpt);
+            pj_timer_heap_poll(th, NULL);
         }
     }
...
(the same change is applied to the cleanup loop in the UAS benchmark)
...
         PJ_LOG(3,(THIS_FILE, "    test %d of %d..",
                   i+1, REPEAT));
+        PJ_LOG(3,(THIS_FILE, "     number of current tsx: %d",
+                  pjsip_tsx_layer_get_tsx_count()));
         status = uac_tsx_bench(WORKING_SET, &usec[i]);
         if (status != PJ_SUCCESS)
...
(and likewise before each uas_tsx_bench() iteration)
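With group locks, pjsip_tsx_terminate() no longer frees the transaction synchronously; destruction is deferred to the transaction's scheduled timer, which is why the benchmark now polls the timer heap between batches. A small helper capturing the idea (the function name is illustrative, not from the changeset):

    #include <pjsip.h>

    /* Drain pending timer entries so transactions terminated above are
     * actually destroyed before the next measurement starts. */
    static void drain_timer_heap(pjsip_endpoint *ept)
    {
        pj_timer_heap_t *th = pjsip_endpt_get_timer_heap(ept);
        pj_timer_heap_poll(th, NULL);
    }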
pjproject/trunk/pjsip/src/test/tsx_uac_test.c
(diff r4208 → r4420)

     /* Test the status code. */
-    if (tsx->status_code != PJSIP_SC_TSX_TRANSPORT_ERROR) {
-        PJ_LOG(3,(THIS_FILE,
-                  "   error: status code is %d instead of %d",
-                  tsx->status_code, PJSIP_SC_TSX_TRANSPORT_ERROR));
+    if (tsx->status_code != PJSIP_SC_TSX_TRANSPORT_ERROR &&
+        tsx->status_code != PJSIP_SC_BAD_GATEWAY)
+    {
+        PJ_LOG(3,(THIS_FILE,
+                  "   error: status code is %d instead of %d or %d",
+                  tsx->status_code, PJSIP_SC_TSX_TRANSPORT_ERROR,
+                  PJSIP_SC_BAD_GATEWAY));
         test_complete = -720;
     }
...
     if (tsx) {
         pjsip_tsx_terminate(tsx, PJSIP_SC_REQUEST_TERMINATED);
-        pj_mutex_unlock(tsx->mutex);
+        pj_grp_lock_release(tsx->grp_lock);
     } else {
         PJ_LOG(3,(THIS_FILE, "   error: uac transaction not found!"));
...
     if (tsx) {
         pjsip_tsx_terminate(tsx, PJSIP_SC_REQUEST_TERMINATED);
-        pj_mutex_unlock(tsx->mutex);
+        pj_grp_lock_release(tsx->grp_lock);
         flush_events(1000);
     }
pjproject/trunk/pjsip/src/test/tsx_uas_test.c
(diff r4208 → r4420)

         // Some tests do expect failure!
         //PJ_LOG(3,(THIS_FILE,"    error: timer unable to send response"));
-        pj_mutex_unlock(tsx->mutex);
+        pj_grp_lock_release(tsx->grp_lock);
         pjsip_tx_data_dec_ref(r->tdata);
         return;
     }

-    pj_mutex_unlock(tsx->mutex);
+    pj_grp_lock_release(tsx->grp_lock);
 }
...
     pjsip_tsx_terminate(tsx, status_code);
-    pj_mutex_unlock(tsx->mutex);
+    pj_grp_lock_release(tsx->grp_lock);
 }
...
     if (tsx) {
         pjsip_tsx_terminate(tsx, PJSIP_SC_REQUEST_TERMINATED);
-        pj_mutex_unlock(tsx->mutex);
+        pj_grp_lock_release(tsx->grp_lock);
         flush_events(1000);
     }