3 | | Helgrind issue !#1 |
4 | | |
5 | | Description: Data race over possible concurrent read/write access for the thread's quit flag.[[br]] |
6 | | One thread tries to write the quit flag while another tries to read it.[[br]] |
7 | | Status: false positive; this is intentional behaviour. |
8 | | |
9 | | {{{ |
10 | | ==26641== ---Thread-Announcement------------------------------------------ |
11 | | ==26641== |
12 | | ==26641== Thread #6 was created |
13 | | ==26641== at 0x5953FCE: clone (clone.S:74) |
14 | | ==26641== by 0x5352199: do_clone.constprop.3 (createthread.c:75) |
15 | | ==26641== by 0x53538BA: pthread_create@@GLIBC_2.2.5 (createthread.c:245) |
16 | | ==26641== by 0x4C30C90: pthread_create_WRK (hg_intercepts.c:269) |
17 | | ==26641== by 0x56704B: pj_thread_create (os_core_unix.c:616) |
18 | | ==26641== by 0x49CC57: pjmedia_endpt_create (endpoint.c:169) |
19 | | ==26641== by 0x43102A: pjsua_media_subsys_init (pjsua_media.c:80) |
20 | | ==26641== by 0x42B576: pjsua_init (pjsua_core.c:1058) |
21 | | ==26641== by 0x40800A: app_init (pjsua_app.c:1346) |
22 | | ==26641== by 0x408F1A: pjsua_app_init (pjsua_app.c:1881) |
23 | | ==26641== by 0x405AA9: main_func (main.c:108) |
24 | | ==26641== by 0x568914: pj_run_app (os_core_unix.c:1930) |
25 | | ==26641== |
26 | | ==26641== Possible data race during write of size 4 at 0x8182E8 by thread #1 |
27 | | ==26641== Locks held: none |
28 | | ==26641== at 0x42AA5E: pjsua_stop_worker_threads (pjsua_core.c:730) |
29 | | ==26641== by 0x42C753: pjsua_destroy2 (pjsua_core.c:1548) |
30 | | ==26641== by 0x42CDC9: pjsua_destroy (pjsua_core.c:1775) |
31 | | ==26641== by 0x409291: app_destroy (pjsua_app.c:2011) |
32 | | ==26641== by 0x409304: pjsua_app_destroy (pjsua_app.c:2035) |
33 | | ==26641== by 0x405ADF: main_func (main.c:116) |
34 | | ==26641== by 0x568914: pj_run_app (os_core_unix.c:1930) |
35 | | ==26641== by 0x405B32: main (main.c:129) |
36 | | ==26641== |
37 | | ==26641== This conflicts with a previous read of size 4 by thread #7 |
38 | | ==26641== Locks held: none |
39 | | ==26641== at 0x42A978: worker_thread (pjsua_core.c:691) |
40 | | ==26641== by 0x566E30: thread_main (os_core_unix.c:523) |
41 | | ==26641== by 0x4C30E26: mythread_wrapper (hg_intercepts.c:233) |
42 | | ==26641== by 0x5353181: start_thread (pthread_create.c:312) |
43 | | ==26641== by 0x595400C: clone (clone.S:111) |
44 | | }}} |
45 | | |
46 | | Helgrind issue !#2 |
47 | | |
48 | | Lock order inversion involving rwmutex_lock_read()[[br]] |
| 3 | 1. Data race over possible concurrent read/write access for the thread's quit flag.[[br]] |
| 4 | Description: One thread tries to write the quit flag while another tries to read it.[[br]] |
| 5 | Status: false positive; this is intentional behaviour. |
| 6 | |
| 7 | {{{ |
| 8 | ==26641== ---Thread-Announcement------------------------------------------ |
| 9 | ==26641== |
| 10 | ==26641== Thread #6 was created |
| 11 | ==26641== at 0x5953FCE: clone (clone.S:74) |
| 12 | ==26641== by 0x5352199: do_clone.constprop.3 (createthread.c:75) |
| 13 | ==26641== by 0x53538BA: pthread_create@@GLIBC_2.2.5 (createthread.c:245) |
| 14 | ==26641== by 0x4C30C90: pthread_create_WRK (hg_intercepts.c:269) |
| 15 | ==26641== by 0x56704B: pj_thread_create (os_core_unix.c:616) |
| 16 | ==26641== by 0x49CC57: pjmedia_endpt_create (endpoint.c:169) |
| 17 | ==26641== by 0x43102A: pjsua_media_subsys_init (pjsua_media.c:80) |
| 18 | ==26641== by 0x42B576: pjsua_init (pjsua_core.c:1058) |
| 19 | ==26641== by 0x40800A: app_init (pjsua_app.c:1346) |
| 20 | ==26641== by 0x408F1A: pjsua_app_init (pjsua_app.c:1881) |
| 21 | ==26641== by 0x405AA9: main_func (main.c:108) |
| 22 | ==26641== by 0x568914: pj_run_app (os_core_unix.c:1930) |
| 23 | ==26641== |
| 24 | ==26641== Possible data race during write of size 4 at 0x8182E8 by thread #1 |
| 25 | ==26641== Locks held: none |
| 26 | ==26641== at 0x42AA5E: pjsua_stop_worker_threads (pjsua_core.c:730) |
| 27 | ==26641== by 0x42C753: pjsua_destroy2 (pjsua_core.c:1548) |
| 28 | ==26641== by 0x42CDC9: pjsua_destroy (pjsua_core.c:1775) |
| 29 | ==26641== by 0x409291: app_destroy (pjsua_app.c:2011) |
| 30 | ==26641== by 0x409304: pjsua_app_destroy (pjsua_app.c:2035) |
| 31 | ==26641== by 0x405ADF: main_func (main.c:116) |
| 32 | ==26641== by 0x568914: pj_run_app (os_core_unix.c:1930) |
| 33 | ==26641== by 0x405B32: main (main.c:129) |
| 34 | ==26641== |
| 35 | ==26641== This conflicts with a previous read of size 4 by thread #7 |
| 36 | ==26641== Locks held: none |
| 37 | ==26641== at 0x42A978: worker_thread (pjsua_core.c:691) |
| 38 | ==26641== by 0x566E30: thread_main (os_core_unix.c:523) |
| 39 | ==26641== by 0x4C30E26: mythread_wrapper (hg_intercepts.c:233) |
| 40 | ==26641== by 0x5353181: start_thread (pthread_create.c:312) |
| 41 | ==26641== by 0x595400C: clone (clone.S:111) |
| 42 | }}} |
| 43 | |
| 44 | 2. Lock order inversion involving rwmutex_lock_read() |
| 330 | |
| 331 | 7. Destroying a locked mutex in sip dialog |
| 332 | Status: won't fix. When calling pj_mutex_destroy() there is actually no requirement that the mutex be unlocked. If destroying the mutex fails, pj_mutex_destroy() will automatically retry several times, unlocking the mutex first on each attempt. |
| 333 | {{{ |
| 334 | ==27298== Thread #9 unlocked an invalid lock at 0x6B1B408 |
| 335 | ==27298== at 0x4C325C0: pthread_mutex_unlock (hg_intercepts.c:632) |
| 336 | ==27298== by 0x578334: pj_mutex_destroy (os_core_unix.c:1413) |
| 337 | ==27298== by 0x4F8D7A: destroy_dialog (sip_dialog.c:114) |
| 338 | ==27298== by 0x4FA4C9: unregister_and_destroy_dialog (sip_dialog.c:760) |
| 339 | ==27298== by 0x4FA94B: pjsip_dlg_dec_lock (sip_dialog.c:896) |
| 340 | ==27298== by 0x4FCD4A: pjsip_dlg_on_tsx_state (sip_dialog.c:2030) |
| 341 | ==27298== by 0x4FD674: mod_ua_on_tsx_state (sip_ua_layer.c:178) |
| 342 | ==27298== by 0x4F4771: tsx_set_state (sip_transaction.c:1210) |
| 343 | ==27298== by 0x4F8103: tsx_on_state_completed_uas (sip_transaction.c:3150) |
| 344 | ==27298== by 0x4F4591: tsx_timer_callback (sip_transaction.c:1153) |
| 345 | ==27298== by 0x58808F: pj_timer_heap_poll (timer.c:643) |
| 346 | ==27298== by 0x4DF50A: pjsip_endpt_handle_events2 (sip_endpoint.c:712) |
| 347 | }}} |