@@ -423,13 +423,6 @@ buffer_manager_allocate_buffer_for_thread (
 
 	// Allocating a buffer requires us to take the lock.
 	EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1)
-
-		// if we are deallocating then give up, see the comments in ep_buffer_manager_suspend_write_event () for why this is important.
-		if ((bool)ep_rt_volatile_load_uint32_t (&buffer_manager->write_event_suspending)) {
-			*write_suspended = true;
-			ep_raise_error_holding_spin_lock (section1);
-		}
-
 		thread_buffer_list = ep_thread_session_state_get_buffer_list (thread_session_state);
 		if (thread_buffer_list == NULL) {
 			thread_buffer_list = ep_buffer_list_alloc (buffer_manager, ep_thread_session_state_get_thread (thread_session_state));
@@ -797,8 +790,6 @@ ep_buffer_manager_alloc (
 	instance->session = session;
 	instance->size_of_all_buffers = 0;
 
-	ep_rt_volatile_store_uint32_t (&instance->write_event_suspending, (uint32_t)false);
-
 #ifdef EP_CHECKED_BUILD
 	instance->num_buffers_allocated = 0;
 	instance->num_buffers_stolen = 0;
@@ -837,8 +828,6 @@ ep_buffer_manager_free (EventPipeBufferManager *buffer_manager)
 {
 	ep_return_void_if_nok (buffer_manager != NULL);
 
-	ep_rt_volatile_store_uint32_t (&buffer_manager->write_event_suspending, (uint32_t)true);
-
 	ep_buffer_manager_deallocate_buffers (buffer_manager);
 
 	ep_rt_wait_event_free (&buffer_manager->rt_wait_event);
@@ -931,10 +920,6 @@ ep_buffer_manager_write_event (
 	thread_lock = ep_thread_get_rt_lock_ref (current_thread);
 
 	EP_SPIN_LOCK_ENTER (thread_lock, section1)
-		if (ep_rt_volatile_load_uint32_t_without_barrier (&buffer_manager->write_event_suspending) != (uint32_t)false)
-			// This session is suspending; we need to avoid initializing any session state and exit.
-			ep_raise_error_holding_spin_lock (section1);
-
 		session_state = ep_thread_get_or_create_session_state (current_thread, session);
 		ep_raise_error_if_nok_holding_spin_lock (session_state != NULL, section1);
 
@@ -977,24 +962,13 @@ ep_buffer_manager_write_event (
 
 			thread_lock = ep_thread_get_rt_lock_ref (current_thread);
 			EP_SPIN_LOCK_ENTER (thread_lock, section3)
-				if (ep_rt_volatile_load_uint32_t_without_barrier (&buffer_manager->write_event_suspending) != (uint32_t)false) {
-					// After leaving the manager's lock in buffer_manager_allocate_buffer_for_thread some other thread decided to suspend writes.
-					// We need to immediately return the buffer we just took without storing it or writing to it.
-					// suspend_write_event () is spinning waiting for this buffer to be relinquished.
-					ep_buffer_convert_to_read_only (buffer);
-
-					// We treat this as the write_event () call occurring after this session stopped listening for events, effectively the
-					// same as if ep_event_is_enabled returned false.
-					ep_raise_error_holding_spin_lock (section3);
-				} else {
 					ep_thread_session_state_set_write_buffer (session_state, buffer);
 					// Try to write the event after we allocated a buffer.
 					// This is the first time if the thread had no buffers before the call to this function.
 					// This is the second time if this thread did have one or more buffers, but they were full.
 					alloc_new_buffer = !ep_buffer_write_event (buffer, event_thread, session, ep_event, payload, activity_id, related_activity_id, stack);
 					EP_ASSERT (!alloc_new_buffer);
 					ep_thread_session_state_increment_sequence_number (session_state);
-				}
 			EP_SPIN_LOCK_EXIT (thread_lock, section3)
 		}
 	}
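
The write path that survives this hunk keeps its two-attempt shape: the first attempt is made under the thread lock against the thread's current buffer, and only if that fails (no buffer yet, or the buffer is full) is a fresh buffer allocated under the buffer-manager lock and the write retried once, as the retained comments describe. A minimal standalone sketch of that shape, using hypothetical names and types rather than the actual EventPipe API:

```c
// Illustrative sketch of the try/allocate/retry write path. Buffer, ThreadState,
// and allocate_buffer_for_thread are stand-ins, not the real EventPipe helpers.
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct {
	size_t used;
	size_t size;
	uint8_t data[4096];
} Buffer;

typedef struct {
	pthread_mutex_t thread_lock;   // models the per-thread rt_lock
	Buffer *write_buffer;          // models the session state's write buffer
	uint32_t sequence_number;
} ThreadState;

static bool
buffer_try_write (Buffer *buf, size_t len)
{
	if (buf->size - buf->used < len)
		return false;              // buffer is full, caller must allocate a new one
	buf->used += len;              // a real implementation would copy the event here
	return true;
}

// Stand-in for buffer_manager_allocate_buffer_for_thread: the real helper takes
// the buffer-manager lock and may steal an old buffer or fail; here we just
// heap-allocate one for the sake of the sketch.
static Buffer *
allocate_buffer_for_thread (ThreadState *state)
{
	(void)state;
	Buffer *buf = (Buffer *)calloc (1, sizeof (Buffer));
	if (buf)
		buf->size = sizeof (buf->data);
	return buf;
}

bool
write_event (ThreadState *state, size_t event_size)
{
	bool ok = false;

	// First attempt: write into the thread's current buffer, if it has one.
	pthread_mutex_lock (&state->thread_lock);
	if (state->write_buffer != NULL)
		ok = buffer_try_write (state->write_buffer, event_size);
	pthread_mutex_unlock (&state->thread_lock);

	if (!ok) {
		// No buffer yet, or it was full: allocate a new one and retry once.
		Buffer *fresh = allocate_buffer_for_thread (state);
		if (fresh == NULL)
			return false;          // out of buffer space, the event is dropped

		pthread_mutex_lock (&state->thread_lock);
		// In the real code the previous buffer stays on the thread's buffer
		// list for the reader; here we simply swap in the fresh one.
		state->write_buffer = fresh;
		ok = buffer_try_write (fresh, event_size);   // second and final attempt
		state->sequence_number++;
		pthread_mutex_unlock (&state->thread_lock);
	}
	return ok;
}
```
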
@@ -1034,73 +1008,33 @@ ep_buffer_manager_suspend_write_event (
 	ep_rt_thread_array_alloc (&thread_array);
 	EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1);
 		EP_ASSERT (ep_buffer_manager_ensure_consistency (buffer_manager) == true);
-		ep_rt_volatile_store_uint32_t (&buffer_manager->write_event_suspending, (uint32_t)true);
-
-		// From this point until write_event_suspending is reset to false it is impossible
-		// for new EventPipeThreadSessionStates to be added to the thread_session_state_list or
-		// for new EventBuffers to be added to an existing EventPipeBufferList. The only
-		// way ep_buffer_manager_allocate_buffer_for_thread is allowed to add one is by:
-		// 1) take rt_lock - ep_buffer_manager_allocate_buffer_for_thread can't own it now because this thread owns it,
-		//    but after this thread gives it up lower in this function it could be acquired.
-		// 2) observe write_event_suspending = false - that won't happen, acquiring rt_lock
-		//    guarantees ep_buffer_manager_allocate_buffer_for_thread will observe all the memory changes this
-		//    thread made prior to releasing rt_lock and we've already set it true.
-		// This ensures that when we iterate over the list of threads below we've got the complete list.
+		// Find all threads that have used this buffer manager.
 		ep_rt_thread_session_state_list_iterator_t thread_session_state_list_iterator;
 		ep_rt_thread_session_state_list_iterator_begin (&buffer_manager->thread_session_state_list, &thread_session_state_list_iterator);
 		while (!ep_rt_thread_session_state_list_iterator_end (&buffer_manager->thread_session_state_list, &thread_session_state_list_iterator)) {
-			ep_rt_thread_array_append (&thread_array, ep_thread_session_state_get_thread (ep_rt_thread_session_state_list_iterator_value (&thread_session_state_list_iterator)));
+			EventPipeThread *thread = ep_thread_session_state_get_thread (ep_rt_thread_session_state_list_iterator_value (&thread_session_state_list_iterator));
+			ep_rt_thread_array_append (&thread_array, thread);
 			ep_rt_thread_session_state_list_iterator_next (&buffer_manager->thread_session_state_list, &thread_session_state_list_iterator);
+
+			// Once EventPipeSession::SuspendWriteEvent completes, we shouldn't have any
+			// in-progress writes left.
+			EP_ASSERT (ep_thread_get_session_write_in_progress (thread) != session_index);
 		}
 	EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section1);
 
-	// Iterate through all the threads, forcing them to finish writes in progress inside EventPipeThread::m_lock,
-	// relinquish any buffers stored in EventPipeThread::m_pWriteBuffer and prevent storing new ones.
+	// Iterate through all the threads, forcing them to relinquish any buffers stored in
+	// EventPipeThread's write buffer and prevent storing new ones.
 	ep_rt_thread_array_iterator_t thread_array_iterator;
 	ep_rt_thread_array_iterator_begin (&thread_array, &thread_array_iterator);
 	while (!ep_rt_thread_array_iterator_end (&thread_array, &thread_array_iterator)) {
 		EventPipeThread *thread = ep_rt_thread_array_iterator_value (&thread_array_iterator);
 		EP_SPIN_LOCK_ENTER (ep_thread_get_rt_lock_ref (thread), section2)
 			EventPipeThreadSessionState *thread_session_state = ep_thread_get_session_state (thread, buffer_manager->session);
 			ep_thread_session_state_set_write_buffer (thread_session_state, NULL);
-			// From this point until write_event_suspending is reset to false it is impossible
-			// for this thread to set the write buffer to a non-null value which in turn means
-			// it can't write events into any buffer. To do this it would need to both:
-			// 1) Acquire the thread lock - it can't right now but it will be able to do so after
-			//    we release the lock below
-			// 2) Observe write_event_suspending = false - that won't happen, acquiring the thread
-			//    lock guarantees ep_buffer_manager_write_event will observe all the memory
-			//    changes this thread made prior to releasing the thread
-			//    lock and we already set it true.
 		EP_SPIN_LOCK_EXIT (ep_thread_get_rt_lock_ref (thread), section2)
 		ep_rt_thread_array_iterator_next (&thread_array, &thread_array_iterator);
 	}
 
-	// Wait for any straggler ep_buffer_manager_write_event threads that may have already allocated a buffer but
-	// hadn't yet relinquished it.
-	ep_rt_thread_session_state_list_iterator_t thread_session_state_list_iterator;
-	EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section3)
-		ep_rt_thread_session_state_list_iterator_begin (&buffer_manager->thread_session_state_list, &thread_session_state_list_iterator);
-		while (!ep_rt_thread_session_state_list_iterator_end (&buffer_manager->thread_session_state_list, &thread_session_state_list_iterator)) {
-			EventPipeBufferList *buffer_list = ep_thread_session_state_get_buffer_list (ep_rt_thread_session_state_list_iterator_value (&thread_session_state_list_iterator));
-			if (buffer_list) {
-				EventPipeThread *const event_pipe_thread = ep_buffer_list_get_thread (buffer_list);
-				if (event_pipe_thread) {
-					EP_YIELD_WHILE (ep_thread_get_session_write_in_progress (event_pipe_thread) == session_index);
-					// It still guarantees that the thread has returned its buffer, but it also now guarantees
-					// that the thread has returned from ep_session_write_event () and has relinquished the session pointer.
-					// This yield is guaranteed to eventually finish because threads will eventually exit write_event (),
-					// setting the flag back to -1. If the thread could quickly re-enter WriteEvent and set the flag
-					// back to this_session_id we could theoretically get unlucky and never observe the gap, but
-					// setting s_pSessions[this_session_id] = NULL above guarantees that can't happen indefinitely.
-					// Sooner or later the thread is going to see the NULL value and once it does it won't store
-					// this_session_id into the flag again.
-				}
-			}
-			ep_rt_thread_session_state_list_iterator_next (&buffer_manager->thread_session_state_list, &thread_session_state_list_iterator);
-		}
-	EP_SPIN_LOCK_EXIT (&buffer_manager->rt_lock, section3)
-
 ep_on_exit:
 	ep_requires_lock_held ();
 	ep_rt_thread_array_free (&thread_array);
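
The new assertion above leans on the writer/suspender handshake that replaces the write_event_suspending flag: each writing thread advertises the index of the session it is currently writing to, and the session's suspend path (per the added comment, EventPipeSession::SuspendWriteEvent) disables writes and then waits until no thread still advertises that index, which is what the removed EP_YIELD_WHILE loop used to do here. A rough standalone model of that handshake, using C11 atomics and illustrative names rather than the EventPipe API:

```c
// Simplified model of the suspend/write handshake this hunk now relies on.
// Thread, Session, and the function names are illustrative stand-ins.
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define NO_SESSION UINT32_MAX

typedef struct {
	// Index of the session this thread is currently writing to, or NO_SESSION.
	atomic_uint session_write_in_progress;
} Thread;

typedef struct {
	atomic_bool enabled;
} Session;

// Writer side: advertise the session index for the duration of the write.
void
write_event (Thread *t, Session *s, uint32_t session_index)
{
	atomic_store (&t->session_write_in_progress, session_index);
	if (atomic_load (&s->enabled)) {
		/* ...copy the event into a per-thread buffer... */
	}
	atomic_store (&t->session_write_in_progress, NO_SESSION);
}

// Suspender side: disable the session, then wait until no thread still
// advertises its index. With sequentially consistent atomics, a writer either
// sees enabled == false (and skips the write) or is observed by this loop and
// waited out, so buffer teardown afterwards cannot race an in-flight write.
void
suspend_write_event (Session *s, uint32_t session_index, Thread **threads, size_t thread_count)
{
	atomic_store (&s->enabled, false);
	for (size_t i = 0; i < thread_count; ++i) {
		while (atomic_load (&threads[i]->session_write_in_progress) == session_index)
			sched_yield ();   // plays the role of the removed EP_YIELD_WHILE
	}
}
```
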
@@ -1338,15 +1272,6 @@ ep_buffer_manager_deallocate_buffers (EventPipeBufferManager *buffer_manager)
 	// Take the buffer manager manipulation lock
 	EP_SPIN_LOCK_ENTER (&buffer_manager->rt_lock, section1)
 		EP_ASSERT (ep_buffer_manager_ensure_consistency (buffer_manager));
-		EP_ASSERT ((bool)ep_rt_volatile_load_uint32_t (&buffer_manager->write_event_suspending));
-
-		// The write_event_suspending flag + locks ensure that no thread will touch any of the
-		// state we are dismantling here. This includes:
-		// a) EventPipeThread m_sessions[session_id]
-		// b) EventPipeThreadSessionState
-		// c) EventPipeBufferList
-		// d) EventPipeBuffer
-		// e) EventPipeBufferManager.m_pThreadSessionStateList
 
 		ep_rt_thread_session_state_list_iterator_t thread_session_state_list_iterator;
 		ep_rt_thread_session_state_list_iterator_begin (&buffer_manager->thread_session_state_list, &thread_session_state_list_iterator);