From ac4ac267f027905eabb96efe3472c36e818f2c96 Mon Sep 17 00:00:00 2001 From: Jerry D'Antonio Date: Thu, 14 May 2015 09:13:31 -0400 Subject: [PATCH 1/5] Fixed Yardoc problems. --- lib/concurrent/agent.rb | 13 ++++++++++++- .../atomic/thread_local_var/weak_key_map.rb | 4 ++-- lib/concurrent/delay.rb | 13 +------------ 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/lib/concurrent/agent.rb b/lib/concurrent/agent.rb index 1e84d6c4d..482dfdd8d 100644 --- a/lib/concurrent/agent.rb +++ b/lib/concurrent/agent.rb @@ -21,7 +21,18 @@ class Agent # # @param [Object] initial the initial value # - # @!macro executor_and_deref_options + # @!macro [attach] executor_and_deref_options + # + # @param [Hash] opts the options used to define the behavior at update and deref + # and to specify the executor on which to perform actions + # @option opts [Executor] :executor when set use the given `Executor` instance. + # Three special values are also supported: `:task` returns the global task pool, + # `:operation` returns the global operation pool, and `:immediate` returns a new + # `ImmediateExecutor` object. 
+ # @option opts [Boolean] :dup_on_deref (false) call `#dup` before returning the data + # @option opts [Boolean] :freeze_on_deref (false) call `#freeze` before returning the data + # @option opts [Proc] :copy_on_deref (nil) call the given `Proc` passing + # the internal value and returning the value returned from the proc def initialize(initial, opts = {}) @value = initial @rescuers = [] diff --git a/lib/concurrent/atomic/thread_local_var/weak_key_map.rb b/lib/concurrent/atomic/thread_local_var/weak_key_map.rb index 3b09cfbf2..7938fba00 100644 --- a/lib/concurrent/atomic/thread_local_var/weak_key_map.rb +++ b/lib/concurrent/atomic/thread_local_var/weak_key_map.rb @@ -164,9 +164,9 @@ def object end end end - end - private_constant :WeakReference + private_constant :WeakReference + end # The classes behave similar to Hashes, but the keys in the map are not strong references # and can be reclaimed by the garbage collector at any time. When a key is reclaimed, the diff --git a/lib/concurrent/delay.rb b/lib/concurrent/delay.rb index 64d48f57d..2e661c2ee 100644 --- a/lib/concurrent/delay.rb +++ b/lib/concurrent/delay.rb @@ -51,18 +51,7 @@ class Delay < Synchronization::Object # Create a new `Delay` in the `:pending` state. # - # @!macro [attach] executor_and_deref_options - # - # @param [Hash] opts the options used to define the behavior at update and deref - # and to specify the executor on which to perform actions - # @option opts [Executor] :executor when set use the given `Executor` instance. - # Three special values are also supported: `:task` returns the global task pool, - # `:operation` returns the global operation pool, and `:immediate` returns a new - # `ImmediateExecutor` object. 
- # @option opts [Boolean] :dup_on_deref (false) call `#dup` before returning the data - # @option opts [Boolean] :freeze_on_deref (false) call `#freeze` before returning the data - # @option opts [Proc] :copy_on_deref (nil) call the given `Proc` passing - # the internal value and returning the value returned from the proc + # @!macro executor_and_deref_options # # @yield the delayed operation to perform # From 54c2b5a422256d6a63a6e4529d32763c6361a13f Mon Sep 17 00:00:00 2001 From: Jerry D'Antonio Date: Thu, 14 May 2015 09:29:15 -0400 Subject: [PATCH 2/5] AtomicBoolean and AtomicFixnum follow new subclass declaration convention. --- lib/concurrent/atomic/atomic_boolean.rb | 81 ++++++++++++------------- lib/concurrent/atomic/atomic_fixnum.rb | 65 ++++++++++---------- 2 files changed, 70 insertions(+), 76 deletions(-) diff --git a/lib/concurrent/atomic/atomic_boolean.rb b/lib/concurrent/atomic/atomic_boolean.rb index bdfafc621..7f4e094f2 100644 --- a/lib/concurrent/atomic/atomic_boolean.rb +++ b/lib/concurrent/atomic/atomic_boolean.rb @@ -62,7 +62,7 @@ def true? synchronize { @value } end - # @!macro atomic_boolean_method_false_question + # @!macro [attach] atomic_boolean_method_false_question # # Is the current value `false` # @@ -91,10 +91,12 @@ def make_false protected + # @!visibility private def ns_initialize(initial) @value = !!initial end + # @!visibility private def ns_make_value(value) old = @value @value = value @@ -102,46 +104,41 @@ def ns_make_value(value) end end - if Concurrent.on_jruby? - - class AtomicBoolean < JavaAtomicBoolean - end - - elsif defined?(CAtomicBoolean) - - # @!macro atomic_boolean - class CAtomicBoolean - - # @!method initialize - # @!macro atomic_boolean_method_initialize - - # @!method value - # @!macro atomic_boolean_method_value_get - - # @!method value= - # @!macro atomic_boolean_method_value_set - - # @!method true? - # @!macro atomic_boolean_method_true_question - - # @!method false? 
- # @!macro atomic_boolean_method_false_question - - # @!method make_true - # @!macro atomic_boolean_method_make_true - - # @!method make_false - # @!macro atomic_boolean_method_make_false - end - - # @!macro atomic_boolean - class AtomicBoolean < CAtomicBoolean - end - - else - - # @!macro atomic_boolean - class AtomicBoolean < MutexAtomicBoolean - end + AtomicBooleanImplementation = case + when Concurrent.on_jruby? + JavaAtomicBoolean + when defined?(CAtomicBoolean) + CAtomicBoolean + else + MutexAtomicBoolean + end + private_constant :AtomicBooleanImplementation + + # @!macro atomic_boolean + # + # @see Concurrent::MutexAtomicBoolean + class AtomicBoolean < AtomicBooleanImplementation + + # @!method initialize(initial = false) + # @!macro atomic_boolean_method_initialize + + # @!method value + # @!macro atomic_boolean_method_value_get + + # @!method value=(value) + # @!macro atomic_boolean_method_value_set + + # @!method true? + # @!macro atomic_boolean_method_true_question + + # @!method false? 
+ # @!macro atomic_boolean_method_false_question
+
+ # @!method make_true
+ # @!macro atomic_boolean_method_make_true
+
+ # @!method make_false
+ # @!macro atomic_boolean_method_make_false
+ end
 end
diff --git a/lib/concurrent/atomic/atomic_fixnum.rb b/lib/concurrent/atomic/atomic_fixnum.rb
index e75580ab4..824b3a2a0 100644
--- a/lib/concurrent/atomic/atomic_fixnum.rb
+++ b/lib/concurrent/atomic/atomic_fixnum.rb
@@ -90,7 +90,7 @@ def decrement
 # @param [Fixnum] expect the expected value
 # @param [Fixnum] update the new value
 #
- # @return [Boolean] true if the value was updated else false
+ # @return [Boolean] true if the value was updated else false
 def compare_and_set(expect, update)
 synchronize do
 if @value == expect
@@ -104,17 +104,20 @@ def compare_and_set(expect, update)
 protected
+ # @!visibility private
 def ns_initialize(initial)
 ns_set(initial)
 end
 private
+ # @!visibility private
 def ns_set(value)
 range_check!(value)
 @value = value
 end
+ # @!visibility private
 def range_check!(value)
 if !value.is_a?(Fixnum)
 raise ArgumentError.new('value value must be a Fixnum')
@@ -128,44 +131,38 @@ def range_check!(value)
 end
 end
- if Concurrent.on_jruby?
-
- # @!macro atomic_fixnum
- class AtomicFixnum < JavaAtomicFixnum
- end
-
- elsif defined?(CAtomicFixnum)
-
- # @!macro atomic_fixnum
- class CAtomicFixnum
-
- # @!method initialize
- # @!macro atomic_fixnum_method_initialize
-
- # @!method value
- # @!macro atomic_fixnum_method_value_get
+ AtomicFixnumImplementation = case
+ when Concurrent.on_jruby?
+ JavaAtomicFixnum + when defined?(CAtomicFixnum) + CAtomicFixnum + else + MutexAtomicFixnum + end + private_constant :AtomicFixnumImplementation + + # @!macro atomic_fixnum + # + # @see Concurrent::MutexAtomicFixnum + class AtomicFixnum < AtomicFixnumImplementation + + # @!method initialize(initial = 0) + # @!macro atomic_fixnum_method_initialize - # @!method value= - # @!macro atomic_fixnum_method_value_set + # @!method value + # @!macro atomic_fixnum_method_value_get - # @!method increment - # @!macro atomic_fixnum_method_increment + # @!method value=(value) + # @!macro atomic_fixnum_method_value_set - # @!method decrement - # @!macro atomic_fixnum_method_decrement + # @!method increment + # @!macro atomic_fixnum_method_increment - # @!method compare_and_set - # @!macro atomic_fixnum_method_compare_and_set - end + # @!method decrement + # @!macro atomic_fixnum_method_decrement - # @!macro atomic_fixnum - class AtomicFixnum < CAtomicFixnum - end + # @!method compare_and_set(expect, update) + # @!macro atomic_fixnum_method_compare_and_set - else - - # @!macro atomic_fixnum - class AtomicFixnum < MutexAtomicFixnum - end end end From 62d9ba15cec08b81d7bf6758d2ff706787014125 Mon Sep 17 00:00:00 2001 From: Jerry D'Antonio Date: Thu, 14 May 2015 10:02:12 -0400 Subject: [PATCH 3/5] ThreadLocalVar follows new subclass declaration convention. 
--- lib/concurrent/atomic/thread_local_var.rb | 177 +++++++--- .../atomic/thread_local_var/weak_key_map.rb | 323 +++++++++--------- .../atomic/thread_local_var_spec.rb | 8 +- 3 files changed, 288 insertions(+), 220 deletions(-) diff --git a/lib/concurrent/atomic/thread_local_var.rb b/lib/concurrent/atomic/thread_local_var.rb index 162088d8d..a765f87c2 100644 --- a/lib/concurrent/atomic/thread_local_var.rb +++ b/lib/concurrent/atomic/thread_local_var.rb @@ -2,7 +2,8 @@ module Concurrent - # @!macro [attach] abstract_thread_local_var + # @!macro [attach] thread_local_var + # # A `ThreadLocalVar` is a variable where the value is different for each thread. # Each variable may have a default value, but when you modify the variable only # the current thread will ever see that change. @@ -29,60 +30,28 @@ module Concurrent # end # # v.value #=> 14 + # + # @see https://docs.oracle.com/javase/7/docs/api/java/lang/ThreadLocal.html Java ThreadLocal class AbstractThreadLocalVar - module ThreadLocalRubyStorage - - protected - - def allocate_storage - @storage = WeakKeyMap.new - end - - def get - @storage[Thread.current] - end - - def set(value, &block) - key = Thread.current - - @storage[key] = value - - if block_given? - begin - block.call - ensure - @storage.delete key - end - end - end - end - - module ThreadLocalJavaStorage - - protected - - def allocate_storage - @var = java.lang.ThreadLocal.new - end - - def get - @var.get - end - - def set(value) - @var.set(value) - end - - end - NIL_SENTINEL = Object.new + private_constant :NIL_SENTINEL + # @!macro [attach] thread_local_var_method_initialize + # + # Creates a thread local variable. + # + # @param [Object] default the default value when otherwise unset def initialize(default = nil) @default = default allocate_storage end + # @!macro [attach] thread_local_var_method_get + # + # Returns the value in the current thread's copy of this thread-local variable. 
+ # + # @return [Object] the current value def value value = get @@ -95,10 +64,24 @@ def value end end + # @!macro [attach] thread_local_var_method_set + # + # Sets the current thread's copy of this thread-local variable to the specified value. + # + # @param [Object] value the value to set + # @return [Object] the new value def value=(value) bind value end + # @!macro [attach] thread_local_var_method_bind + # + # Bind the given value to thread local storage during + # execution of the given block. + # + # @param [Object] value the value to bind + # @yield the operation to be performed with the bound variable + # @return [Object] the value def bind(value, &block) if value.nil? stored_value = NIL_SENTINEL @@ -106,19 +89,107 @@ def bind(value, &block) stored_value = value end - set stored_value, &block + set(stored_value, &block) value end + protected + + # @!visibility private + def allocate_storage + raise NotImplementedError + end + + # @!visibility private + def get + raise NotImplementedError + end + + # @!visibility private + def set(value) + raise NotImplementedError + end end - # @!macro abstract_thread_local_var - class ThreadLocalVar < AbstractThreadLocalVar - if Concurrent.on_jruby? - include ThreadLocalJavaStorage - else - include ThreadLocalRubyStorage + class RubyThreadLocalVar < AbstractThreadLocalVar + + protected + + # @!visibility private + def allocate_storage + @storage = WeakKeyMap.new + end + + # @!visibility private + def get + @storage[Thread.current] end + + # @!visibility private + def set(value) + key = Thread.current + + @storage[key] = value + + if block_given? + begin + yield + ensure + @storage.delete(key) + end + end + end + end + + if Concurrent.on_jruby? 
+
+ class JavaThreadLocalVar < AbstractThreadLocalVar
+
+ protected
+
+ # @!visibility private
+ def allocate_storage
+ @var = java.lang.ThreadLocal.new
+ end
+
+ # @!visibility private
+ def get
+ @var.get
+ end
+
+ # @!visibility private
+ def set(value)
+ @var.set(value)
+ end
+ end
+ end
+
+ ThreadLocalVarImplementation = case
+ when Concurrent.on_jruby?
+ JavaThreadLocalVar
+ else
+ RubyThreadLocalVar
+ end
+ private_constant :ThreadLocalVarImplementation
+
+ # @!macro thread_local_var
+ #
+ # @see Concurrent::AbstractThreadLocalVar
+ # @see Concurrent::RubyThreadLocalVar
+ class ThreadLocalVar < ThreadLocalVarImplementation
+
+ # @!method initialize(default = nil)
+ # @!macro thread_local_var_method_initialize
+
+ # @!method value
+ # @!macro thread_local_var_method_get
+
+ # @!method value=(value)
+ # @!macro thread_local_var_method_set
+
+ # @!method bind(value, &block)
+ # @!macro thread_local_var_method_bind
+ end
 end
diff --git a/lib/concurrent/atomic/thread_local_var/weak_key_map.rb b/lib/concurrent/atomic/thread_local_var/weak_key_map.rb
index 7938fba00..8db2b920d 100644
--- a/lib/concurrent/atomic/thread_local_var/weak_key_map.rb
+++ b/lib/concurrent/atomic/thread_local_var/weak_key_map.rb
@@ -23,216 +23,213 @@ module Concurrent
 class AbstractThreadLocalVar
- module ThreadLocalRubyStorage
+ begin
+ require 'weakref'
- begin
- require 'weakref'
-
- # @!visibility private
- class WeakReference
+ # @!visibility private
+ class WeakReference
- # The object id of the object being referenced.
- attr_reader :referenced_object_id
+ # The object id of the object being referenced.
+ attr_reader :referenced_object_id
- # This implementation of a weak reference simply wraps the standard WeakRef implementation
- # that comes with the Ruby standard library.
- def initialize(obj) - @referenced_object_id = obj.__id__ - @ref = ::WeakRef.new(obj) - end + # This implementation of a weak reference simply wraps the standard WeakRef implementation + # that comes with the Ruby standard library. + def initialize(obj) + @referenced_object_id = obj.__id__ + @ref = ::WeakRef.new(obj) + end - def object - @ref.__getobj__ - rescue => e - # Jruby implementation uses RefError while MRI uses WeakRef::RefError - if (defined?(RefError) && e.is_a?(RefError)) || (defined?(::WeakRef::RefError) && e.is_a?(::WeakRef::RefError)) - nil - else - raise e - end + def object + @ref.__getobj__ + rescue => e + # Jruby implementation uses RefError while MRI uses WeakRef::RefError + if (defined?(RefError) && e.is_a?(RefError)) || (defined?(::WeakRef::RefError) && e.is_a?(::WeakRef::RefError)) + nil + else + raise e end end + end - rescue LoadError - - require 'monitor' - - # This is a pure ruby implementation of a weak reference. It is much more - # efficient than the WeakRef implementation bundled in MRI 1.8 and 1.9 - # subclass Delegator which is very heavy to instantiate and utilizes a - # because it does not fair amount of memory under Ruby 1.8. - # - # @!visibility private - class WeakReference - - # The object id of the object being referenced. - attr_reader :referenced_object_id - - # @!visibility private - class ReferencePointer - def initialize(object) - @referenced_object_id = object.__id__ - add_backreference(object) - end + rescue LoadError - def cleanup - obj = ObjectSpace._id2ref(@referenced_object_id) rescue nil - remove_backreference(obj) if obj - end + require 'monitor' - def object - obj = ObjectSpace._id2ref(@referenced_object_id) - obj if verify_backreferences(obj) - rescue RangeError - nil - end - - private + # This is a pure ruby implementation of a weak reference. 
It is much more + # efficient than the WeakRef implementation bundled in MRI 1.8 and 1.9 + # subclass Delegator which is very heavy to instantiate and utilizes a + # because it does not fair amount of memory under Ruby 1.8. + # + # @!visibility private + class WeakReference - # Verify that the object is the same one originally set for the weak reference. - def verify_backreferences(obj) - return nil unless supports_backreference?(obj) - backreferences = obj.instance_variable_get(:@__weak_backreferences__) if obj.instance_variable_defined?(:@__weak_backreferences__) - backreferences && backreferences.include?(object_id) - end + # The object id of the object being referenced. + attr_reader :referenced_object_id - # Add a backreference to the object. - def add_backreference(obj) - return unless supports_backreference?(obj) - backreferences = obj.instance_variable_get(:@__weak_backreferences__) if obj.instance_variable_defined?(:@__weak_backreferences__) - unless backreferences - backreferences = [] - obj.instance_variable_set(:@__weak_backreferences__, backreferences) - end - backreferences << object_id - end + # @!visibility private + class ReferencePointer + def initialize(object) + @referenced_object_id = object.__id__ + add_backreference(object) + end - # Remove backreferences from the object. - def remove_backreference(obj) - return unless supports_backreference?(obj) - backreferences = obj.instance_variable_get(:@__weak_backreferences__) if obj.instance_variable_defined?(:@__weak_backreferences__) - if backreferences - backreferences.dup.delete(object_id) - obj.send(:remove_instance_variable, :@__weak_backreferences__) if backreferences.empty? - end - end + def cleanup + obj = ObjectSpace._id2ref(@referenced_object_id) rescue nil + remove_backreference(obj) if obj + end - def supports_backreference?(obj) - obj.respond_to?(:instance_variable_get) && obj.respond_to?(:instance_variable_defined?) 
- rescue NoMethodError - false - end + def object + obj = ObjectSpace._id2ref(@referenced_object_id) + obj if verify_backreferences(obj) + rescue RangeError + nil end - private_constant :ReferencePointer + private - @@weak_references = {} - @@lock = Monitor.new + # Verify that the object is the same one originally set for the weak reference. + def verify_backreferences(obj) + return nil unless supports_backreference?(obj) + backreferences = obj.instance_variable_get(:@__weak_backreferences__) if obj.instance_variable_defined?(:@__weak_backreferences__) + backreferences && backreferences.include?(object_id) + end - # Finalizer that cleans up weak references when references are destroyed. - @@reference_finalizer = lambda do |object_id| - @@lock.synchronize do - reference_pointer = @@weak_references.delete(object_id) - reference_pointer.cleanup if reference_pointer + # Add a backreference to the object. + def add_backreference(obj) + return unless supports_backreference?(obj) + backreferences = obj.instance_variable_get(:@__weak_backreferences__) if obj.instance_variable_defined?(:@__weak_backreferences__) + unless backreferences + backreferences = [] + obj.instance_variable_set(:@__weak_backreferences__, backreferences) end + backreferences << object_id end - # Create a new weak reference to an object. The existence of the weak reference - # will not prevent the garbage collector from reclaiming the referenced object. - def initialize(obj) - @referenced_object_id = obj.__id__ - @@lock.synchronize do - @reference_pointer = ReferencePointer.new(obj) - @@weak_references[self.object_id] = @reference_pointer + # Remove backreferences from the object. 
+ def remove_backreference(obj) + return unless supports_backreference?(obj) + backreferences = obj.instance_variable_get(:@__weak_backreferences__) if obj.instance_variable_defined?(:@__weak_backreferences__) + if backreferences + backreferences.dup.delete(object_id) + obj.send(:remove_instance_variable, :@__weak_backreferences__) if backreferences.empty? end - ObjectSpace.define_finalizer(self, @@reference_finalizer) end - # Get the reference object. If the object has already been garbage collected, - # then this method will return nil. - def object - if @reference_pointer - obj = @reference_pointer.object - unless obj - @@lock.synchronize do - @@weak_references.delete(object_id) - @reference_pointer.cleanup - @reference_pointer = nil - end - end - obj - end + def supports_backreference?(obj) + obj.respond_to?(:instance_variable_get) && obj.respond_to?(:instance_variable_defined?) + rescue NoMethodError + false end end - private_constant :WeakReference - end + private_constant :ReferencePointer - # The classes behave similar to Hashes, but the keys in the map are not strong references - # and can be reclaimed by the garbage collector at any time. When a key is reclaimed, the - # map entry will be removed. - # - # @!visibility private - class WeakKeyMap - - # Create a new map. Values added to the hash will be cleaned up by the garbage - # collector if there are no other reference except in the map. - def initialize - @values = {} - @references_to_keys_map = {} - @lock = Monitor.new - @reference_cleanup = lambda{|object_id| remove_reference_to(object_id)} - end + @@weak_references = {} + @@lock = Monitor.new - # Get a value from the map by key. If the value has been reclaimed by the garbage - # collector, this will return nil. - def [](key) - @lock.synchronize do - rkey = ref_key(key) - @values[rkey] if rkey + # Finalizer that cleans up weak references when references are destroyed. 
+ @@reference_finalizer = lambda do |object_id| + @@lock.synchronize do + reference_pointer = @@weak_references.delete(object_id) + reference_pointer.cleanup if reference_pointer end end - # Add a key/value to the map. - def []=(key, value) - ObjectSpace.define_finalizer(key, @reference_cleanup) - @lock.synchronize do - @references_to_keys_map[key.__id__] = WeakReference.new(key) - @values[key.__id__] = value + # Create a new weak reference to an object. The existence of the weak reference + # will not prevent the garbage collector from reclaiming the referenced object. + def initialize(obj) + @referenced_object_id = obj.__id__ + @@lock.synchronize do + @reference_pointer = ReferencePointer.new(obj) + @@weak_references[self.object_id] = @reference_pointer end + ObjectSpace.define_finalizer(self, @@reference_finalizer) end - # Remove the value associated with the key from the map. - def delete(key) - @lock.synchronize do - rkey = ref_key(key) - if rkey - @references_to_keys_map.delete(rkey) - @values.delete(rkey) - else - nil + # Get the reference object. If the object has already been garbage collected, + # then this method will return nil. + def object + if @reference_pointer + obj = @reference_pointer.object + unless obj + @@lock.synchronize do + @@weak_references.delete(object_id) + @reference_pointer.cleanup + @reference_pointer = nil + end end + obj end end + end - # Get an array of keys that have not yet been garbage collected. - def keys - @values.keys.collect{|rkey| @references_to_keys_map[rkey].object}.compact + private_constant :WeakReference + end + + # The classes behave similar to Hashes, but the keys in the map are not strong references + # and can be reclaimed by the garbage collector at any time. When a key is reclaimed, the + # map entry will be removed. + # + # @!visibility private + class WeakKeyMap + + # Create a new map. Values added to the hash will be cleaned up by the garbage + # collector if there are no other reference except in the map. 
+ def initialize + @values = {} + @references_to_keys_map = {} + @lock = Monitor.new + @reference_cleanup = lambda{|object_id| remove_reference_to(object_id)} + end + + # Get a value from the map by key. If the value has been reclaimed by the garbage + # collector, this will return nil. + def [](key) + @lock.synchronize do + rkey = ref_key(key) + @values[rkey] if rkey end + end - private + # Add a key/value to the map. + def []=(key, value) + ObjectSpace.define_finalizer(key, @reference_cleanup) + @lock.synchronize do + @references_to_keys_map[key.__id__] = WeakReference.new(key) + @values[key.__id__] = value + end + end - def ref_key (key) - ref = @references_to_keys_map[key.__id__] - if ref && ref.object - ref.referenced_object_id + # Remove the value associated with the key from the map. + def delete(key) + @lock.synchronize do + rkey = ref_key(key) + if rkey + @references_to_keys_map.delete(rkey) + @values.delete(rkey) else nil end end end - private_constant :WeakKeyMap + # Get an array of keys that have not yet been garbage collected. + def keys + @values.keys.collect{|rkey| @references_to_keys_map[rkey].object}.compact + end + + private + + def ref_key (key) + ref = @references_to_keys_map[key.__id__] + if ref && ref.object + ref.referenced_object_id + else + nil + end + end end + + private_constant :WeakKeyMap end end diff --git a/spec/concurrent/atomic/thread_local_var_spec.rb b/spec/concurrent/atomic/thread_local_var_spec.rb index e769fabaa..e31202632 100644 --- a/spec/concurrent/atomic/thread_local_var_spec.rb +++ b/spec/concurrent/atomic/thread_local_var_spec.rb @@ -29,12 +29,12 @@ module Concurrent end if Concurrent.on_jruby? 
- it 'uses ThreadLocalJavaStorage' do
- expect(subject.class.ancestors).to include(Concurrent::AbstractThreadLocalVar::ThreadLocalJavaStorage)
+ it 'extends JavaThreadLocalVar' do
+ expect(subject.class.ancestors).to include(Concurrent::JavaThreadLocalVar)
 end
 else
- it 'uses ThreadLocalNewStorage' do
- expect(subject.class.ancestors).to include(Concurrent::AbstractThreadLocalVar::ThreadLocalRubyStorage)
+ it 'extends RubyThreadLocalVar' do
+ expect(subject.class.ancestors).to include(Concurrent::RubyThreadLocalVar)
 end
 end
 end
From 52967d554c6b1cb788035d45eca9d1b225a14d25 Mon Sep 17 00:00:00 2001
From: Jerry D'Antonio
Date: Thu, 14 May 2015 11:04:49 -0400
Subject: [PATCH 4/5] Semaphore follows new subclass declaration convention.
---
 lib/concurrent/atomic/semaphore.rb | 59 ++++++++++++++++++++++--------
 1 file changed, 44 insertions(+), 15 deletions(-)
diff --git a/lib/concurrent/atomic/semaphore.rb b/lib/concurrent/atomic/semaphore.rb
index 0657e0fcd..4a7ac1cc8 100644
--- a/lib/concurrent/atomic/semaphore.rb
+++ b/lib/concurrent/atomic/semaphore.rb
@@ -1,7 +1,17 @@ require 'concurrent/synchronization'
 module Concurrent
+
+ # @!macro [attach] semaphore
+ #
+ # A counting semaphore. Conceptually, a semaphore maintains a set of
+ # permits. Each {#acquire} blocks if necessary until a permit is
+ # available, and then takes it. Each {#release} adds a permit, potentially
+ # releasing a blocking acquirer.
+ # However, no actual permit objects are used; the Semaphore just keeps a
+ # count of the number available and acts accordingly.
 class MutexSemaphore < Synchronization::Object
+
+ # @!macro [attach] semaphore_method_initialize
 #
 # Create a new `Semaphore` with the initial `count`.
@@ -129,12 +139,14 @@ def reduce_permits(reduction) protected + # @!visibility private def ns_initialize(count) @free = count end private + # @!visibility private def try_acquire_now(permits) if @free >= permits @free -= permits @@ -144,28 +156,45 @@ def try_acquire_now(permits) end end + # @!visibility private def try_acquire_timed(permits, timeout) ns_wait_until(timeout) { try_acquire_now(permits) } end end - if Concurrent.on_jruby? + SemaphoreImplementation = case + when Concurrent.on_jruby? + JavaSemaphore + else + MutexSemaphore + end + private_constant :SemaphoreImplementation - # @!macro semaphore - # - # A counting semaphore. Conceptually, a semaphore maintains a set of - # permits. Each {#acquire} blocks if necessary until a permit is - # available, and then takes it. Each {#release} adds a permit, potentially - # releasing a blocking acquirer. - # However, no actual permit objects are used; the Semaphore just keeps a - # count of the number available and acts accordingly. - class Semaphore < JavaSemaphore - end + # @!macro semaphore + # + # @see Concurrent::MutexSemaphore + class Semaphore < SemaphoreImplementation - else + # @!method initialize(count) + # @!macro semaphore_method_initialize + + # @!method acquire(permits = 1) + # @!macro semaphore_method_acquire + + # @!method available_permits + # @!macro semaphore_method_available_permits + + # @!method drain_permits + # @!macro semaphore_method_drain_permits + + # @!method try_acquire(permits = 1, timeout = nil) + # @!macro semaphore_method_try_acquire + + # @!method release(permits = 1) + # @!macro semaphore_method_release + + # @!method reduce_permits(reduction) + # @!macro semaphore_method_reduce_permits - # @!macro semaphore - class Semaphore < MutexSemaphore - end end end From 8509ca77ee9db67f1198d6878f8292a6cd2e908c Mon Sep 17 00:00:00 2001 From: Jerry D'Antonio Date: Thu, 14 May 2015 11:05:12 -0400 Subject: [PATCH 5/5] Executors now follow new subclass declaration convention. 
--- lib/concurrent/executor/cached_thread_pool.rb | 68 ++++---- lib/concurrent/executor/fixed_thread_pool.rb | 164 +++++++++--------- .../executor/ruby_thread_pool_executor.rb | 23 ++- .../executor/single_thread_executor.rb | 46 ++--- .../executor/thread_pool_executor.rb | 78 +++++---- .../executor/ruby_cached_thread_pool_spec.rb | 25 ++- 6 files changed, 216 insertions(+), 188 deletions(-) diff --git a/lib/concurrent/executor/cached_thread_pool.rb b/lib/concurrent/executor/cached_thread_pool.rb index ab50f8ce0..df09fe65a 100644 --- a/lib/concurrent/executor/cached_thread_pool.rb +++ b/lib/concurrent/executor/cached_thread_pool.rb @@ -4,37 +4,41 @@ module Concurrent if Concurrent.on_jruby? require 'concurrent/executor/java_cached_thread_pool' - # @!macro [attach] cached_thread_pool - # A thread pool that dynamically grows and shrinks to fit the current workload. - # New threads are created as needed, existing threads are reused, and threads - # that remain idle for too long are killed and removed from the pool. These - # pools are particularly suited to applications that perform a high volume of - # short-lived tasks. - # - # On creation a `CachedThreadPool` has zero running threads. New threads are - # created on the pool as new operations are `#post`. The size of the pool - # will grow until `#max_length` threads are in the pool or until the number - # of threads exceeds the number of running and pending operations. When a new - # operation is post to the pool the first available idle thread will be tasked - # with the new operation. - # - # Should a thread crash for any reason the thread will immediately be removed - # from the pool. Similarly, threads which remain idle for an extended period - # of time will be killed and reclaimed. Thus these thread pools are very - # efficient at reclaiming unused resources. 
- # - # The API and behavior of this class are based on Java's `CachedThreadPool` - # - # @see Concurrent::RubyCachedThreadPool - # @see Concurrent::JavaCachedThreadPool - # - # @!macro thread_pool_options - class CachedThreadPool < JavaCachedThreadPool - end - else - # @!macro cached_thread_pool - # @!macro thread_pool_options - class CachedThreadPool < RubyCachedThreadPool - end + end + + CachedThreadPoolImplementation = case + when Concurrent.on_jruby? + JavaCachedThreadPool + else + RubyCachedThreadPool + end + private_constant :CachedThreadPoolImplementation + + # @!macro [attach] cached_thread_pool + # A thread pool that dynamically grows and shrinks to fit the current workload. + # New threads are created as needed, existing threads are reused, and threads + # that remain idle for too long are killed and removed from the pool. These + # pools are particularly suited to applications that perform a high volume of + # short-lived tasks. + # + # On creation a `CachedThreadPool` has zero running threads. New threads are + # created on the pool as new operations are `#post`. The size of the pool + # will grow until `#max_length` threads are in the pool or until the number + # of threads exceeds the number of running and pending operations. When a new + # operation is post to the pool the first available idle thread will be tasked + # with the new operation. + # + # Should a thread crash for any reason the thread will immediately be removed + # from the pool. Similarly, threads which remain idle for an extended period + # of time will be killed and reclaimed. Thus these thread pools are very + # efficient at reclaiming unused resources. 
+ # + # The API and behavior of this class are based on Java's `CachedThreadPool` + # + # @see Concurrent::RubyCachedThreadPool + # @see Concurrent::JavaCachedThreadPool + # + # @!macro thread_pool_options + class CachedThreadPool < CachedThreadPoolImplementation end end diff --git a/lib/concurrent/executor/fixed_thread_pool.rb b/lib/concurrent/executor/fixed_thread_pool.rb index 88d1406cd..542162389 100644 --- a/lib/concurrent/executor/fixed_thread_pool.rb +++ b/lib/concurrent/executor/fixed_thread_pool.rb @@ -3,88 +3,90 @@ module Concurrent if Concurrent.on_jruby? - require 'concurrent/executor/java_fixed_thread_pool' + end + + FixedThreadPoolImplementation = case + when Concurrent.on_jruby? + JavaFixedThreadPool + else + RubyFixedThreadPool + end + private_constant :FixedThreadPoolImplementation - # @!macro [attach] fixed_thread_pool - # - # A thread pool with a set number of threads. The number of threads in the pool - # is set on construction and remains constant. When all threads are busy new - # tasks `#post` to the thread pool are enqueued until a thread becomes available. - # Should a thread crash for any reason the thread will immediately be removed - # from the pool and replaced. - # - # The API and behavior of this class are based on Java's `FixedThreadPool` - # - # @see Concurrent::RubyFixedThreadPool - # @see Concurrent::JavaFixedThreadPool - # - # @!macro [attach] thread_pool_options - # - # Thread pools support several configuration options: - # - # * `idletime`: The number of seconds that a thread may be idle before being reclaimed. - # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at - # any one time. When the queue size reaches `max_queue` subsequent tasks will be - # rejected in accordance with the configured `fallback_policy`. - # * `auto_terminate`: When true (default) an `at_exit` handler will be registered which - # will stop the thread pool when the application exits. 
See below for more information - # on shutting down thread pools. - # * `fallback_policy`: The policy defining how rejected tasks are handled. - # - # Three fallback policies are supported: - # - # * `:abort`: Raise a `RejectedExecutionError` exception and discard the task. - # * `:discard`: Discard the task and return false. - # * `:caller_runs`: Execute the task on the calling thread. - # - # **Shutting Down Thread Pools** - # - # Killing a thread pool while tasks are still being processed, either by calling - # the `#kill` method or at application exit, will have unpredictable results. There - # is no way for the thread pool to know what resources are being used by the - # in-progress tasks. When those tasks are killed the impact on those resources - # cannot be predicted. The *best* practice is to explicitly shutdown all thread - # pools using the provided methods: - # - # * Call `#shutdown` to initiate an orderly termination of all in-progress tasks - # * Call `#wait_for_termination` with an appropriate timeout interval an allow - # the orderly shutdown to complete - # * Call `#kill` *only when* the thread pool fails to shutdown in the allotted time - # - # On some runtime platforms (most notably the JVM) the application will not - # exit until all thread pools have been shutdown. To prevent applications from - # "hanging" on exit all thread pools include an `at_exit` handler that will - # stop the thread pool when the application exists. This handler uses a brute - # force method to stop the pool and makes no guarantees regarding resources being - # used by any tasks still running. Registration of this `at_exit` handler can be - # prevented by setting the thread pool's constructor `:auto_terminate` option to - # `false` when the thread pool is created. All thread pools support this option. 
- # - # ```ruby - # pool1 = Concurrent::FixedThreadPool.new(5) # an `at_exit` handler will be registered - # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # prevent `at_exit` handler registration - # ``` - # - # @note Failure to properly shutdown a thread pool can lead to unpredictable results. - # Please read *Shutting Down Thread Pools* for more information. - # - # @note When running on the JVM (JRuby) this class will inherit from `JavaThreadPoolExecutor`. - # On all other platforms it will inherit from `RubyThreadPoolExecutor`. - # - # @see Concurrent::RubyThreadPoolExecutor - # @see Concurrent::JavaThreadPoolExecutor - # - # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class - # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface - # @see http://ruby-doc.org//core-2.2.0/Kernel.html#method-i-at_exit Kernel#at_exit - class FixedThreadPool < JavaFixedThreadPool - end - else - # @!macro fixed_thread_pool - # @!macro thread_pool_options - class FixedThreadPool < RubyFixedThreadPool - end + # @!macro [attach] fixed_thread_pool + # + # A thread pool with a set number of threads. The number of threads in the pool + # is set on construction and remains constant. When all threads are busy new + # tasks `#post` to the thread pool are enqueued until a thread becomes available. + # Should a thread crash for any reason the thread will immediately be removed + # from the pool and replaced. 
+ # + # The API and behavior of this class are based on Java's `FixedThreadPool` + # + # @see Concurrent::RubyFixedThreadPool + # @see Concurrent::JavaFixedThreadPool + # + # @!macro [attach] thread_pool_options + # + # Thread pools support several configuration options: + # + # * `idletime`: The number of seconds that a thread may be idle before being reclaimed. + # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at + # any one time. When the queue size reaches `max_queue` subsequent tasks will be + # rejected in accordance with the configured `fallback_policy`. + # * `auto_terminate`: When true (default) an `at_exit` handler will be registered which + # will stop the thread pool when the application exits. See below for more information + # on shutting down thread pools. + # * `fallback_policy`: The policy defining how rejected tasks are handled. + # + # Three fallback policies are supported: + # + # * `:abort`: Raise a `RejectedExecutionError` exception and discard the task. + # * `:discard`: Discard the task and return false. + # * `:caller_runs`: Execute the task on the calling thread. + # + # **Shutting Down Thread Pools** + # + # Killing a thread pool while tasks are still being processed, either by calling + # the `#kill` method or at application exit, will have unpredictable results. There + # is no way for the thread pool to know what resources are being used by the + # in-progress tasks. When those tasks are killed the impact on those resources + # cannot be predicted. 
The *best* practice is to explicitly shutdown all thread
+ # pools using the provided methods:
+ #
+ # * Call `#shutdown` to initiate an orderly termination of all in-progress tasks
+ # * Call `#wait_for_termination` with an appropriate timeout interval and allow
+ # the orderly shutdown to complete
+ # * Call `#kill` *only when* the thread pool fails to shutdown in the allotted time
+ #
+ # On some runtime platforms (most notably the JVM) the application will not
+ # exit until all thread pools have been shutdown. To prevent applications from
+ # "hanging" on exit all thread pools include an `at_exit` handler that will
+ # stop the thread pool when the application exits. This handler uses a brute
+ # force method to stop the pool and makes no guarantees regarding resources being
+ # used by any tasks still running. Registration of this `at_exit` handler can be
+ # prevented by setting the thread pool's constructor `:auto_terminate` option to
+ # `false` when the thread pool is created. All thread pools support this option.
+ #
+ # ```ruby
+ # pool1 = Concurrent::FixedThreadPool.new(5) # an `at_exit` handler will be registered
+ # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # prevent `at_exit` handler registration
+ # ```
+ #
+ # @note Failure to properly shutdown a thread pool can lead to unpredictable results.
+ # Please read *Shutting Down Thread Pools* for more information.
+ #
+ # @note When running on the JVM (JRuby) this class will inherit from `JavaFixedThreadPool`.
+ # On all other platforms it will inherit from `RubyFixedThreadPool`. 
+ # + # @see Concurrent::RubyFixedThreadPool + # @see Concurrent::JavaFixedThreadPool + # + # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools + # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class + # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface + # @see http://ruby-doc.org//core-2.2.0/Kernel.html#method-i-at_exit Kernel#at_exit + class FixedThreadPool < FixedThreadPoolImplementation end end diff --git a/lib/concurrent/executor/ruby_thread_pool_executor.rb b/lib/concurrent/executor/ruby_thread_pool_executor.rb index 233e61ca0..7f5d70ad0 100644 --- a/lib/concurrent/executor/ruby_thread_pool_executor.rb +++ b/lib/concurrent/executor/ruby_thread_pool_executor.rb @@ -128,6 +128,7 @@ def worker_died(worker) protected + # @!visibility private def ns_initialize(opts) @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i @@ -155,10 +156,12 @@ def ns_initialize(opts) @next_gc_time = Concurrent.monotonic_time + @gc_interval end + # @!visibility private def ns_limited_queue? @max_queue != 0 end + # @!visibility private def ns_execute(*args, &task) if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task) @scheduled_task_count += 1 @@ -172,6 +175,7 @@ def ns_execute(*args, &task) alias_method :execute, :ns_execute + # @!visibility private def ns_shutdown_execution if @pool.empty? # nothing to do @@ -187,7 +191,7 @@ def ns_shutdown_execution alias_method :shutdown_execution, :ns_shutdown_execution - # @api private + # @!visibility private def ns_kill_execution # TODO log out unprocessed tasks in queue # TODO try to shutdown first? 
@@ -200,6 +204,8 @@ def ns_kill_execution # tries to assign task to a worker, tries to get one from @ready or to create new one # @return [true, false] if task is assigned to a worker + # + # @!visibility private def ns_assign_worker(*args, &task) # keep growing if the pool is not at the minimum yet worker = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker @@ -213,6 +219,8 @@ def ns_assign_worker(*args, &task) # tries to enqueue task # @return [true, false] if enqueued + # + # @!visibility private def ns_enqueue(*args, &task) if !ns_limited_queue? || @queue.size < @max_queue @queue << [task, args] @@ -222,6 +230,7 @@ def ns_enqueue(*args, &task) end end + # @!visibility private def ns_worker_died(worker) ns_remove_busy_worker worker replacement_worker = ns_add_busy_worker @@ -230,6 +239,8 @@ def ns_worker_died(worker) # creates new worker which has to receive work to do after it's added # @return [nil, Worker] nil of max capacity is reached + # + # @!visibility private def ns_add_busy_worker return if @pool.size >= @max_length @@ -239,6 +250,8 @@ def ns_add_busy_worker end # handle ready worker, giving it new job or assigning back to @ready + # + # @!visibility private def ns_ready_worker(worker, success = true) @completed_task_count += 1 if success task_and_args = @queue.shift @@ -255,6 +268,8 @@ def ns_ready_worker(worker, success = true) end # returns back worker to @ready which was not idle for enough time + # + # @!visibility private def ns_worker_not_old_enough(worker) # let's put workers coming from idle_test back to the start (as the oldest worker) @ready.unshift(worker) @@ -262,6 +277,8 @@ def ns_worker_not_old_enough(worker) end # removes a worker which is not in not tracked in @ready + # + # @!visibility private def ns_remove_busy_worker(worker) @pool.delete(worker) stopped_event.set if @pool.empty? && !running? 
@@ -269,6 +286,8 @@ def ns_remove_busy_worker(worker) end # try oldest worker if it is idle for enough time, it's returned back at the start + # + # @!visibility private def ns_prune_pool return if @pool.size <= @min_length @@ -345,5 +364,7 @@ def run_task(pool, task, args) throw :stop end end + + private_constant :Worker end end diff --git a/lib/concurrent/executor/single_thread_executor.rb b/lib/concurrent/executor/single_thread_executor.rb index 85fa2e5ec..5e7f9a745 100644 --- a/lib/concurrent/executor/single_thread_executor.rb +++ b/lib/concurrent/executor/single_thread_executor.rb @@ -3,29 +3,31 @@ module Concurrent if Concurrent.on_jruby? - require 'concurrent/executor/java_single_thread_executor' + end + + SingleThreadExecutorImplementation = case + when Concurrent.on_jruby? + JavaSingleThreadExecutor + else + RubySingleThreadExecutor + end + private_constant :SingleThreadExecutorImplementation - # @!macro [attach] single_thread_executor - # - # A thread pool with a set number of threads. The number of threads in the pool - # is set on construction and remains constant. When all threads are busy new - # tasks `#post` to the thread pool are enqueued until a thread becomes available. - # Should a thread crash for any reason the thread will immediately be removed - # from the pool and replaced. - # - # The API and behavior of this class are based on Java's `SingleThreadExecutor` - # - # @see Concurrent::RubySingleThreadExecutor - # @see Concurrent::JavaSingleThreadExecutor - # - # @!macro thread_pool_options - class SingleThreadExecutor < JavaSingleThreadExecutor - end - else - # @!macro single_thread_executor - # @!macro thread_pool_options - class SingleThreadExecutor < RubySingleThreadExecutor - end + # @!macro [attach] single_thread_executor + # + # A thread pool with a set number of threads. The number of threads in the pool + # is set on construction and remains constant. 
When all threads are busy new + # tasks `#post` to the thread pool are enqueued until a thread becomes available. + # Should a thread crash for any reason the thread will immediately be removed + # from the pool and replaced. + # + # The API and behavior of this class are based on Java's `SingleThreadExecutor` + # + # @see Concurrent::RubySingleThreadExecutor + # @see Concurrent::JavaSingleThreadExecutor + # + # @!macro thread_pool_options + class SingleThreadExecutor < SingleThreadExecutorImplementation end end diff --git a/lib/concurrent/executor/thread_pool_executor.rb b/lib/concurrent/executor/thread_pool_executor.rb index d97e9f09f..90f538b47 100644 --- a/lib/concurrent/executor/thread_pool_executor.rb +++ b/lib/concurrent/executor/thread_pool_executor.rb @@ -4,42 +4,46 @@ module Concurrent if Concurrent.on_jruby? require 'concurrent/executor/java_thread_pool_executor' - # @!macro [attach] thread_pool_executor - # - # An abstraction composed of one or more threads and a task queue. Tasks - # (blocks or `proc` objects) are submit to the pool and added to the queue. - # The threads in the pool remove the tasks and execute them in the order - # they were received. When there are more tasks queued than there are - # threads to execute them the pool will create new threads, up to the - # configured maximum. Similarly, threads that are idle for too long will - # be garbage collected, down to the configured minimum options. Should a - # thread crash it, too, will be garbage collected. - # - # `ThreadPoolExecutor` is based on the Java class of the same name. From - # the official Java documentationa; - # - # > Thread pools address two different problems: they usually provide - # > improved performance when executing large numbers of asynchronous tasks, - # > due to reduced per-task invocation overhead, and they provide a means - # > of bounding and managing the resources, including threads, consumed - # > when executing a collection of tasks. 
Each ThreadPoolExecutor also - # > maintains some basic statistics, such as the number of completed tasks. - # > - # > To be useful across a wide range of contexts, this class provides many - # > adjustable parameters and extensibility hooks. However, programmers are - # > urged to use the more convenient Executors factory methods - # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation), - # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single - # > background thread), that preconfigure settings for the most common usage - # > scenarios. - # - # @!macro thread_pool_options - class ThreadPoolExecutor < JavaThreadPoolExecutor - end - else - # @!macro thread_pool_executor - # @!macro thread_pool_options - class ThreadPoolExecutor < RubyThreadPoolExecutor - end + end + + ThreadPoolExecutorImplementation = case + when Concurrent.on_jruby? + JavaThreadPoolExecutor + else + RubyThreadPoolExecutor + end + private_constant :ThreadPoolExecutorImplementation + + # @!macro [attach] thread_pool_executor + # + # An abstraction composed of one or more threads and a task queue. Tasks + # (blocks or `proc` objects) are submit to the pool and added to the queue. + # The threads in the pool remove the tasks and execute them in the order + # they were received. When there are more tasks queued than there are + # threads to execute them the pool will create new threads, up to the + # configured maximum. Similarly, threads that are idle for too long will + # be garbage collected, down to the configured minimum options. Should a + # thread crash it, too, will be garbage collected. + # + # `ThreadPoolExecutor` is based on the Java class of the same name. 
From + # the official Java documentationa; + # + # > Thread pools address two different problems: they usually provide + # > improved performance when executing large numbers of asynchronous tasks, + # > due to reduced per-task invocation overhead, and they provide a means + # > of bounding and managing the resources, including threads, consumed + # > when executing a collection of tasks. Each ThreadPoolExecutor also + # > maintains some basic statistics, such as the number of completed tasks. + # > + # > To be useful across a wide range of contexts, this class provides many + # > adjustable parameters and extensibility hooks. However, programmers are + # > urged to use the more convenient Executors factory methods + # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation), + # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single + # > background thread), that preconfigure settings for the most common usage + # > scenarios. + # + # @!macro thread_pool_options + class ThreadPoolExecutor < ThreadPoolExecutorImplementation end end diff --git a/spec/concurrent/executor/ruby_cached_thread_pool_spec.rb b/spec/concurrent/executor/ruby_cached_thread_pool_spec.rb index 2807073a5..226b79c05 100644 --- a/spec/concurrent/executor/ruby_cached_thread_pool_spec.rb +++ b/spec/concurrent/executor/ruby_cached_thread_pool_spec.rb @@ -6,8 +6,8 @@ module Concurrent subject do described_class.new( - fallback_policy: :discard, - gc_interval: 0 + fallback_policy: :discard, + gc_interval: 0 ) end @@ -70,16 +70,15 @@ module Concurrent end end - - context 'stress' do + context 'stress', notravis: true do configurations = [ - { min_threads: 2, - max_threads: ThreadPoolExecutor::DEFAULT_MAX_POOL_SIZE, - auto_terminate: false, - idletime: 0.1, # 1 minute - max_queue: 0, # unlimited - fallback_policy: :caller_runs, # shouldn't matter -- 0 max queue - gc_interval: 0.1 }, + { min_threads: 2, + max_threads: 
ThreadPoolExecutor::DEFAULT_MAX_POOL_SIZE, + auto_terminate: false, + idletime: 0.1, # 1 minute + max_queue: 0, # unlimited + fallback_policy: :caller_runs, # shouldn't matter -- 0 max queue + gc_interval: 0.1 }, { min_threads: 2, max_threads: 4, auto_terminate: false, @@ -89,7 +88,6 @@ module Concurrent gc_interval: 0.1 } ] - configurations.each do |config| specify do pool = RubyThreadPoolExecutor.new(config) @@ -106,9 +104,6 @@ module Concurrent end end end - end - - end end