From b0338417c73c6446597ec93e24b3ce780ab13190 Mon Sep 17 00:00:00 2001 From: Douglas Eichelberger Date: Sun, 26 Nov 2023 09:33:42 -0800 Subject: [PATCH 1/8] Vendor Object#blank? --- Library/Homebrew/extend/blank.rb | 200 +++++++++++++++++++++++++++++++ Library/Homebrew/global.rb | 2 +- 2 files changed, 201 insertions(+), 1 deletion(-) create mode 100644 Library/Homebrew/extend/blank.rb diff --git a/Library/Homebrew/extend/blank.rb b/Library/Homebrew/extend/blank.rb new file mode 100644 index 0000000000000..87b0e3510c6ea --- /dev/null +++ b/Library/Homebrew/extend/blank.rb @@ -0,0 +1,200 @@ +# typed: true +# frozen_string_literal: true + +require "concurrent/map" + +class Object + # An object is blank if it's false, empty, or a whitespace string. + # For example, +nil+, '', ' ', [], {}, and +false+ are all blank. + # + # This simplifies + # + # !address || address.empty? + # + # to + # + # address.blank? + # + # @return [true, false] + def blank? + respond_to?(:empty?) ? !!T.unsafe(self).empty? : false + end + + # An object is present if it's not blank. + # + # @return [true, false] + def present? + !blank? + end + + # Returns the receiver if it's present otherwise returns +nil+. + # object.presence is equivalent to + # + # object.present? ? object : nil + # + # For example, something like + # + # state = params[:state] if params[:state].present? + # country = params[:country] if params[:country].present? + # region = state || country || 'US' + # + # becomes + # + # region = params[:state].presence || params[:country].presence || 'US' + # + # @return [Object] + def presence + self if present? + end +end + +class NilClass + # +nil+ is blank: + # + # nil.blank? # => true + # + # @return [true] + def blank? + true + end + + def present? # :nodoc: + false + end +end + +class FalseClass + # +false+ is blank: + # + # false.blank? # => true + # + # @return [true] + def blank? + true + end + + def present? # :nodoc: + false + end +end + +class TrueClass + # +true+ is not blank: + # + # true.blank? # => false + # + # @return [false] + def blank? + false + end + + def present? # :nodoc: + true + end +end + +class Array + # An array is blank if it's empty: + # + # [].blank? # => true + # [1,2,3].blank? # => false + # + # @return [true, false] + alias_method :blank?, :empty? + + def present? # :nodoc: + !empty? + end +end + +class Hash + # A hash is blank if it's empty: + # + # {}.blank? # => true + # { key: 'value' }.blank? # => false + # + # @return [true, false] + alias_method :blank?, :empty? + + def present? # :nodoc: + !empty? + end +end + +class Symbol + # A Symbol is blank if it's empty: + # + # :''.blank? # => true + # :symbol.blank? # => false + alias_method :blank?, :empty? + + def present? # :nodoc: + !empty? + end +end + +class String + BLANK_RE = /\A[[:space:]]*\z/ + ENCODED_BLANKS = Concurrent::Map.new do |h, enc| + h[enc] = Regexp.new(BLANK_RE.source.encode(enc), BLANK_RE.options | Regexp::FIXEDENCODING) + end + + # A string is blank if it's empty or contains whitespaces only: + # + # ''.blank? # => true + # ' '.blank? # => true + # "\t\n\r".blank? # => true + # ' blah '.blank? # => false + # + # Unicode whitespace is supported: + # + # "\u00a0".blank? # => true + # + # @return [true, false] + def blank? + # The regexp that matches blank strings is expensive. For the case of empty + # strings we can speed up this method (~3.5x) with an empty? call. The + # penalty for the rest of strings is marginal. + empty? 
|| + begin + BLANK_RE.match?(self) + rescue Encoding::CompatibilityError + ENCODED_BLANKS[self.encoding].match?(self) + end + end + + def present? # :nodoc: + !blank? + end +end + +class Numeric # :nodoc: + # No number is blank: + # + # 1.blank? # => false + # 0.blank? # => false + # + # @return [false] + def blank? + false + end + + def present? + true + end +end + +class Time # :nodoc: + # No Time is blank: + # + # Time.now.blank? # => false + # + # @return [false] + def blank? + false + end + + def present? + true + end +end diff --git a/Library/Homebrew/global.rb b/Library/Homebrew/global.rb index 487b40710eb05..396adbdd90176 100644 --- a/Library/Homebrew/global.rb +++ b/Library/Homebrew/global.rb @@ -12,13 +12,13 @@ # Only require "core_ext" here to ensure we're only requiring the minimum of # what we need. +require "extend/blank" require "active_support/core_ext/array/access" require "active_support/core_ext/enumerable" require "active_support/core_ext/file/atomic" require "active_support/core_ext/hash/deep_merge" require "active_support/core_ext/hash/except" require "active_support/core_ext/hash/keys" -require "active_support/core_ext/object/blank" require "active_support/core_ext/string/exclude" require "active_support/core_ext/string/filters" require "active_support/core_ext/string/indent" From 61667a8023b26b922794098c42811edc88d1de01 Mon Sep 17 00:00:00 2001 From: Douglas Eichelberger Date: Sun, 26 Nov 2023 09:56:17 -0800 Subject: [PATCH 2/8] Ignore non-required dependencies --- .gitignore | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 21b10a5f1e9cd..1e87ae707b573 100644 --- a/.gitignore +++ b/.gitignore @@ -70,7 +70,6 @@ !**/vendor/bundle/ruby/*/gems/activesupport-*/lib/active_support/core_ext/hash/except.rb !**/vendor/bundle/ruby/*/gems/activesupport-*/lib/active_support/core_ext/hash/keys.rb !**/vendor/bundle/ruby/*/gems/activesupport-*/lib/active_support/core_ext/hash/slice.rb -!**/vendor/bundle/ruby/*/gems/activesupport-*/lib/active_support/core_ext/object/blank.rb !**/vendor/bundle/ruby/*/gems/activesupport-*/lib/active_support/core_ext/object/deep_dup.rb !**/vendor/bundle/ruby/*/gems/activesupport-*/lib/active_support/core_ext/object/duplicable.rb !**/vendor/bundle/ruby/*/gems/activesupport-*/lib/active_support/core_ext/string/exclude.rb @@ -78,8 +77,6 @@ !**/vendor/bundle/ruby/*/gems/activesupport-*/lib/active_support/core_ext/string/indent.rb # Ignore partially included gems where we don't need all files -**/vendor/bundle/ruby/*/gems/concurrent-ruby-*/lib/*/*.jar -**/vendor/bundle/ruby/*/gems/i18n-*/lib/i18n/tests* **/vendor/gems/mechanize-*/.* **/vendor/gems/mechanize-*/*.md **/vendor/gems/mechanize-*/*.rdoc @@ -102,12 +99,14 @@ **/vendor/bundle/ruby/*/gems/coderay-*/ **/vendor/bundle/ruby/*/gems/colorize-*/ **/vendor/bundle/ruby/*/gems/commander-*/ +**/vendor/bundle/ruby/*/gems/concurrent-ruby-*/ **/vendor/bundle/ruby/*/gems/diff-lcs-*/ **/vendor/bundle/ruby/*/gems/docile-*/ **/vendor/bundle/ruby/*/gems/ecma-re-validator-*/ **/vendor/bundle/ruby/*/gems/hana-*/ **/vendor/bundle/ruby/*/gems/highline-*/ **/vendor/bundle/ruby/*/gems/hpricot-*/ +**/vendor/bundle/ruby/*/gems/i18n-*/ **/vendor/bundle/ruby/*/gems/jaro_winkler-*/ **/vendor/bundle/ruby/*/gems/json-*/ **/vendor/bundle/ruby/*/gems/json_schemer-*/ From ccbb05d5aa14f971c882a4936b1db7e1befb12d3 Mon Sep 17 00:00:00 2001 From: Douglas Eichelberger Date: Sun, 26 Nov 2023 09:57:48 -0800 Subject: [PATCH 3/8] Remove concurrent/map Object#blank? 
cache --- Library/Homebrew/extend/blank.rb | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/Library/Homebrew/extend/blank.rb b/Library/Homebrew/extend/blank.rb index 87b0e3510c6ea..435268026a584 100644 --- a/Library/Homebrew/extend/blank.rb +++ b/Library/Homebrew/extend/blank.rb @@ -1,8 +1,6 @@ # typed: true # frozen_string_literal: true -require "concurrent/map" - class Object # An object is blank if it's false, empty, or a whitespace string. # For example, +nil+, '', ' ', [], {}, and +false+ are all blank. @@ -135,9 +133,6 @@ def present? # :nodoc: class String BLANK_RE = /\A[[:space:]]*\z/ - ENCODED_BLANKS = Concurrent::Map.new do |h, enc| - h[enc] = Regexp.new(BLANK_RE.source.encode(enc), BLANK_RE.options | Regexp::FIXEDENCODING) - end # A string is blank if it's empty or contains whitespaces only: # @@ -159,7 +154,7 @@ def blank? begin BLANK_RE.match?(self) rescue Encoding::CompatibilityError - ENCODED_BLANKS[self.encoding].match?(self) + Regexp.new(BLANK_RE.source.encode(self.encoding), BLANK_RE.options | Regexp::FIXEDENCODING).match?(self) end end From c7c539efa566de64efd488c5b1a57f83ab67ad03 Mon Sep 17 00:00:00 2001 From: Douglas Eichelberger Date: Sun, 26 Nov 2023 10:05:14 -0800 Subject: [PATCH 4/8] Strict typing --- Library/Homebrew/extend/blank.rb | 38 +++++++++++++++--------------- Library/Homebrew/extend/object.rbi | 6 ----- Library/Homebrew/global.rb | 2 +- 3 files changed, 20 insertions(+), 26 deletions(-) delete mode 100644 Library/Homebrew/extend/object.rbi diff --git a/Library/Homebrew/extend/blank.rb b/Library/Homebrew/extend/blank.rb index 435268026a584..f9c9a2b2dc315 100644 --- a/Library/Homebrew/extend/blank.rb +++ b/Library/Homebrew/extend/blank.rb @@ -1,4 +1,4 @@ -# typed: true +# typed: strict # frozen_string_literal: true class Object @@ -12,15 +12,13 @@ class Object # to # # address.blank? - # - # @return [true, false] + sig { returns(T::Boolean) } def blank? respond_to?(:empty?) ? !!T.unsafe(self).empty? : false end # An object is present if it's not blank. - # - # @return [true, false] + sig { returns(T::Boolean) } def present? !blank? end @@ -39,8 +37,7 @@ def present? # becomes # # region = params[:state].presence || params[:country].presence || 'US' - # - # @return [Object] + sig { returns(T.nilable(T.self_type)) } def presence self if present? end @@ -50,12 +47,12 @@ class NilClass # +nil+ is blank: # # nil.blank? # => true - # - # @return [true] + sig { returns(TrueClass) } def blank? true end + sig { returns(FalseClass) } def present? # :nodoc: false end @@ -65,12 +62,12 @@ class FalseClass # +false+ is blank: # # false.blank? # => true - # - # @return [true] + sig { returns(TrueClass) } def blank? true end + sig { returns(FalseClass) } def present? # :nodoc: false end @@ -80,12 +77,12 @@ class TrueClass # +true+ is not blank: # # true.blank? # => false - # - # @return [false] + sig { returns(FalseClass) } def blank? false end + sig { returns(TrueClass) } def present? # :nodoc: true end @@ -100,6 +97,7 @@ class Array # @return [true, false] alias_method :blank?, :empty? + sig { returns(T::Boolean) } def present? # :nodoc: !empty? end @@ -114,6 +112,7 @@ class Hash # @return [true, false] alias_method :blank?, :empty? + sig { returns(T::Boolean) } def present? # :nodoc: !empty? end @@ -126,6 +125,7 @@ class Symbol # :symbol.blank? # => false alias_method :blank?, :empty? + sig { returns(T::Boolean) } def present? # :nodoc: !empty? end @@ -144,8 +144,7 @@ class String # Unicode whitespace is supported: # # "\u00a0".blank? 
# => true - # - # @return [true, false] + sig { returns(T::Boolean) } def blank? # The regexp that matches blank strings is expensive. For the case of empty # strings we can speed up this method (~3.5x) with an empty? call. The # penalty for the rest of strings is marginal. @@ -158,6 +157,7 @@ def blank? end end + sig { returns(T::Boolean) } def present? # :nodoc: !blank? end @@ -168,12 +168,12 @@ class Numeric # :nodoc: # # 1.blank? # => false # 0.blank? # => false - # - # @return [false] + sig { returns(FalseClass) } def blank? false end + sig { returns(TrueClass) } def present? true end @@ -183,12 +183,12 @@ class Time # :nodoc: # No Time is blank: # # Time.now.blank? # => false - # - # @return [false] + sig { returns(FalseClass) } def blank? false end + sig { returns(TrueClass) } def present? true end diff --git a/Library/Homebrew/extend/object.rbi b/Library/Homebrew/extend/object.rbi deleted file mode 100644 index cc3940556854d..0000000000000 --- a/Library/Homebrew/extend/object.rbi +++ /dev/null @@ -1,6 +0,0 @@ -# typed: strict - -class Object - sig { returns(T::Boolean) } - def present?; end -end diff --git a/Library/Homebrew/global.rb b/Library/Homebrew/global.rb index 396adbdd90176..ea6f1ebec47dd 100644 --- a/Library/Homebrew/global.rb +++ b/Library/Homebrew/global.rb @@ -12,7 +12,6 @@ # Only require "core_ext" here to ensure we're only requiring the minimum of # what we need. -require "extend/blank" require "active_support/core_ext/array/access" require "active_support/core_ext/enumerable" require "active_support/core_ext/file/atomic" require "active_support/core_ext/hash/deep_merge" require "active_support/core_ext/hash/except" require "active_support/core_ext/hash/keys" require "active_support/core_ext/string/exclude" require "active_support/core_ext/string/filters" require "active_support/core_ext/string/indent" @@ -75,6 +74,7 @@ HOMEBREW_BOTTLES_EXTNAME_REGEX = /\.([a-z0-9_]+)\.bottle\.(?:(\d+)\.)?tar\.gz$/.freeze require "extend/module" +require "extend/blank" require "env_config" require "macos_version" require "os" From c36fafbcf204b26e202d7caac21e3c5600be743d Mon Sep 17 00:00:00 2001 From: Douglas Eichelberger Date: Sun, 26 Nov 2023 10:10:43 -0800 Subject: [PATCH 5/8] Style fixes --- Library/.rubocop.yml | 3 +++ Library/Homebrew/extend/blank.rb | 10 +++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/Library/.rubocop.yml b/Library/.rubocop.yml index 6d47f3fad6f66..815fe39c7d1f4 100644 --- a/Library/.rubocop.yml +++ b/Library/.rubocop.yml @@ -232,6 +232,9 @@ Rails/Presence: Enabled: true Rails/Present: Enabled: true + Exclude: + # `present?` is defined as `!blank?` within this file + - "Homebrew/extend/blank.rb" Rails/RelativeDateConstant: Enabled: true Rails/SafeNavigation: Enabled: true diff --git a/Library/Homebrew/extend/blank.rb b/Library/Homebrew/extend/blank.rb index f9c9a2b2dc315..735af262765de 100644 --- a/Library/Homebrew/extend/blank.rb +++ b/Library/Homebrew/extend/blank.rb @@ -95,7 +95,7 @@ class Array # [1,2,3].blank? # => false # # @return [true, false] - alias_method :blank?, :empty? + alias blank? empty? sig { returns(T::Boolean) } def present? # :nodoc: @@ -110,7 +110,7 @@ class Hash # { key: 'value' }.blank? # => false # # @return [true, false] - alias_method :blank?, :empty? + alias blank? empty? sig { returns(T::Boolean) } def present? # :nodoc: @@ -123,7 +123,7 @@ class Symbol # # :''.blank? # => true # :symbol.blank? # => false - alias_method :blank?, :empty? + alias blank? empty? sig { returns(T::Boolean) } def present? # :nodoc: @@ -132,7 +132,7 @@ def present? # :nodoc: end class String - BLANK_RE = /\A[[:space:]]*\z/ + BLANK_RE = /\A[[:space:]]*\z/.freeze # A string is blank if it's empty or contains whitespaces only: # # ''.blank? # => true # ' '.blank? # => true # "\t\n\r".blank? # => true # ' blah '.blank? # => false # # Unicode whitespace is supported: # # "\u00a0".blank? # => true # sig { returns(T::Boolean) } def blank?
begin BLANK_RE.match?(self) rescue Encoding::CompatibilityError - Regexp.new(BLANK_RE.source.encode(self.encoding), BLANK_RE.options | Regexp::FIXEDENCODING).match?(self) + Regexp.new(BLANK_RE.source.encode(encoding), BLANK_RE.options | Regexp::FIXEDENCODING).match?(self) end end From ff43ec27934013fc76645724ce6295d756954039 Mon Sep 17 00:00:00 2001 From: Douglas Eichelberger Date: Sun, 26 Nov 2023 10:41:16 -0800 Subject: [PATCH 6/8] Remove unused active-support transitive dependencies --- .../active_support/core_ext/object/blank.rb | 155 -- .../gems/concurrent-ruby-1.2.2/LICENSE.txt | 21 - .../lib/concurrent-ruby/concurrent-ruby.rb | 5 - .../lib/concurrent-ruby/concurrent.rb | 134 - .../lib/concurrent-ruby/concurrent/agent.rb | 588 ----- .../lib/concurrent-ruby/concurrent/array.rb | 56 - .../lib/concurrent-ruby/concurrent/async.rb | 449 ---- .../lib/concurrent-ruby/concurrent/atom.rb | 222 -- .../concurrent/atomic/atomic_boolean.rb | 127 - .../concurrent/atomic/atomic_fixnum.rb | 144 -- .../atomic/atomic_markable_reference.rb | 167 -- .../concurrent/atomic/atomic_reference.rb | 135 - .../concurrent/atomic/count_down_latch.rb | 100 - .../concurrent/atomic/cyclic_barrier.rb | 128 - .../concurrent/atomic/event.rb | 109 - .../concurrent/atomic/fiber_local_var.rb | 109 - .../atomic/java_count_down_latch.rb | 43 - .../concurrent/atomic/locals.rb | 189 -- .../concurrent/atomic/lock_local_var.rb | 28 - .../concurrent/atomic/mutex_atomic_boolean.rb | 68 - .../concurrent/atomic/mutex_atomic_fixnum.rb | 81 - .../atomic/mutex_count_down_latch.rb | 44 - .../concurrent/atomic/mutex_semaphore.rb | 131 - .../concurrent/atomic/read_write_lock.rb | 255 -- .../atomic/reentrant_read_write_lock.rb | 379 --- .../concurrent/atomic/semaphore.rb | 163 -- .../concurrent/atomic/thread_local_var.rb | 111 - .../atomic_reference/atomic_direct_update.rb | 37 - .../atomic_reference/mutex_atomic.rb | 67 - .../atomic_reference/numeric_cas_wrapper.rb | 28 - .../lib/concurrent-ruby/concurrent/atomics.rb | 10 - .../collection/copy_on_notify_observer_set.rb | 107 - .../collection/copy_on_write_observer_set.rb | 111 - .../java_non_concurrent_priority_queue.rb | 84 - .../concurrent/collection/lock_free_stack.rb | 160 -- .../map/atomic_reference_map_backend.rb | 927 ------- .../collection/map/mri_map_backend.rb | 66 - .../map/non_concurrent_map_backend.rb | 148 -- .../map/synchronized_map_backend.rb | 82 - .../collection/map/truffleruby_map_backend.rb | 14 - .../non_concurrent_priority_queue.rb | 143 -- .../ruby_non_concurrent_priority_queue.rb | 160 -- .../concurrent/concern/deprecation.rb | 34 - .../concurrent/concern/dereferenceable.rb | 73 - .../concurrent/concern/logging.rb | 116 - .../concurrent/concern/obligation.rb | 220 -- .../concurrent/concern/observable.rb | 110 - .../concurrent/concurrent_ruby.jar | Bin 135855 -> 0 bytes .../concurrent/configuration.rb | 105 - .../concurrent-ruby/concurrent/constants.rb | 8 - .../concurrent-ruby/concurrent/dataflow.rb | 81 - .../lib/concurrent-ruby/concurrent/delay.rb | 199 -- .../lib/concurrent-ruby/concurrent/errors.rb | 74 - .../concurrent-ruby/concurrent/exchanger.rb | 353 --- .../executor/abstract_executor_service.rb | 131 - .../concurrent/executor/cached_thread_pool.rb | 62 - .../concurrent/executor/executor_service.rb | 185 -- .../concurrent/executor/fixed_thread_pool.rb | 220 -- .../concurrent/executor/immediate_executor.rb | 66 - .../executor/indirect_immediate_executor.rb | 44 - .../executor/java_executor_service.rb | 103 - .../executor/java_single_thread_executor.rb | 30 
- .../executor/java_thread_pool_executor.rb | 140 -- .../executor/ruby_executor_service.rb | 82 - .../executor/ruby_single_thread_executor.rb | 21 - .../executor/ruby_thread_pool_executor.rb | 366 --- .../concurrent/executor/safe_task_executor.rb | 35 - .../executor/serial_executor_service.rb | 34 - .../executor/serialized_execution.rb | 107 - .../serialized_execution_delegator.rb | 28 - .../executor/simple_executor_service.rb | 103 - .../executor/single_thread_executor.rb | 57 - .../executor/thread_pool_executor.rb | 88 - .../concurrent/executor/timer_set.rb | 172 -- .../concurrent-ruby/concurrent/executors.rb | 20 - .../lib/concurrent-ruby/concurrent/future.rb | 141 -- .../lib/concurrent-ruby/concurrent/hash.rb | 50 - .../concurrent/immutable_struct.rb | 101 - .../lib/concurrent-ruby/concurrent/ivar.rb | 208 -- .../lib/concurrent-ruby/concurrent/map.rb | 350 --- .../lib/concurrent-ruby/concurrent/maybe.rb | 229 -- .../concurrent/mutable_struct.rb | 239 -- .../lib/concurrent-ruby/concurrent/mvar.rb | 242 -- .../lib/concurrent-ruby/concurrent/options.rb | 42 - .../lib/concurrent-ruby/concurrent/promise.rb | 580 ----- .../concurrent-ruby/concurrent/promises.rb | 2168 ----------------- .../concurrent-ruby/concurrent/re_include.rb | 60 - .../concurrent/scheduled_task.rb | 331 --- .../lib/concurrent-ruby/concurrent/set.rb | 64 - .../concurrent/settable_struct.rb | 139 -- .../concurrent/synchronization.rb | 13 - .../abstract_lockable_object.rb | 102 - .../synchronization/abstract_object.rb | 22 - .../synchronization/abstract_struct.rb | 171 -- .../concurrent/synchronization/condition.rb | 62 - .../synchronization/full_memory_barrier.rb | 29 - .../synchronization/jruby_lockable_object.rb | 15 - .../concurrent/synchronization/lock.rb | 38 - .../synchronization/lockable_object.rb | 75 - .../synchronization/mutex_lockable_object.rb | 89 - .../concurrent/synchronization/object.rb | 151 -- .../synchronization/safe_initialization.rb | 36 - .../concurrent/synchronization/volatile.rb | 101 - .../thread_safe/synchronized_delegator.rb | 47 - .../concurrent/thread_safe/util.rb | 16 - .../concurrent/thread_safe/util/adder.rb | 74 - .../thread_safe/util/cheap_lockable.rb | 81 - .../thread_safe/util/data_structures.rb | 52 - .../thread_safe/util/power_of_two_tuple.rb | 38 - .../concurrent/thread_safe/util/striped64.rb | 246 -- .../concurrent/thread_safe/util/volatile.rb | 75 - .../thread_safe/util/xor_shift_random.rb | 50 - .../concurrent-ruby/concurrent/timer_task.rb | 311 --- .../lib/concurrent-ruby/concurrent/tuple.rb | 82 - .../lib/concurrent-ruby/concurrent/tvar.rb | 222 -- .../concurrent/utility/engine.rb | 45 - .../concurrent/utility/monotonic_time.rb | 19 - .../utility/native_extension_loader.rb | 77 - .../concurrent/utility/native_integer.rb | 54 - .../concurrent/utility/processor_counter.rb | 110 - .../lib/concurrent-ruby/concurrent/version.rb | 3 - .../ruby/2.6.0/gems/i18n-1.14.1/MIT-LICENSE | 20 - .../ruby/2.6.0/gems/i18n-1.14.1/lib/i18n.rb | 435 ---- .../gems/i18n-1.14.1/lib/i18n/backend.rb | 22 - .../gems/i18n-1.14.1/lib/i18n/backend/base.rb | 304 --- .../i18n-1.14.1/lib/i18n/backend/cache.rb | 113 - .../lib/i18n/backend/cache_file.rb | 36 - .../i18n-1.14.1/lib/i18n/backend/cascade.rb | 56 - .../i18n-1.14.1/lib/i18n/backend/chain.rb | 130 - .../i18n-1.14.1/lib/i18n/backend/fallbacks.rb | 115 - .../i18n-1.14.1/lib/i18n/backend/flatten.rb | 118 - .../i18n-1.14.1/lib/i18n/backend/gettext.rb | 83 - .../i18n/backend/interpolation_compiler.rb | 123 - .../i18n-1.14.1/lib/i18n/backend/key_value.rb | 204 
-- .../lib/i18n/backend/lazy_loadable.rb | 184 -- .../i18n-1.14.1/lib/i18n/backend/memoize.rb | 54 - .../i18n-1.14.1/lib/i18n/backend/metadata.rb | 71 - .../lib/i18n/backend/pluralization.rb | 96 - .../i18n-1.14.1/lib/i18n/backend/simple.rb | 113 - .../lib/i18n/backend/transliterator.rb | 108 - .../2.6.0/gems/i18n-1.14.1/lib/i18n/config.rb | 165 -- .../gems/i18n-1.14.1/lib/i18n/exceptions.rb | 157 -- .../gems/i18n-1.14.1/lib/i18n/gettext.rb | 28 - .../i18n-1.14.1/lib/i18n/gettext/helpers.rb | 75 - .../i18n-1.14.1/lib/i18n/gettext/po_parser.rb | 329 --- .../i18n-1.14.1/lib/i18n/interpolate/ruby.rb | 53 - .../2.6.0/gems/i18n-1.14.1/lib/i18n/locale.rb | 8 - .../i18n-1.14.1/lib/i18n/locale/fallbacks.rb | 97 - .../gems/i18n-1.14.1/lib/i18n/locale/tag.rb | 28 - .../lib/i18n/locale/tag/parents.rb | 24 - .../lib/i18n/locale/tag/rfc4646.rb | 74 - .../i18n-1.14.1/lib/i18n/locale/tag/simple.rb | 39 - .../gems/i18n-1.14.1/lib/i18n/middleware.rb | 17 - .../2.6.0/gems/i18n-1.14.1/lib/i18n/utils.rb | 55 - .../gems/i18n-1.14.1/lib/i18n/version.rb | 5 - 155 files changed, 20741 deletions(-) delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/activesupport-6.1.7.6/lib/active_support/core_ext/object/blank.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/LICENSE.txt delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent-ruby.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/agent.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/array.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/async.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atom.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/event.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/fiber_local_var.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/locals.rb delete mode 100644 
Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/lock_local_var.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/semaphore.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/atomic_direct_update.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomics.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb delete mode 100644 
Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/deprecation.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/logging.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/obligation.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/observable.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concurrent_ruby.jar delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/configuration.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/constants.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/dataflow.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/delay.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/errors.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/exchanger.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/executor_service.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb delete mode 100644 
Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/timer_set.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executors.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/future.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/hash.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/immutable_struct.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/ivar.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/map.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/maybe.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mutable_struct.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mvar.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/options.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promise.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promises.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/re_include.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/scheduled_task.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/set.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/settable_struct.rb delete mode 100644 
Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/condition.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/full_memory_barrier.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lock.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/object.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/safe_initialization.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/volatile.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/timer_task.rb delete mode 100644 
Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tuple.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tvar.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/engine.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_integer.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/processor_counter.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/version.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/MIT-LICENSE delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/base.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cache.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cache_file.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cascade.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/chain.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/fallbacks.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/flatten.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/gettext.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/interpolation_compiler.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/key_value.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/lazy_loadable.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/memoize.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/metadata.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/pluralization.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/simple.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/transliterator.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/config.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/exceptions.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext/helpers.rb delete mode 100644 
Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext/po_parser.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/interpolate/ruby.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/fallbacks.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/parents.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/rfc4646.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/simple.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/middleware.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/utils.rb delete mode 100644 Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/version.rb diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/activesupport-6.1.7.6/lib/active_support/core_ext/object/blank.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/activesupport-6.1.7.6/lib/active_support/core_ext/object/blank.rb deleted file mode 100644 index f36fef6cc91bc..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/activesupport-6.1.7.6/lib/active_support/core_ext/object/blank.rb +++ /dev/null @@ -1,155 +0,0 @@ -# frozen_string_literal: true - -require "concurrent/map" - -class Object - # An object is blank if it's false, empty, or a whitespace string. - # For example, +nil+, '', ' ', [], {}, and +false+ are all blank. - # - # This simplifies - # - # !address || address.empty? - # - # to - # - # address.blank? - # - # @return [true, false] - def blank? - respond_to?(:empty?) ? !!empty? : !self - end - - # An object is present if it's not blank. - # - # @return [true, false] - def present? - !blank? - end - - # Returns the receiver if it's present otherwise returns +nil+. - # object.presence is equivalent to - # - # object.present? ? object : nil - # - # For example, something like - # - # state = params[:state] if params[:state].present? - # country = params[:country] if params[:country].present? - # region = state || country || 'US' - # - # becomes - # - # region = params[:state].presence || params[:country].presence || 'US' - # - # @return [Object] - def presence - self if present? - end -end - -class NilClass - # +nil+ is blank: - # - # nil.blank? # => true - # - # @return [true] - def blank? - true - end -end - -class FalseClass - # +false+ is blank: - # - # false.blank? # => true - # - # @return [true] - def blank? - true - end -end - -class TrueClass - # +true+ is not blank: - # - # true.blank? # => false - # - # @return [false] - def blank? - false - end -end - -class Array - # An array is blank if it's empty: - # - # [].blank? # => true - # [1,2,3].blank? # => false - # - # @return [true, false] - alias_method :blank?, :empty? -end - -class Hash - # A hash is blank if it's empty: - # - # {}.blank? # => true - # { key: 'value' }.blank? # => false - # - # @return [true, false] - alias_method :blank?, :empty? 
-end - -class String - BLANK_RE = /\A[[:space:]]*\z/ - ENCODED_BLANKS = Concurrent::Map.new do |h, enc| - h[enc] = Regexp.new(BLANK_RE.source.encode(enc), BLANK_RE.options | Regexp::FIXEDENCODING) - end - - # A string is blank if it's empty or contains whitespaces only: - # - # ''.blank? # => true - # ' '.blank? # => true - # "\t\n\r".blank? # => true - # ' blah '.blank? # => false - # - # Unicode whitespace is supported: - # - # "\u00a0".blank? # => true - # - # @return [true, false] - def blank? - # The regexp that matches blank strings is expensive. For the case of empty - # strings we can speed up this method (~3.5x) with an empty? call. The - # penalty for the rest of strings is marginal. - empty? || - begin - BLANK_RE.match?(self) - rescue Encoding::CompatibilityError - ENCODED_BLANKS[self.encoding].match?(self) - end - end -end - -class Numeric #:nodoc: - # No number is blank: - # - # 1.blank? # => false - # 0.blank? # => false - # - # @return [false] - def blank? - false - end -end - -class Time #:nodoc: - # No Time is blank: - # - # Time.now.blank? # => false - # - # @return [false] - def blank? - false - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/LICENSE.txt b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/LICENSE.txt deleted file mode 100644 index 1026f28d0be92..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) Jerry D'Antonio -- released under the MIT license. - -http://www.opensource.org/licenses/mit-license.php - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent-ruby.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent-ruby.rb deleted file mode 100644 index e9a3dea4ab40d..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent-ruby.rb +++ /dev/null @@ -1,5 +0,0 @@ -# This file is here so that there is a file with the same name as the gem that -# can be required by Bundler.require. Applications should normally -# require 'concurrent'. 
- -require_relative "concurrent" diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent.rb deleted file mode 100644 index 87de46f1b8a1b..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent.rb +++ /dev/null @@ -1,134 +0,0 @@ -require 'concurrent/version' -require 'concurrent/constants' -require 'concurrent/errors' -require 'concurrent/configuration' - -require 'concurrent/atomics' -require 'concurrent/executors' -require 'concurrent/synchronization' - -require 'concurrent/atomic/atomic_markable_reference' -require 'concurrent/atomic/atomic_reference' -require 'concurrent/agent' -require 'concurrent/atom' -require 'concurrent/array' -require 'concurrent/hash' -require 'concurrent/set' -require 'concurrent/map' -require 'concurrent/tuple' -require 'concurrent/async' -require 'concurrent/dataflow' -require 'concurrent/delay' -require 'concurrent/exchanger' -require 'concurrent/future' -require 'concurrent/immutable_struct' -require 'concurrent/ivar' -require 'concurrent/maybe' -require 'concurrent/mutable_struct' -require 'concurrent/mvar' -require 'concurrent/promise' -require 'concurrent/scheduled_task' -require 'concurrent/settable_struct' -require 'concurrent/timer_task' -require 'concurrent/tvar' -require 'concurrent/promises' - -require 'concurrent/thread_safe/synchronized_delegator' -require 'concurrent/thread_safe/util' - -require 'concurrent/options' - -# @!macro internal_implementation_note -# -# @note **Private Implementation:** This abstraction is a private, internal -# implementation detail. It should never be used directly. - -# @!macro monotonic_clock_warning -# -# @note Time calculations on all platforms and languages are sensitive to -# changes to the system clock. To alleviate the potential problems -# associated with changing the system clock while an application is running, -# most modern operating systems provide a monotonic clock that operates -# independently of the system clock. A monotonic clock cannot be used to -# determine human-friendly clock times. A monotonic clock is used exclusively -# for calculating time intervals. Not all Ruby platforms provide access to an -# operating system monotonic clock. On these platforms a pure-Ruby monotonic -# clock will be used as a fallback. An operating system monotonic clock is both -# faster and more reliable than the pure-Ruby implementation. The pure-Ruby -# implementation should be fast and reliable enough for most non-realtime -# operations. At this time the common Ruby platforms that provide access to an -# operating system monotonic clock are MRI 2.1 and above and JRuby (all versions). -# -# @see http://linux.die.net/man/3/clock_gettime Linux clock_gettime(3) - -# @!macro copy_options -# -# ## Copy Options -# -# Object references in Ruby are mutable. This can lead to serious -# problems when the {#value} of an object is a mutable reference. Which -# is always the case unless the value is a `Fixnum`, `Symbol`, or similar -# "primitive" data type. Each instance can be configured with a few -# options that can help protect the program from potentially dangerous -# operations. 
Each of these options can be optionally set when the object -# instance is created: -# -# * `:dup_on_deref` When true the object will call the `#dup` method on -# the `value` object every time the `#value` method is called -# (default: false) -# * `:freeze_on_deref` When true the object will call the `#freeze` -# method on the `value` object every time the `#value` method is called -# (default: false) -# * `:copy_on_deref` When given a `Proc` object the `Proc` will be run -# every time the `#value` method is called. The `Proc` will be given -# the current `value` as its only argument and the result returned by -# the block will be the return value of the `#value` call. When `nil` -# this option will be ignored (default: nil) -# -# When multiple deref options are set the order of operations is strictly defined. -# The order of deref operations is: -# * `:copy_on_deref` -# * `:dup_on_deref` -# * `:freeze_on_deref` -# -# Because of this ordering there is no need to `#freeze` an object created by a -# provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`. -# Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is -# as close to the behavior of a "pure" functional language (like Erlang, Clojure, -# or Haskell) as we are likely to get in Ruby. - -# @!macro deref_options -# -# @option opts [Boolean] :dup_on_deref (false) Call `#dup` before -# returning the data from {#value} -# @option opts [Boolean] :freeze_on_deref (false) Call `#freeze` before -# returning the data from {#value} -# @option opts [Proc] :copy_on_deref (nil) When calling the {#value} -# method, call the given proc passing the internal value as the sole -# argument then return the new value returned from the proc. - -# @!macro executor_and_deref_options -# -# @param [Hash] opts the options used to define the behavior at update and deref -# and to specify the executor on which to perform actions -# @option opts [Executor] :executor when set use the given `Executor` instance. -# Three special values are also supported: `:io` returns the global pool for -# long, blocking (IO) tasks, `:fast` returns the global pool for short, fast -# operations, and `:immediate` returns the global `ImmediateExecutor` object. -# @!macro deref_options - -# @!macro warn.edge -# @api Edge -# @note **Edge Features** are under active development and may change frequently. -# -# - Deprecations are not added before incompatible changes. -# - Edge version: _major_ is always 0, _minor_ bump means incompatible change, -# _patch_ bump means compatible change. -# - Edge features may also lack tests and documentation. -# - Features developed in `concurrent-ruby-edge` are expected to move -# to `concurrent-ruby` when finalised. 
- - -# {include:file:README.md} -module Concurrent -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/agent.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/agent.rb deleted file mode 100644 index 2d32926ba14d0..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/agent.rb +++ /dev/null @@ -1,588 +0,0 @@ -require 'concurrent/configuration' -require 'concurrent/atomic/atomic_reference' -require 'concurrent/atomic/count_down_latch' -require 'concurrent/atomic/thread_local_var' -require 'concurrent/collection/copy_on_write_observer_set' -require 'concurrent/concern/observable' -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # `Agent` is inspired by Clojure's [agent](http://clojure.org/agents) - # function. An agent is a shared, mutable variable providing independent, - # uncoordinated, *asynchronous* change of individual values. Best used when - # the value will undergo frequent, complex updates. Suitable when the result - # of an update does not need to be known immediately. `Agent` is (mostly) - # functionally equivalent to Clojure's agent, except where the runtime - # prevents parity. - # - # Agents are reactive, not autonomous - there is no imperative message loop - # and no blocking receive. The state of an Agent should be itself immutable - # and the `#value` of an Agent is always immediately available for reading by - # any thread without any messages, i.e. observation does not require - # cooperation or coordination. - # - # Agent action dispatches are made using the various `#send` methods. These - # methods always return immediately. At some point later, in another thread, - # the following will happen: - # - # 1. The given `action` will be applied to the state of the Agent and the - # `args`, if any were supplied. - # 2. The return value of `action` will be passed to the validator lambda, - # if one has been set on the Agent. - # 3. If the validator succeeds or if no validator was given, the return value - # of the given `action` will become the new `#value` of the Agent. See - # `#initialize` for details. - # 4. If any observers were added to the Agent, they will be notified. See - # `#add_observer` for details. - # 5. If during the `action` execution any other dispatches are made (directly - # or indirectly), they will be held until after the `#value` of the Agent - # has been changed. - # - # If any exceptions are thrown by an action function, no nested dispatches - # will occur, and the exception will be cached in the Agent itself. When an - # Agent has errors cached, any subsequent interactions will immediately throw - # an exception, until the agent's errors are cleared. Agent errors can be - # examined with `#error` and the agent restarted with `#restart`. - # - # The actions of all Agents get interleaved amongst threads in a thread pool. - # At any point in time, at most one action for each Agent is being executed. - # Actions dispatched to an agent from another single agent or thread will - # occur in the order they were sent, potentially interleaved with actions - # dispatched to the same agent from other sources. The `#send` method should - # be used for actions that are CPU limited, while the `#send_off` method is - # appropriate for actions that may block on IO. - # - # Unlike in Clojure, `Agent` cannot participate in `Concurrent::TVar` transactions. 
- # - # ## Example - # - # ``` - # def next_fibonacci(set = nil) - # return [0, 1] if set.nil? - # set + [set[-2..-1].reduce{|sum,x| sum + x }] - # end - # - # # create an agent with an initial value - # agent = Concurrent::Agent.new(next_fibonacci) - # - # # send a few update requests - # 5.times do - # agent.send{|set| next_fibonacci(set) } - # end - # - # # wait for them to complete - # agent.await - # - # # get the current value - # agent.value #=> [0, 1, 1, 2, 3, 5, 8] - # ``` - # - # ## Observation - # - # Agents support observers through the {Concurrent::Observable} mixin module. - # Notification of observers occurs every time an action dispatch returns and - # the new value is successfully validated. Observation will *not* occur if the - # action raises an exception, if validation fails, or when a {#restart} occurs. - # - # When notified the observer will receive three arguments: `time`, `old_value`, - # and `new_value`. The `time` argument is the time at which the value change - # occurred. The `old_value` is the value of the Agent when the action began - # processing. The `new_value` is the value to which the Agent was set when the - # action completed. Note that `old_value` and `new_value` may be the same. - # This is not an error. It simply means that the action returned the same - # value. - # - # ## Nested Actions - # - # It is possible for an Agent action to post further actions back to itself. - # The nested actions will be enqueued normally then processed *after* the - # outer action completes, in the order they were sent, possibly interleaved - # with action dispatches from other threads. Nested actions never deadlock - # with one another and a failure in a nested action will never affect the - # outer action. - # - # Nested actions can be called using the Agent reference from the enclosing - # scope or by passing the reference in as a "send" argument. Nested actions - # cannot be post using `self` from within the action block/proc/lambda; `self` - # in this context will not reference the Agent. The preferred method for - # dispatching nested actions is to pass the Agent as an argument. This allows - # Ruby to more effectively manage the closing scope. - # - # Prefer this: - # - # ``` - # agent = Concurrent::Agent.new(0) - # agent.send(agent) do |value, this| - # this.send {|v| v + 42 } - # 3.14 - # end - # agent.value #=> 45.14 - # ``` - # - # Over this: - # - # ``` - # agent = Concurrent::Agent.new(0) - # agent.send do |value| - # agent.send {|v| v + 42 } - # 3.14 - # end - # ``` - # - # @!macro agent_await_warning - # - # **NOTE** Never, *under any circumstances*, call any of the "await" methods - # ({#await}, {#await_for}, {#await_for!}, and {#wait}) from within an action - # block/proc/lambda. The call will block the Agent and will always fail. - # Calling either {#await} or {#wait} (with a timeout of `nil`) will - # hopelessly deadlock the Agent with no possibility of recovery. 
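To make the warning above concrete: the `await` family is safe only from the dispatching thread, never from inside an action. A small sketch of the intended usage, assuming the concurrent-ruby API documented here:

```
require 'concurrent'

agent = Concurrent::Agent.new([])

# Dispatch work from the caller's thread...
3.times { |i| agent.send_off { |list| list + [i] } }

# ...and wait for it from the caller's thread, never from inside an action.
agent.await_for(10)   #=> true once all three actions have run
agent.value           #=> [0, 1, 2]

# With a hard deadline, await_for! raises instead of returning false.
begin
  agent.await_for!(10)
rescue Concurrent::TimeoutError
  warn 'agent did not settle in time'
end
```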
- # - # @!macro thread_safe_variable_comparison - # - # @see http://clojure.org/Agents Clojure Agents - # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State - class Agent < Synchronization::LockableObject - include Concern::Observable - - ERROR_MODES = [:continue, :fail].freeze - private_constant :ERROR_MODES - - AWAIT_FLAG = ::Object.new - private_constant :AWAIT_FLAG - - AWAIT_ACTION = ->(value, latch) { latch.count_down; AWAIT_FLAG } - private_constant :AWAIT_ACTION - - DEFAULT_ERROR_HANDLER = ->(agent, error) { nil } - private_constant :DEFAULT_ERROR_HANDLER - - DEFAULT_VALIDATOR = ->(value) { true } - private_constant :DEFAULT_VALIDATOR - - Job = Struct.new(:action, :args, :executor, :caller) - private_constant :Job - - # Raised during action processing or any other time in an Agent's lifecycle. - class Error < StandardError - def initialize(message = nil) - message ||= 'agent must be restarted before jobs can post' - super(message) - end - end - - # Raised when a new value obtained during action processing or at `#restart` - # fails validation. - class ValidationError < Error - def initialize(message = nil) - message ||= 'invalid value' - super(message) - end - end - - # The error mode this Agent is operating in. See {#initialize} for details. - attr_reader :error_mode - - # Create a new `Agent` with the given initial value and options. - # - # The `:validator` option must be `nil` or a side-effect free proc/lambda - # which takes one argument. On any intended value change the validator, if - # provided, will be called. If the new value is invalid the validator should - # return `false` or raise an error. - # - # The `:error_handler` option must be `nil` or a proc/lambda which takes two - # arguments. When an action raises an error or validation fails, either by - # returning false or raising an error, the error handler will be called. The - # arguments to the error handler will be a reference to the agent itself and - # the error object which was raised. - # - # The `:error_mode` may be either `:continue` (the default if an error - # handler is given) or `:fail` (the default if error handler nil or not - # given). - # - # If an action being run by the agent throws an error or doesn't pass - # validation the error handler, if present, will be called. After the - # handler executes if the error mode is `:continue` the Agent will continue - # as if neither the action that caused the error nor the error itself ever - # happened. - # - # If the mode is `:fail` the Agent will become {#failed?} and will stop - # accepting new action dispatches. Any previously queued actions will be - # held until {#restart} is called. The {#value} method will still work, - # returning the value of the Agent before the error. - # - # @param [Object] initial the initial value - # @param [Hash] opts the configuration options - # - # @option opts [Symbol] :error_mode either `:continue` or `:fail` - # @option opts [nil, Proc] :error_handler the (optional) error handler - # @option opts [nil, Proc] :validator the (optional) validation procedure - def initialize(initial, opts = {}) - super() - synchronize { ns_initialize(initial, opts) } - end - - # The current value (state) of the Agent, irrespective of any pending or - # in-progress actions. The value is always available and is non-blocking. - # - # @return [Object] the current value - def value - @current.value # TODO (pitr 12-Sep-2015): broken unsafe read? 
- end - - alias_method :deref, :value - - # When {#failed?} and {#error_mode} is `:fail`, returns the error object - # which caused the failure, else `nil`. When {#error_mode} is `:continue` - # will *always* return `nil`. - # - # @return [nil, Error] the error which caused the failure when {#failed?} - def error - @error.value - end - - alias_method :reason, :error - - # @!macro agent_send - # - # Dispatches an action to the Agent and returns immediately. Subsequently, - # in a thread from a thread pool, the {#value} will be set to the return - # value of the action. Action dispatches are only allowed when the Agent - # is not {#failed?}. - # - # The action must be a block/proc/lambda which takes 1 or more arguments. - # The first argument is the current {#value} of the Agent. Any arguments - # passed to the send method via the `args` parameter will be passed to the - # action as the remaining arguments. The action must return the new value - # of the Agent. - # - # * {#send} and {#send!} should be used for actions that are CPU limited - # * {#send_off}, {#send_off!}, and {#<<} are appropriate for actions that - # may block on IO - # * {#send_via} and {#send_via!} are used when a specific executor is to - # be used for the action - # - # @param [Array] args zero or more arguments to be passed to - # the action - # @param [Proc] action the action dispatch to be enqueued - # - # @yield [agent, value, *args] process the old value and return the new - # @yieldparam [Object] value the current {#value} of the Agent - # @yieldparam [Array] args zero or more arguments to pass to the - # action - # @yieldreturn [Object] the new value of the Agent - # - # @!macro send_return - # @return [Boolean] true if the action is successfully enqueued, false if - # the Agent is {#failed?} - def send(*args, &action) - enqueue_action_job(action, args, Concurrent.global_fast_executor) - end - - # @!macro agent_send - # - # @!macro send_bang_return_and_raise - # @return [Boolean] true if the action is successfully enqueued - # @raise [Concurrent::Agent::Error] if the Agent is {#failed?} - def send!(*args, &action) - raise Error.new unless send(*args, &action) - true - end - - # @!macro agent_send - # @!macro send_return - def send_off(*args, &action) - enqueue_action_job(action, args, Concurrent.global_io_executor) - end - - alias_method :post, :send_off - - # @!macro agent_send - # @!macro send_bang_return_and_raise - def send_off!(*args, &action) - raise Error.new unless send_off(*args, &action) - true - end - - # @!macro agent_send - # @!macro send_return - # @param [Concurrent::ExecutorService] executor the executor on which the - # action is to be dispatched - def send_via(executor, *args, &action) - enqueue_action_job(action, args, executor) - end - - # @!macro agent_send - # @!macro send_bang_return_and_raise - # @param [Concurrent::ExecutorService] executor the executor on which the - # action is to be dispatched - def send_via!(executor, *args, &action) - raise Error.new unless send_via(executor, *args, &action) - true - end - - # Dispatches an action to the Agent and returns immediately. Subsequently, - # in a thread from a thread pool, the {#value} will be set to the return - # value of the action. Appropriate for actions that may block on IO. - # - # @param [Proc] action the action dispatch to be enqueued - # @return [Concurrent::Agent] self - # @see #send_off - def <<(action) - send_off(&action) - self - end - - # Blocks the current thread (indefinitely!) 
until all actions dispatched - # thus far, from this thread or nested by the Agent, have occurred. Will - # block when {#failed?}. Will never return if a failed Agent is {#restart} - # with `:clear_actions` true. - # - # Returns a reference to `self` to support method chaining: - # - # ``` - # current_value = agent.await.value - # ``` - # - # @return [Boolean] self - # - # @!macro agent_await_warning - def await - wait(nil) - self - end - - # Blocks the current thread until all actions dispatched thus far, from this - # thread or nested by the Agent, have occurred, or the timeout (in seconds) - # has elapsed. - # - # @param [Float] timeout the maximum number of seconds to wait - # @return [Boolean] true if all actions complete before timeout else false - # - # @!macro agent_await_warning - def await_for(timeout) - wait(timeout.to_f) - end - - # Blocks the current thread until all actions dispatched thus far, from this - # thread or nested by the Agent, have occurred, or the timeout (in seconds) - # has elapsed. - # - # @param [Float] timeout the maximum number of seconds to wait - # @return [Boolean] true if all actions complete before timeout - # - # @raise [Concurrent::TimeoutError] when timout is reached - # - # @!macro agent_await_warning - def await_for!(timeout) - raise Concurrent::TimeoutError unless wait(timeout.to_f) - true - end - - # Blocks the current thread until all actions dispatched thus far, from this - # thread or nested by the Agent, have occurred, or the timeout (in seconds) - # has elapsed. Will block indefinitely when timeout is nil or not given. - # - # Provided mainly for consistency with other classes in this library. Prefer - # the various `await` methods instead. - # - # @param [Float] timeout the maximum number of seconds to wait - # @return [Boolean] true if all actions complete before timeout else false - # - # @!macro agent_await_warning - def wait(timeout = nil) - latch = Concurrent::CountDownLatch.new(1) - enqueue_await_job(latch) - latch.wait(timeout) - end - - # Is the Agent in a failed state? - # - # @see #restart - def failed? - !@error.value.nil? - end - - alias_method :stopped?, :failed? - - # When an Agent is {#failed?}, changes the Agent {#value} to `new_value` - # then un-fails the Agent so that action dispatches are allowed again. If - # the `:clear_actions` option is give and true, any actions queued on the - # Agent that were being held while it was failed will be discarded, - # otherwise those held actions will proceed. The `new_value` must pass the - # validator if any, or `restart` will raise an exception and the Agent will - # remain failed with its old {#value} and {#error}. Observers, if any, will - # not be notified of the new state. - # - # @param [Object] new_value the new value for the Agent once restarted - # @param [Hash] opts the configuration options - # @option opts [Symbol] :clear_actions true if all enqueued but unprocessed - # actions should be discarded on restart, else false (default: false) - # @return [Boolean] true - # - # @raise [Concurrent:AgentError] when not failed - def restart(new_value, opts = {}) - clear_actions = opts.fetch(:clear_actions, false) - synchronize do - raise Error.new('agent is not failed') unless failed? - raise ValidationError unless ns_validate(new_value) - @current.value = new_value - @error.value = nil - @queue.clear if clear_actions - ns_post_next_job unless @queue.empty? - end - true - end - - class << self - - # Blocks the current thread (indefinitely!) 
until all actions dispatched - # thus far to all the given Agents, from this thread or nested by the - # given Agents, have occurred. Will block when any of the agents are - # failed. Will never return if a failed Agent is restart with - # `:clear_actions` true. - # - # @param [Array] agents the Agents on which to wait - # @return [Boolean] true - # - # @!macro agent_await_warning - def await(*agents) - agents.each { |agent| agent.await } - true - end - - # Blocks the current thread until all actions dispatched thus far to all - # the given Agents, from this thread or nested by the given Agents, have - # occurred, or the timeout (in seconds) has elapsed. - # - # @param [Float] timeout the maximum number of seconds to wait - # @param [Array] agents the Agents on which to wait - # @return [Boolean] true if all actions complete before timeout else false - # - # @!macro agent_await_warning - def await_for(timeout, *agents) - end_at = Concurrent.monotonic_time + timeout.to_f - ok = agents.length.times do |i| - break false if (delay = end_at - Concurrent.monotonic_time) < 0 - break false unless agents[i].await_for(delay) - end - !!ok - end - - # Blocks the current thread until all actions dispatched thus far to all - # the given Agents, from this thread or nested by the given Agents, have - # occurred, or the timeout (in seconds) has elapsed. - # - # @param [Float] timeout the maximum number of seconds to wait - # @param [Array] agents the Agents on which to wait - # @return [Boolean] true if all actions complete before timeout - # - # @raise [Concurrent::TimeoutError] when timout is reached - # @!macro agent_await_warning - def await_for!(timeout, *agents) - raise Concurrent::TimeoutError unless await_for(timeout, *agents) - true - end - end - - private - - def ns_initialize(initial, opts) - @error_mode = opts[:error_mode] - @error_handler = opts[:error_handler] - - if @error_mode && !ERROR_MODES.include?(@error_mode) - raise ArgumentError.new('unrecognized error mode') - elsif @error_mode.nil? - @error_mode = @error_handler ? :continue : :fail - end - - @error_handler ||= DEFAULT_ERROR_HANDLER - @validator = opts.fetch(:validator, DEFAULT_VALIDATOR) - @current = Concurrent::AtomicReference.new(initial) - @error = Concurrent::AtomicReference.new(nil) - @caller = Concurrent::ThreadLocalVar.new(nil) - @queue = [] - - self.observers = Collection::CopyOnNotifyObserverSet.new - end - - def enqueue_action_job(action, args, executor) - raise ArgumentError.new('no action given') unless action - job = Job.new(action, args, executor, @caller.value || Thread.current.object_id) - synchronize { ns_enqueue_job(job) } - end - - def enqueue_await_job(latch) - synchronize do - if (index = ns_find_last_job_for_thread) - job = Job.new(AWAIT_ACTION, [latch], Concurrent.global_immediate_executor, - Thread.current.object_id) - ns_enqueue_job(job, index+1) - else - latch.count_down - true - end - end - end - - def ns_enqueue_job(job, index = nil) - # a non-nil index means this is an await job - return false if index.nil? && failed? 
- index ||= @queue.length - @queue.insert(index, job) - # if this is the only job, post to executor - ns_post_next_job if @queue.length == 1 - true - end - - def ns_post_next_job - @queue.first.executor.post { execute_next_job } - end - - def execute_next_job - job = synchronize { @queue.first } - old_value = @current.value - - @caller.value = job.caller # for nested actions - new_value = job.action.call(old_value, *job.args) - @caller.value = nil - - return if new_value == AWAIT_FLAG - - if ns_validate(new_value) - @current.value = new_value - observers.notify_observers(Time.now, old_value, new_value) - else - handle_error(ValidationError.new) - end - rescue => error - handle_error(error) - ensure - synchronize do - @queue.shift - unless failed? || @queue.empty? - ns_post_next_job - end - end - end - - def ns_validate(value) - @validator.call(value) - rescue - false - end - - def handle_error(error) - # stop new jobs from posting - @error.value = error if @error_mode == :fail - @error_handler.call(self, error) - rescue - # do nothing - end - - def ns_find_last_job_for_thread - @queue.rindex { |job| job.caller == Thread.current.object_id } - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/array.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/array.rb deleted file mode 100644 index 96434a288dc35..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/array.rb +++ /dev/null @@ -1,56 +0,0 @@ -require 'concurrent/utility/engine' -require 'concurrent/thread_safe/util' - -module Concurrent - - # @!macro concurrent_array - # - # A thread-safe subclass of Array. This version locks against the object - # itself for every method call, ensuring only one thread can be reading - # or writing at a time. This includes iteration methods like `#each`. - # - # @note `a += b` is **not** a **thread-safe** operation on - # `Concurrent::Array`. It reads array `a`, then it creates new `Concurrent::Array` - # which is concatenation of `a` and `b`, then it writes the concatenation to `a`. - # The read and write are independent operations they do not form a single atomic - # operation therefore when two `+=` operations are executed concurrently updates - # may be lost. Use `#concat` instead. - # - # @see http://ruby-doc.org/core/Array.html Ruby standard library `Array` - - # @!macro internal_implementation_note - ArrayImplementation = case - when Concurrent.on_cruby? - # Array is thread-safe in practice because CRuby runs - # threads one at a time and does not do context - # switching during the execution of C functions. - ::Array - - when Concurrent.on_jruby? - require 'jruby/synchronized' - - class JRubyArray < ::Array - include JRuby::Synchronized - end - JRubyArray - - when Concurrent.on_truffleruby? 
- require 'concurrent/thread_safe/util/data_structures' - - class TruffleRubyArray < ::Array - end - - ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyArray - TruffleRubyArray - - else - warn 'Possibly unsupported Ruby implementation' - ::Array - end - private_constant :ArrayImplementation - - # @!macro concurrent_array - class Array < ArrayImplementation - end - -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/async.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/async.rb deleted file mode 100644 index f9f8adf00d4e7..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/async.rb +++ /dev/null @@ -1,449 +0,0 @@ -require 'concurrent/configuration' -require 'concurrent/ivar' -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # A mixin module that provides simple asynchronous behavior to a class, - # turning it into a simple actor. Loosely based on Erlang's - # [gen_server](http://www.erlang.org/doc/man/gen_server.html), but without - # supervision or linking. - # - # A more feature-rich {Concurrent::Actor} is also available when the - # capabilities of `Async` are too limited. - # - # ```cucumber - # Feature: - # As a stateful, plain old Ruby class - # I want safe, asynchronous behavior - # So my long-running methods don't block the main thread - # ``` - # - # The `Async` module is a way to mix simple yet powerful asynchronous - # capabilities into any plain old Ruby object or class, turning each object - # into a simple Actor. Method calls are processed on a background thread. The - # caller is free to perform other actions while processing occurs in the - # background. - # - # Method calls to the asynchronous object are made via two proxy methods: - # `async` (alias `cast`) and `await` (alias `call`). These proxy methods post - # the method call to the object's background thread and return a "future" - # which will eventually contain the result of the method call. - # - # This behavior is loosely patterned after Erlang's `gen_server` behavior. - # When an Erlang module implements the `gen_server` behavior it becomes - # inherently asynchronous. The `start` or `start_link` function spawns a - # process (similar to a thread but much more lightweight and efficient) and - # returns the ID of the process. Using the process ID, other processes can - # send messages to the `gen_server` via the `cast` and `call` methods. Unlike - # Erlang's `gen_server`, however, `Async` classes do not support linking or - # supervision trees. - # - # ## Basic Usage - # - # When this module is mixed into a class, objects of the class become inherently - # asynchronous. Each object gets its own background thread on which to post - # asynchronous method calls. Asynchronous method calls are executed in the - # background one at a time in the order they are received. - # - # To create an asynchronous class, simply mix in the `Concurrent::Async` module: - # - # ``` - # class Hello - # include Concurrent::Async - # - # def hello(name) - # "Hello, #{name}!" - # end - # end - # ``` - # - # Mixing this module into a class provides each object two proxy methods: - # `async` and `await`. These methods are thread safe with respect to the - # enclosing object. The former proxy allows methods to be called - # asynchronously by posting to the object's internal thread. 
The latter proxy - # allows a method to be called synchronously but does so safely with respect - # to any pending asynchronous method calls and ensures proper ordering. Both - # methods return a {Concurrent::IVar} which can be inspected for the result - # of the proxied method call. Calling a method with `async` will return a - # `:pending` `IVar` whereas `await` will return a `:complete` `IVar`. - # - # ``` - # class Echo - # include Concurrent::Async - # - # def echo(msg) - # print "#{msg}\n" - # end - # end - # - # horn = Echo.new - # horn.echo('zero') # synchronous, not thread-safe - # # returns the actual return value of the method - # - # horn.async.echo('one') # asynchronous, non-blocking, thread-safe - # # returns an IVar in the :pending state - # - # horn.await.echo('two') # synchronous, blocking, thread-safe - # # returns an IVar in the :complete state - # ``` - # - # ## Let It Fail - # - # The `async` and `await` proxy methods have built-in error protection based - # on Erlang's famous "let it fail" philosophy. Instance methods should not be - # programmed defensively. When an exception is raised by a delegated method - # the proxy will rescue the exception, expose it to the caller as the `reason` - # attribute of the returned future, then process the next method call. - # - # ## Calling Methods Internally - # - # External method calls should *always* use the `async` and `await` proxy - # methods. When one method calls another method, the `async` proxy should - # rarely be used and the `await` proxy should *never* be used. - # - # When an object calls one of its own methods using the `await` proxy the - # second call will be enqueued *behind* the currently running method call. - # Any attempt to wait on the result will fail as the second call will never - # run until after the current call completes. - # - # Calling a method using the `await` proxy from within a method that was - # itself called using `async` or `await` will irreversibly deadlock the - # object. Do *not* do this, ever. - # - # ## Instance Variables and Attribute Accessors - # - # Instance variables do not need to be thread-safe so long as they are private. - # Asynchronous method calls are processed in the order they are received and - # are processed one at a time. Therefore private instance variables can only - # be accessed by one thread at a time. This is inherently thread-safe. - # - # When using private instance variables within asynchronous methods, the best - # practice is to read the instance variable into a local variable at the start - # of the method then update the instance variable at the *end* of the method. - # This way, should an exception be raised during method execution the internal - # state of the object will not have been changed. - # - # ### Reader Attributes - # - # The use of `attr_reader` is discouraged. Internal state exposed externally, - # when necessary, should be done through accessor methods. The instance - # variables exposed by these methods *must* be thread-safe, or they must be - # called using the `async` and `await` proxy methods. These two approaches are - # subtly different. - # - # When internal state is accessed via the `async` and `await` proxy methods, - # the returned value represents the object's state *at the time the call is - # processed*, which may *not* be the state of the object at the time the call - # is made. - # - # To get the state *at the current* time, irrespective of an enqueued method - # calls, a reader method must be called directly. 
This is inherently unsafe - # unless the instance variable is itself thread-safe, preferably using one - # of the thread-safe classes within this library. Because the thread-safe - # classes within this library are internally-locking or non-locking, they can - # be safely used from within asynchronous methods without causing deadlocks. - # - # Generally speaking, the best practice is to *not* expose internal state via - # reader methods. The best practice is to simply use the method's return value. - # - # ### Writer Attributes - # - # Writer attributes should never be used with asynchronous classes. Changing - # the state externally, even when done in the thread-safe way, is not logically - # consistent. Changes to state need to be timed with respect to all asynchronous - # method calls which my be in-process or enqueued. The only safe practice is to - # pass all necessary data to each method as arguments and let the method update - # the internal state as necessary. - # - # ## Class Constants, Variables, and Methods - # - # ### Class Constants - # - # Class constants do not need to be thread-safe. Since they are read-only and - # immutable they may be safely read both externally and from within - # asynchronous methods. - # - # ### Class Variables - # - # Class variables should be avoided. Class variables represent shared state. - # Shared state is anathema to concurrency. Should there be a need to share - # state using class variables they *must* be thread-safe, preferably - # using the thread-safe classes within this library. When updating class - # variables, never assign a new value/object to the variable itself. Assignment - # is not thread-safe in Ruby. Instead, use the thread-safe update functions - # of the variable itself to change the value. - # - # The best practice is to *never* use class variables with `Async` classes. - # - # ### Class Methods - # - # Class methods which are pure functions are safe. Class methods which modify - # class variables should be avoided, for all the reasons listed above. - # - # ## An Important Note About Thread Safe Guarantees - # - # > Thread safe guarantees can only be made when asynchronous method calls - # > are not mixed with direct method calls. Use only direct method calls - # > when the object is used exclusively on a single thread. Use only - # > `async` and `await` when the object is shared between threads. Once you - # > call a method using `async` or `await`, you should no longer call methods - # > directly on the object. Use `async` and `await` exclusively from then on. - # - # @example - # - # class Echo - # include Concurrent::Async - # - # def echo(msg) - # print "#{msg}\n" - # end - # end - # - # horn = Echo.new - # horn.echo('zero') # synchronous, not thread-safe - # # returns the actual return value of the method - # - # horn.async.echo('one') # asynchronous, non-blocking, thread-safe - # # returns an IVar in the :pending state - # - # horn.await.echo('two') # synchronous, blocking, thread-safe - # # returns an IVar in the :complete state - # - # @see Concurrent::Actor - # @see https://en.wikipedia.org/wiki/Actor_model "Actor Model" at Wikipedia - # @see http://www.erlang.org/doc/man/gen_server.html Erlang gen_server - # @see http://c2.com/cgi/wiki?LetItCrash "Let It Crash" at http://c2.com/ - module Async - - # @!method self.new(*args, &block) - # - # Instanciate a new object and ensure proper initialization of the - # synchronization mechanisms. 
- # - # @param [Array] args Zero or more arguments to be passed to the - # object's initializer. - # @param [Proc] block Optional block to pass to the object's initializer. - # @return [Object] A properly initialized object of the asynchronous class. - - # Check for the presence of a method on an object and determine if a given - # set of arguments matches the required arity. - # - # @param [Object] obj the object to check against - # @param [Symbol] method the method to check the object for - # @param [Array] args zero or more arguments for the arity check - # - # @raise [NameError] the object does not respond to `method` method - # @raise [ArgumentError] the given `args` do not match the arity of `method` - # - # @note This check is imperfect because of the way Ruby reports the arity of - # methods with a variable number of arguments. It is possible to determine - # if too few arguments are given but impossible to determine if too many - # arguments are given. This check may also fail to recognize dynamic behavior - # of the object, such as methods simulated with `method_missing`. - # - # @see http://www.ruby-doc.org/core-2.1.1/Method.html#method-i-arity Method#arity - # @see http://ruby-doc.org/core-2.1.0/Object.html#method-i-respond_to-3F Object#respond_to? - # @see http://www.ruby-doc.org/core-2.1.0/BasicObject.html#method-i-method_missing BasicObject#method_missing - # - # @!visibility private - def self.validate_argc(obj, method, *args) - argc = args.length - arity = obj.method(method).arity - - if arity >= 0 && argc != arity - raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity})") - elsif arity < 0 && (arity = (arity + 1).abs) > argc - raise ArgumentError.new("wrong number of arguments (#{argc} for #{arity}..*)") - end - end - - # @!visibility private - def self.included(base) - base.singleton_class.send(:alias_method, :original_new, :new) - base.extend(ClassMethods) - super(base) - end - - # @!visibility private - module ClassMethods - def new(*args, &block) - obj = original_new(*args, &block) - obj.send(:init_synchronization) - obj - end - ruby2_keywords :new if respond_to?(:ruby2_keywords, true) - end - private_constant :ClassMethods - - # Delegates asynchronous, thread-safe method calls to the wrapped object. - # - # @!visibility private - class AsyncDelegator < Synchronization::LockableObject - safe_initialization! - - # Create a new delegator object wrapping the given delegate. - # - # @param [Object] delegate the object to wrap and delegate method calls to - def initialize(delegate) - super() - @delegate = delegate - @queue = [] - @executor = Concurrent.global_io_executor - @ruby_pid = $$ - end - - # Delegates method calls to the wrapped object. 
- # - # @param [Symbol] method the method being called - # @param [Array] args zero or more arguments to the method - # - # @return [IVar] the result of the method call - # - # @raise [NameError] the object does not respond to `method` method - # @raise [ArgumentError] the given `args` do not match the arity of `method` - def method_missing(method, *args, &block) - super unless @delegate.respond_to?(method) - Async::validate_argc(@delegate, method, *args) - - ivar = Concurrent::IVar.new - synchronize do - reset_if_forked - @queue.push [ivar, method, args, block] - @executor.post { perform } if @queue.length == 1 - end - - ivar - end - - # Check whether the method is responsive - # - # @param [Symbol] method the method being called - def respond_to_missing?(method, include_private = false) - @delegate.respond_to?(method) || super - end - - # Perform all enqueued tasks. - # - # This method must be called from within the executor. It must not be - # called while already running. It will loop until the queue is empty. - def perform - loop do - ivar, method, args, block = synchronize { @queue.first } - break unless ivar # queue is empty - - begin - ivar.set(@delegate.send(method, *args, &block)) - rescue => error - ivar.fail(error) - end - - synchronize do - @queue.shift - return if @queue.empty? - end - end - end - - def reset_if_forked - if $$ != @ruby_pid - @queue.clear - @ruby_pid = $$ - end - end - end - private_constant :AsyncDelegator - - # Delegates synchronous, thread-safe method calls to the wrapped object. - # - # @!visibility private - class AwaitDelegator - - # Create a new delegator object wrapping the given delegate. - # - # @param [AsyncDelegator] delegate the object to wrap and delegate method calls to - def initialize(delegate) - @delegate = delegate - end - - # Delegates method calls to the wrapped object. - # - # @param [Symbol] method the method being called - # @param [Array] args zero or more arguments to the method - # - # @return [IVar] the result of the method call - # - # @raise [NameError] the object does not respond to `method` method - # @raise [ArgumentError] the given `args` do not match the arity of `method` - def method_missing(method, *args, &block) - ivar = @delegate.send(method, *args, &block) - ivar.wait - ivar - end - - # Check whether the method is responsive - # - # @param [Symbol] method the method being called - def respond_to_missing?(method, include_private = false) - @delegate.respond_to?(method) || super - end - end - private_constant :AwaitDelegator - - # Causes the chained method call to be performed asynchronously on the - # object's thread. The delegated method will return a future in the - # `:pending` state and the method call will have been scheduled on the - # object's thread. The final disposition of the method call can be obtained - # by inspecting the returned future. - # - # @!macro async_thread_safety_warning - # @note The method call is guaranteed to be thread safe with respect to - # all other method calls against the same object that are called with - # either `async` or `await`. The mutable nature of Ruby references - # (and object orientation in general) prevent any other thread safety - # guarantees. Do NOT mix direct method calls with delegated method calls. - # Use *only* delegated method calls when sharing the object between threads. 
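Putting the Async rules above together: external callers go through `async`/`await`, and a raised exception surfaces as the `reason` of the returned IVar instead of propagating to the call site. A hedged sketch using the API described in this removed file; the `Downloader` class is invented for illustration:

```
require 'concurrent'

class Downloader
  include Concurrent::Async

  def fetch(url)
    raise ArgumentError, 'no url given' if url.nil?
    "fetched #{url}"
  end
end

downloader = Downloader.new

ivar = downloader.await.fetch('https://example.com')
ivar.fulfilled?   #=> true
ivar.value        #=> "fetched https://example.com"

# "Let it fail": the exception is captured, not raised at the call site.
failed = downloader.await.fetch(nil)
failed.rejected?  #=> true
failed.reason     #=> #<ArgumentError: no url given>
```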
- # - # @return [Concurrent::IVar] the pending result of the asynchronous operation - # - # @raise [NameError] the object does not respond to the requested method - # @raise [ArgumentError] the given `args` do not match the arity of - # the requested method - def async - @__async_delegator__ - end - alias_method :cast, :async - - # Causes the chained method call to be performed synchronously on the - # current thread. The delegated will return a future in either the - # `:fulfilled` or `:rejected` state and the delegated method will have - # completed. The final disposition of the delegated method can be obtained - # by inspecting the returned future. - # - # @!macro async_thread_safety_warning - # - # @return [Concurrent::IVar] the completed result of the synchronous operation - # - # @raise [NameError] the object does not respond to the requested method - # @raise [ArgumentError] the given `args` do not match the arity of the - # requested method - def await - @__await_delegator__ - end - alias_method :call, :await - - # Initialize the internal serializer and other stnchronization mechanisms. - # - # @note This method *must* be called immediately upon object construction. - # This is the only way thread-safe initialization can be guaranteed. - # - # @!visibility private - def init_synchronization - return self if defined?(@__async_initialized__) && @__async_initialized__ - @__async_initialized__ = true - @__async_delegator__ = AsyncDelegator.new(self) - @__await_delegator__ = AwaitDelegator.new(@__async_delegator__) - self - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atom.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atom.rb deleted file mode 100644 index 1074006d76a04..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atom.rb +++ /dev/null @@ -1,222 +0,0 @@ -require 'concurrent/atomic/atomic_reference' -require 'concurrent/collection/copy_on_notify_observer_set' -require 'concurrent/concern/observable' -require 'concurrent/synchronization/object' - -# @!macro thread_safe_variable_comparison -# -# ## Thread-safe Variable Classes -# -# Each of the thread-safe variable classes is designed to solve a different -# problem. In general: -# -# * *{Concurrent::Agent}:* Shared, mutable variable providing independent, -# uncoordinated, *asynchronous* change of individual values. Best used when -# the value will undergo frequent, complex updates. Suitable when the result -# of an update does not need to be known immediately. -# * *{Concurrent::Atom}:* Shared, mutable variable providing independent, -# uncoordinated, *synchronous* change of individual values. Best used when -# the value will undergo frequent reads but only occasional, though complex, -# updates. Suitable when the result of an update must be known immediately. -# * *{Concurrent::AtomicReference}:* A simple object reference that can be updated -# atomically. Updates are synchronous but fast. Best used when updates a -# simple set operations. Not suitable when updates are complex. -# {Concurrent::AtomicBoolean} and {Concurrent::AtomicFixnum} are similar -# but optimized for the given data type. -# * *{Concurrent::Exchanger}:* Shared, stateless synchronization point. Used -# when two or more threads need to exchange data. The threads will pair then -# block on each other until the exchange is complete. 
-# * *{Concurrent::MVar}:* Shared synchronization point. Used when one thread -# must give a value to another, which must take the value. The threads will -# block on each other until the exchange is complete. -# * *{Concurrent::ThreadLocalVar}:* Shared, mutable, isolated variable which -# holds a different value for each thread which has access. Often used as -# an instance variable in objects which must maintain different state -# for different threads. -# * *{Concurrent::TVar}:* Shared, mutable variables which provide -# *coordinated*, *synchronous*, change of *many* stated. Used when multiple -# value must change together, in an all-or-nothing transaction. - - -module Concurrent - - # Atoms provide a way to manage shared, synchronous, independent state. - # - # An atom is initialized with an initial value and an optional validation - # proc. At any time the value of the atom can be synchronously and safely - # changed. If a validator is given at construction then any new value - # will be checked against the validator and will be rejected if the - # validator returns false or raises an exception. - # - # There are two ways to change the value of an atom: {#compare_and_set} and - # {#swap}. The former will set the new value if and only if it validates and - # the current value matches the new value. The latter will atomically set the - # new value to the result of running the given block if and only if that - # value validates. - # - # ## Example - # - # ``` - # def next_fibonacci(set = nil) - # return [0, 1] if set.nil? - # set + [set[-2..-1].reduce{|sum,x| sum + x }] - # end - # - # # create an atom with an initial value - # atom = Concurrent::Atom.new(next_fibonacci) - # - # # send a few update requests - # 5.times do - # atom.swap{|set| next_fibonacci(set) } - # end - # - # # get the current value - # atom.value #=> [0, 1, 1, 2, 3, 5, 8] - # ``` - # - # ## Observation - # - # Atoms support observers through the {Concurrent::Observable} mixin module. - # Notification of observers occurs every time the value of the Atom changes. - # When notified the observer will receive three arguments: `time`, `old_value`, - # and `new_value`. The `time` argument is the time at which the value change - # occurred. The `old_value` is the value of the Atom when the change began - # The `new_value` is the value to which the Atom was set when the change - # completed. Note that `old_value` and `new_value` may be the same. This is - # not an error. It simply means that the change operation returned the same - # value. - # - # Unlike in Clojure, `Atom` cannot participate in {Concurrent::TVar} transactions. - # - # @!macro thread_safe_variable_comparison - # - # @see http://clojure.org/atoms Clojure Atoms - # @see http://clojure.org/state Values and Change - Clojure's approach to Identity and State - class Atom < Synchronization::Object - include Concern::Observable - - safe_initialization! - attr_atomic(:value) - private :value=, :swap_value, :compare_and_set_value, :update_value - public :value - alias_method :deref, :value - - # @!method value - # The current value of the atom. - # - # @return [Object] The current value. - - # Create a new atom with the given initial value. - # - # @param [Object] value The initial value - # @param [Hash] opts The options used to configure the atom - # @option opts [Proc] :validator (nil) Optional proc used to validate new - # values. It must accept one and only one argument which will be the - # intended new value. 
The validator will return true if the new value - # is acceptable else return false (preferrably) or raise an exception. - # - # @!macro deref_options - # - # @raise [ArgumentError] if the validator is not a `Proc` (when given) - def initialize(value, opts = {}) - super() - @Validator = opts.fetch(:validator, -> v { true }) - self.observers = Collection::CopyOnNotifyObserverSet.new - self.value = value - end - - # Atomically swaps the value of atom using the given block. The current - # value will be passed to the block, as will any arguments passed as - # arguments to the function. The new value will be validated against the - # (optional) validator proc given at construction. If validation fails the - # value will not be changed. - # - # Internally, {#swap} reads the current value, applies the block to it, and - # attempts to compare-and-set it in. Since another thread may have changed - # the value in the intervening time, it may have to retry, and does so in a - # spin loop. The net effect is that the value will always be the result of - # the application of the supplied block to a current value, atomically. - # However, because the block might be called multiple times, it must be free - # of side effects. - # - # @note The given block may be called multiple times, and thus should be free - # of side effects. - # - # @param [Object] args Zero or more arguments passed to the block. - # - # @yield [value, args] Calculates a new value for the atom based on the - # current value and any supplied arguments. - # @yieldparam value [Object] The current value of the atom. - # @yieldparam args [Object] All arguments passed to the function, in order. - # @yieldreturn [Object] The intended new value of the atom. - # - # @return [Object] The final value of the atom after all operations and - # validations are complete. - # - # @raise [ArgumentError] When no block is given. - def swap(*args) - raise ArgumentError.new('no block given') unless block_given? - - loop do - old_value = value - new_value = yield(old_value, *args) - begin - break old_value unless valid?(new_value) - break new_value if compare_and_set(old_value, new_value) - rescue - break old_value - end - end - end - - # Atomically sets the value of atom to the new value if and only if the - # current value of the atom is identical to the old value and the new - # value successfully validates against the (optional) validator given - # at construction. - # - # @param [Object] old_value The expected current value. - # @param [Object] new_value The intended new value. - # - # @return [Boolean] True if the value is changed else false. - def compare_and_set(old_value, new_value) - if valid?(new_value) && compare_and_set_value(old_value, new_value) - observers.notify_observers(Time.now, old_value, new_value) - true - else - false - end - end - - # Atomically sets the value of atom to the new value without regard for the - # current value so long as the new value successfully validates against the - # (optional) validator given at construction. - # - # @param [Object] new_value The intended new value. - # - # @return [Object] The final value of the atom after all operations and - # validations are complete. - def reset(new_value) - old_value = value - if valid?(new_value) - self.value = new_value - observers.notify_observers(Time.now, old_value, new_value) - new_value - else - old_value - end - end - - private - - # Is the new value valid? - # - # @param [Object] new_value The intended new value. 
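For contrast with Agent's asynchronous sends, the synchronous Atom operations documented above compose like this. An illustrative sketch; the validator and values are made up:

```
require 'concurrent'

# Only non-negative integers are acceptable values.
counter = Concurrent::Atom.new(0, validator: ->(v) { v.is_a?(Integer) && v >= 0 })

counter.swap { |v| v + 1 }      #=> 1  (the block may be retried, keep it side-effect free)
counter.compare_and_set(1, 5)   #=> true,  value is now 5
counter.compare_and_set(1, 9)   #=> false, the current value is no longer 1
counter.reset(-1)               #=> 5, rejected by the validator, value unchanged
counter.value                   #=> 5
```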
- # @return [Boolean] false if the validator function returns false or raises - # an exception else true - def valid?(new_value) - @Validator.call(new_value) - rescue - false - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb deleted file mode 100644 index f775691a2a6a5..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb +++ /dev/null @@ -1,127 +0,0 @@ -require 'concurrent/utility/native_extension_loader' # load native parts first - -require 'concurrent/atomic/mutex_atomic_boolean' - -module Concurrent - - ################################################################### - - # @!macro atomic_boolean_method_initialize - # - # Creates a new `AtomicBoolean` with the given initial value. - # - # @param [Boolean] initial the initial value - - # @!macro atomic_boolean_method_value_get - # - # Retrieves the current `Boolean` value. - # - # @return [Boolean] the current value - - # @!macro atomic_boolean_method_value_set - # - # Explicitly sets the value. - # - # @param [Boolean] value the new value to be set - # - # @return [Boolean] the current value - - # @!macro atomic_boolean_method_true_question - # - # Is the current value `true` - # - # @return [Boolean] true if the current value is `true`, else false - - # @!macro atomic_boolean_method_false_question - # - # Is the current value `false` - # - # @return [Boolean] true if the current value is `false`, else false - - # @!macro atomic_boolean_method_make_true - # - # Explicitly sets the value to true. - # - # @return [Boolean] true if value has changed, otherwise false - - # @!macro atomic_boolean_method_make_false - # - # Explicitly sets the value to false. - # - # @return [Boolean] true if value has changed, otherwise false - - ################################################################### - - # @!macro atomic_boolean_public_api - # - # @!method initialize(initial = false) - # @!macro atomic_boolean_method_initialize - # - # @!method value - # @!macro atomic_boolean_method_value_get - # - # @!method value=(value) - # @!macro atomic_boolean_method_value_set - # - # @!method true? - # @!macro atomic_boolean_method_true_question - # - # @!method false? - # @!macro atomic_boolean_method_false_question - # - # @!method make_true - # @!macro atomic_boolean_method_make_true - # - # @!method make_false - # @!macro atomic_boolean_method_make_false - - ################################################################### - - # @!visibility private - # @!macro internal_implementation_note - AtomicBooleanImplementation = case - when Concurrent.on_cruby? && Concurrent.c_extensions_loaded? - CAtomicBoolean - when Concurrent.on_jruby? - JavaAtomicBoolean - else - MutexAtomicBoolean - end - private_constant :AtomicBooleanImplementation - - # @!macro atomic_boolean - # - # A boolean value that can be updated atomically. Reads and writes to an atomic - # boolean and thread-safe and guaranteed to succeed. Reads and writes may block - # briefly but no explicit locking is required. - # - # @!macro thread_safe_variable_comparison - # - # Performance: - # - # ``` - # Testing with ruby 2.1.2 - # Testing with Concurrent::MutexAtomicBoolean... - # 2.790000 0.000000 2.790000 ( 2.791454) - # Testing with Concurrent::CAtomicBoolean... 
- # 0.740000 0.000000 0.740000 ( 0.740206) - # - # Testing with jruby 1.9.3 - # Testing with Concurrent::MutexAtomicBoolean... - # 5.240000 2.520000 7.760000 ( 3.683000) - # Testing with Concurrent::JavaAtomicBoolean... - # 3.340000 0.010000 3.350000 ( 0.855000) - # ``` - # - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicBoolean.html java.util.concurrent.atomic.AtomicBoolean - # - # @!macro atomic_boolean_public_api - class AtomicBoolean < AtomicBooleanImplementation - # @return [String] Short string representation. - def to_s - format '%s value:%s>', super[0..-2], value - end - - alias_method :inspect, :to_s - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb deleted file mode 100644 index 26cd05d869213..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb +++ /dev/null @@ -1,144 +0,0 @@ -require 'concurrent/utility/native_extension_loader' # load native parts first - -require 'concurrent/atomic/mutex_atomic_fixnum' - -module Concurrent - - ################################################################### - - # @!macro atomic_fixnum_method_initialize - # - # Creates a new `AtomicFixnum` with the given initial value. - # - # @param [Fixnum] initial the initial value - # @raise [ArgumentError] if the initial value is not a `Fixnum` - - # @!macro atomic_fixnum_method_value_get - # - # Retrieves the current `Fixnum` value. - # - # @return [Fixnum] the current value - - # @!macro atomic_fixnum_method_value_set - # - # Explicitly sets the value. - # - # @param [Fixnum] value the new value to be set - # - # @return [Fixnum] the current value - # - # @raise [ArgumentError] if the new value is not a `Fixnum` - - # @!macro atomic_fixnum_method_increment - # - # Increases the current value by the given amount (defaults to 1). - # - # @param [Fixnum] delta the amount by which to increase the current value - # - # @return [Fixnum] the current value after incrementation - - # @!macro atomic_fixnum_method_decrement - # - # Decreases the current value by the given amount (defaults to 1). - # - # @param [Fixnum] delta the amount by which to decrease the current value - # - # @return [Fixnum] the current value after decrementation - - # @!macro atomic_fixnum_method_compare_and_set - # - # Atomically sets the value to the given updated value if the current - # value == the expected value. - # - # @param [Fixnum] expect the expected value - # @param [Fixnum] update the new value - # - # @return [Boolean] true if the value was updated else false - - # @!macro atomic_fixnum_method_update - # - # Pass the current value to the given block, replacing it - # with the block's result. May retry if the value changes - # during the block's execution. 
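As a quick illustration of the two atomic scalar classes documented in the hunks above (a sketch against the documented API, not Homebrew code):

```
require 'concurrent'

flag = Concurrent::AtomicBoolean.new(false)
flag.make_true   #=> true  (the value changed)
flag.true?       #=> true
flag.make_true   #=> false (already true, nothing changed)

count = Concurrent::AtomicFixnum.new(10)
count.increment               #=> 11
count.increment(4)            #=> 15
count.compare_and_set(15, 0)  #=> true
# update retries the block until its compare-and-set succeeds.
count.update { |v| v + 1 }    #=> 1
```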
- # - # @yield [Object] Calculate a new value for the atomic reference using - # given (old) value - # @yieldparam [Object] old_value the starting value of the atomic reference - # - # @return [Object] the new value - - ################################################################### - - # @!macro atomic_fixnum_public_api - # - # @!method initialize(initial = 0) - # @!macro atomic_fixnum_method_initialize - # - # @!method value - # @!macro atomic_fixnum_method_value_get - # - # @!method value=(value) - # @!macro atomic_fixnum_method_value_set - # - # @!method increment(delta = 1) - # @!macro atomic_fixnum_method_increment - # - # @!method decrement(delta = 1) - # @!macro atomic_fixnum_method_decrement - # - # @!method compare_and_set(expect, update) - # @!macro atomic_fixnum_method_compare_and_set - # - # @!method update - # @!macro atomic_fixnum_method_update - - ################################################################### - - # @!visibility private - # @!macro internal_implementation_note - AtomicFixnumImplementation = case - when Concurrent.on_cruby? && Concurrent.c_extensions_loaded? - CAtomicFixnum - when Concurrent.on_jruby? - JavaAtomicFixnum - else - MutexAtomicFixnum - end - private_constant :AtomicFixnumImplementation - - # @!macro atomic_fixnum - # - # A numeric value that can be updated atomically. Reads and writes to an atomic - # fixnum and thread-safe and guaranteed to succeed. Reads and writes may block - # briefly but no explicit locking is required. - # - # @!macro thread_safe_variable_comparison - # - # Performance: - # - # ``` - # Testing with ruby 2.1.2 - # Testing with Concurrent::MutexAtomicFixnum... - # 3.130000 0.000000 3.130000 ( 3.136505) - # Testing with Concurrent::CAtomicFixnum... - # 0.790000 0.000000 0.790000 ( 0.785550) - # - # Testing with jruby 1.9.3 - # Testing with Concurrent::MutexAtomicFixnum... - # 5.460000 2.460000 7.920000 ( 3.715000) - # Testing with Concurrent::JavaAtomicFixnum... - # 4.520000 0.030000 4.550000 ( 1.187000) - # ``` - # - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicLong.html java.util.concurrent.atomic.AtomicLong - # - # @!macro atomic_fixnum_public_api - class AtomicFixnum < AtomicFixnumImplementation - # @return [String] Short string representation. - def to_s - format '%s value:%s>', super[0..-2], value - end - - alias_method :inspect, :to_s - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb deleted file mode 100644 index e16be65772c96..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb +++ /dev/null @@ -1,167 +0,0 @@ -require 'concurrent/errors' -require 'concurrent/synchronization/object' - -module Concurrent - # An atomic reference which maintains an object reference along with a mark bit - # that can be updated atomically. 
- # - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/atomic/AtomicMarkableReference.html - # java.util.concurrent.atomic.AtomicMarkableReference - class AtomicMarkableReference < ::Concurrent::Synchronization::Object - - attr_atomic(:reference) - private :reference, :reference=, :swap_reference, :compare_and_set_reference, :update_reference - - def initialize(value = nil, mark = false) - super() - self.reference = immutable_array(value, mark) - end - - # Atomically sets the value and mark to the given updated value and - # mark given both: - # - the current value == the expected value && - # - the current mark == the expected mark - # - # @param [Object] expected_val the expected value - # @param [Object] new_val the new value - # @param [Boolean] expected_mark the expected mark - # @param [Boolean] new_mark the new mark - # - # @return [Boolean] `true` if successful. A `false` return indicates - # that the actual value was not equal to the expected value or the - # actual mark was not equal to the expected mark - def compare_and_set(expected_val, new_val, expected_mark, new_mark) - # Memoize a valid reference to the current AtomicReference for - # later comparison. - current = reference - curr_val, curr_mark = current - - # Ensure that that the expected marks match. - return false unless expected_mark == curr_mark - - if expected_val.is_a? Numeric - # If the object is a numeric, we need to ensure we are comparing - # the numerical values - return false unless expected_val == curr_val - else - # Otherwise, we need to ensure we are comparing the object identity. - # Theoretically, this could be incorrect if a user monkey-patched - # `Object#equal?`, but they should know that they are playing with - # fire at that point. - return false unless expected_val.equal? curr_val - end - - prospect = immutable_array(new_val, new_mark) - - compare_and_set_reference current, prospect - end - - alias_method :compare_and_swap, :compare_and_set - - # Gets the current reference and marked values. - # - # @return [Array] the current reference and marked values - def get - reference - end - - # Gets the current value of the reference - # - # @return [Object] the current value of the reference - def value - reference[0] - end - - # Gets the current marked value - # - # @return [Boolean] the current marked value - def mark - reference[1] - end - - alias_method :marked?, :mark - - # _Unconditionally_ sets to the given value of both the reference and - # the mark. - # - # @param [Object] new_val the new value - # @param [Boolean] new_mark the new mark - # - # @return [Array] both the new value and the new mark - def set(new_val, new_mark) - self.reference = immutable_array(new_val, new_mark) - end - - # Pass the current value and marked state to the given block, replacing it - # with the block's results. May retry if the value changes during the - # block's execution. 
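The operations documented here act on the value and the mark as a single pair. A brief, hedged sketch of how they might be used; the data is invented:

```
require 'concurrent'

node = { id: 1 }
ref  = Concurrent::AtomicMarkableReference.new(node, false)

ref.value    #=> { id: 1 }
ref.marked?  #=> false

# Succeeds only when both the value and the mark match the expected pair.
ref.compare_and_set(node, { id: 2 }, false, true)  #=> true

# The block receives the current value and mark and returns the new pair.
ref.update { |val, marked| [val.merge(seen: true), marked] }
ref.get      #=> [{ id: 2, seen: true }, true]
```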
- # - # @yield [Object] Calculate a new value and marked state for the atomic - # reference using given (old) value and (old) marked - # @yieldparam [Object] old_val the starting value of the atomic reference - # @yieldparam [Boolean] old_mark the starting state of marked - # - # @return [Array] the new value and new mark - def update - loop do - old_val, old_mark = reference - new_val, new_mark = yield old_val, old_mark - - if compare_and_set old_val, new_val, old_mark, new_mark - return immutable_array(new_val, new_mark) - end - end - end - - # Pass the current value to the given block, replacing it - # with the block's result. Raise an exception if the update - # fails. - # - # @yield [Object] Calculate a new value and marked state for the atomic - # reference using given (old) value and (old) marked - # @yieldparam [Object] old_val the starting value of the atomic reference - # @yieldparam [Boolean] old_mark the starting state of marked - # - # @return [Array] the new value and marked state - # - # @raise [Concurrent::ConcurrentUpdateError] if the update fails - def try_update! - old_val, old_mark = reference - new_val, new_mark = yield old_val, old_mark - - unless compare_and_set old_val, new_val, old_mark, new_mark - fail ::Concurrent::ConcurrentUpdateError, - 'AtomicMarkableReference: Update failed due to race condition.', - 'Note: If you would like to guarantee an update, please use ' + - 'the `AtomicMarkableReference#update` method.' - end - - immutable_array(new_val, new_mark) - end - - # Pass the current value to the given block, replacing it with the - # block's result. Simply return nil if update fails. - # - # @yield [Object] Calculate a new value and marked state for the atomic - # reference using given (old) value and (old) marked - # @yieldparam [Object] old_val the starting value of the atomic reference - # @yieldparam [Boolean] old_mark the starting state of marked - # - # @return [Array] the new value and marked state, or nil if - # the update failed - def try_update - old_val, old_mark = reference - new_val, new_mark = yield old_val, old_mark - - return unless compare_and_set old_val, new_val, old_mark, new_mark - - immutable_array(new_val, new_mark) - end - - private - - def immutable_array(*args) - args.freeze - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb deleted file mode 100644 index bb5fb774598cd..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb +++ /dev/null @@ -1,135 +0,0 @@ -require 'concurrent/utility/native_extension_loader' # load native parts first - -require 'concurrent/atomic_reference/atomic_direct_update' -require 'concurrent/atomic_reference/numeric_cas_wrapper' -require 'concurrent/atomic_reference/mutex_atomic' - -# Shim for TruffleRuby::AtomicReference -if Concurrent.on_truffleruby? && !defined?(TruffleRuby::AtomicReference) - # @!visibility private - module TruffleRuby - AtomicReference = Truffle::AtomicReference - end -end - -module Concurrent - - # @!macro internal_implementation_note - AtomicReferenceImplementation = case - when Concurrent.on_cruby? && Concurrent.c_extensions_loaded? 
- # @!visibility private - # @!macro internal_implementation_note - class CAtomicReference - include AtomicDirectUpdate - include AtomicNumericCompareAndSetWrapper - alias_method :compare_and_swap, :compare_and_set - end - CAtomicReference - when Concurrent.on_jruby? - # @!visibility private - # @!macro internal_implementation_note - class JavaAtomicReference - include AtomicDirectUpdate - end - JavaAtomicReference - when Concurrent.on_truffleruby? - class TruffleRubyAtomicReference < TruffleRuby::AtomicReference - include AtomicDirectUpdate - alias_method :value, :get - alias_method :value=, :set - alias_method :compare_and_swap, :compare_and_set - alias_method :swap, :get_and_set - end - TruffleRubyAtomicReference - else - MutexAtomicReference - end - private_constant :AtomicReferenceImplementation - - # An object reference that may be updated atomically. All read and write - # operations have java volatile semantic. - # - # @!macro thread_safe_variable_comparison - # - # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicReference.html - # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/package-summary.html - # - # @!method initialize(value = nil) - # @!macro atomic_reference_method_initialize - # @param [Object] value The initial value. - # - # @!method get - # @!macro atomic_reference_method_get - # Gets the current value. - # @return [Object] the current value - # - # @!method set(new_value) - # @!macro atomic_reference_method_set - # Sets to the given value. - # @param [Object] new_value the new value - # @return [Object] the new value - # - # @!method get_and_set(new_value) - # @!macro atomic_reference_method_get_and_set - # Atomically sets to the given value and returns the old value. - # @param [Object] new_value the new value - # @return [Object] the old value - # - # @!method compare_and_set(old_value, new_value) - # @!macro atomic_reference_method_compare_and_set - # - # Atomically sets the value to the given updated value if - # the current value == the expected value. - # - # @param [Object] old_value the expected value - # @param [Object] new_value the new value - # - # @return [Boolean] `true` if successful. A `false` return indicates - # that the actual value was not equal to the expected value. - # - # @!method update - # Pass the current value to the given block, replacing it - # with the block's result. May retry if the value changes - # during the block's execution. - # - # @yield [Object] Calculate a new value for the atomic reference using - # given (old) value - # @yieldparam [Object] old_value the starting value of the atomic reference - # @return [Object] the new value - # - # @!method try_update - # Pass the current value to the given block, replacing it - # with the block's result. Return nil if the update fails. - # - # @yield [Object] Calculate a new value for the atomic reference using - # given (old) value - # @yieldparam [Object] old_value the starting value of the atomic reference - # @note This method was altered to avoid raising an exception by default. - # Instead, this method now returns `nil` in case of failure. For more info, - # please see: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 - # @return [Object] the new value, or nil if update failed - # - # @!method try_update! - # Pass the current value to the given block, replacing it - # with the block's result. Raise an exception if the update - # fails. 
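The remaining documentation for #try_update! follows below. As a rough sketch of the AtomicReference API described by these macros (non-numeric values are compared by identity, so symbols are used here to keep the example unambiguous):

    require 'concurrent/atomic/atomic_reference'

    state = Concurrent::AtomicReference.new(:pending)
    state.get                                  # => :pending
    state.compare_and_set(:pending, :running)  # => true  (expected value matched)
    state.get_and_set(:done)                   # => :running (returns the old value)

    # try_update returns nil instead of raising when the swap fails
    state.try_update { |s| s == :done ? :archived : s }   # => :archived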
- # - # @yield [Object] Calculate a new value for the atomic reference using - # given (old) value - # @yieldparam [Object] old_value the starting value of the atomic reference - # @note This behavior mimics the behavior of the original - # `AtomicReference#try_update` API. The reason this was changed was to - # avoid raising exceptions (which are inherently slow) by default. For more - # info: https://github.com/ruby-concurrency/concurrent-ruby/pull/336 - # @return [Object] the new value - # @raise [Concurrent::ConcurrentUpdateError] if the update fails - class AtomicReference < AtomicReferenceImplementation - - # @return [String] Short string representation. - def to_s - format '%s value:%s>', super[0..-2], get - end - - alias_method :inspect, :to_s - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb deleted file mode 100644 index d883aed6f251b..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb +++ /dev/null @@ -1,100 +0,0 @@ -require 'concurrent/utility/engine' -require 'concurrent/atomic/mutex_count_down_latch' -require 'concurrent/atomic/java_count_down_latch' - -module Concurrent - - ################################################################### - - # @!macro count_down_latch_method_initialize - # - # Create a new `CountDownLatch` with the initial `count`. - # - # @param [new] count the initial count - # - # @raise [ArgumentError] if `count` is not an integer or is less than zero - - # @!macro count_down_latch_method_wait - # - # Block on the latch until the counter reaches zero or until `timeout` is reached. - # - # @param [Fixnum] timeout the number of seconds to wait for the counter or `nil` - # to block indefinitely - # @return [Boolean] `true` if the `count` reaches zero else false on `timeout` - - # @!macro count_down_latch_method_count_down - # - # Signal the latch to decrement the counter. Will signal all blocked threads when - # the `count` reaches zero. - - # @!macro count_down_latch_method_count - # - # The current value of the counter. - # - # @return [Fixnum] the current value of the counter - - ################################################################### - - # @!macro count_down_latch_public_api - # - # @!method initialize(count = 1) - # @!macro count_down_latch_method_initialize - # - # @!method wait(timeout = nil) - # @!macro count_down_latch_method_wait - # - # @!method count_down - # @!macro count_down_latch_method_count_down - # - # @!method count - # @!macro count_down_latch_method_count - - ################################################################### - - # @!visibility private - # @!macro internal_implementation_note - CountDownLatchImplementation = case - when Concurrent.on_jruby? - JavaCountDownLatch - else - MutexCountDownLatch - end - private_constant :CountDownLatchImplementation - - # @!macro count_down_latch - # - # A synchronization object that allows one thread to wait on multiple other threads. - # The thread that will wait creates a `CountDownLatch` and sets the initial value - # (normally equal to the number of other threads). The initiating thread passes the - # latch to the other threads then waits for the other threads by calling the `#wait` - # method. 
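The waiter/decrementer example for CountDownLatch appears just below. As a quick, hedged illustration of the timeout behaviour described in the #wait macro above (the return values in the comments are assumed):

    require 'concurrent/atomic/count_down_latch'

    latch = Concurrent::CountDownLatch.new(2)
    latch.wait(0.1)               # => false (timed out; the count is still 2)

    2.times { latch.count_down }
    latch.count                   # => 0
    latch.wait                    # => true (returns immediately once the count is zero)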
Each of the other threads calls `#count_down` when done with its work. - # When the latch counter reaches zero the waiting thread is unblocked and continues - # with its work. A `CountDownLatch` can be used only once. Its value cannot be reset. - # - # @!macro count_down_latch_public_api - # @example Waiter and Decrementer - # latch = Concurrent::CountDownLatch.new(3) - # - # waiter = Thread.new do - # latch.wait() - # puts ("Waiter released") - # end - # - # decrementer = Thread.new do - # sleep(1) - # latch.count_down - # puts latch.count - # - # sleep(1) - # latch.count_down - # puts latch.count - # - # sleep(1) - # latch.count_down - # puts latch.count - # end - # - # [waiter, decrementer].each(&:join) - class CountDownLatch < CountDownLatchImplementation - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb deleted file mode 100644 index 9ebe29dd09816..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb +++ /dev/null @@ -1,128 +0,0 @@ -require 'concurrent/synchronization/lockable_object' -require 'concurrent/utility/native_integer' - -module Concurrent - - # A synchronization aid that allows a set of threads to all wait for each - # other to reach a common barrier point. - # @example - # barrier = Concurrent::CyclicBarrier.new(3) - # jobs = Array.new(3) { |i| -> { sleep i; p done: i } } - # process = -> (i) do - # # waiting to start at the same time - # barrier.wait - # # execute job - # jobs[i].call - # # wait for others to finish - # barrier.wait - # end - # threads = 2.times.map do |i| - # Thread.new(i, &process) - # end - # - # # use main as well - # process.call 2 - # - # # here we can be sure that all jobs are processed - class CyclicBarrier < Synchronization::LockableObject - - # @!visibility private - Generation = Struct.new(:status) - private_constant :Generation - - # Create a new `CyclicBarrier` that waits for `parties` threads - # - # @param [Fixnum] parties the number of parties - # @yield an optional block that will be executed that will be executed after - # the last thread arrives and before the others are released - # - # @raise [ArgumentError] if `parties` is not an integer or is less than zero - def initialize(parties, &block) - Utility::NativeInteger.ensure_integer_and_bounds parties - Utility::NativeInteger.ensure_positive_and_no_zero parties - - super(&nil) - synchronize { ns_initialize parties, &block } - end - - # @return [Fixnum] the number of threads needed to pass the barrier - def parties - synchronize { @parties } - end - - # @return [Fixnum] the number of threads currently waiting on the barrier - def number_waiting - synchronize { @number_waiting } - end - - # Blocks on the barrier until the number of waiting threads is equal to - # `parties` or until `timeout` is reached or `reset` is called - # If a block has been passed to the constructor, it will be executed once by - # the last arrived thread before releasing the others - # @param [Fixnum] timeout the number of seconds to wait for the counter or - # `nil` to block indefinitely - # @return [Boolean] `true` if the `count` reaches zero else false on - # `timeout` or on `reset` or if the barrier is broken - def wait(timeout = nil) - synchronize do - - return false unless @generation.status == 
:waiting - - @number_waiting += 1 - - if @number_waiting == @parties - @action.call if @action - ns_generation_done @generation, :fulfilled - true - else - generation = @generation - if ns_wait_until(timeout) { generation.status != :waiting } - generation.status == :fulfilled - else - ns_generation_done generation, :broken, false - false - end - end - end - end - - # resets the barrier to its initial state - # If there is at least one waiting thread, it will be woken up, the `wait` - # method will return false and the barrier will be broken - # If the barrier is broken, this method restores it to the original state - # - # @return [nil] - def reset - synchronize { ns_generation_done @generation, :reset } - end - - # A barrier can be broken when: - # - a thread called the `reset` method while at least one other thread was waiting - # - at least one thread timed out on `wait` method - # - # A broken barrier can be restored using `reset` it's safer to create a new one - # @return [Boolean] true if the barrier is broken otherwise false - def broken? - synchronize { @generation.status != :waiting } - end - - protected - - def ns_generation_done(generation, status, continue = true) - generation.status = status - ns_next_generation if continue - ns_broadcast - end - - def ns_next_generation - @generation = Generation.new(:waiting) - @number_waiting = 0 - end - - def ns_initialize(parties, &block) - @parties = parties - @action = block - ns_next_generation - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/event.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/event.rb deleted file mode 100644 index ccf84c9d1b8a1..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/event.rb +++ /dev/null @@ -1,109 +0,0 @@ -require 'thread' -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # Old school kernel-style event reminiscent of Win32 programming in C++. - # - # When an `Event` is created it is in the `unset` state. Threads can choose to - # `#wait` on the event, blocking until released by another thread. When one - # thread wants to alert all blocking threads it calls the `#set` method which - # will then wake up all listeners. Once an `Event` has been set it remains set. - # New threads calling `#wait` will return immediately. An `Event` may be - # `#reset` at any time once it has been set. - # - # @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682655.aspx - # @example - # event = Concurrent::Event.new - # - # t1 = Thread.new do - # puts "t1 is waiting" - # event.wait(1) - # puts "event occurred" - # end - # - # t2 = Thread.new do - # puts "t2 calling set" - # event.set - # end - # - # [t1, t2].each(&:join) - # - # # prints: - # # t1 is waiting - # # t2 calling set - # # event occurred - class Event < Synchronization::LockableObject - - # Creates a new `Event` in the unset state. Threads calling `#wait` on the - # `Event` will block. - def initialize - super - synchronize { ns_initialize } - end - - # Is the object in the set state? - # - # @return [Boolean] indicating whether or not the `Event` has been set - def set? - synchronize { @set } - end - - # Trigger the event, setting the state to `set` and releasing all threads - # waiting on the event. Has no effect if the `Event` has already been set. 
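The rest of the Event API (#reset, #wait, #try?) follows below. A hedged sketch of the set/reset lifecycle described in the class documentation above:

    require 'concurrent/atomic/event'

    event = Concurrent::Event.new
    event.set?        # => false
    event.wait(0.1)   # => false (timed out while unset)

    event.set         # wakes all waiters; the event stays set
    event.wait        # => true (returns immediately once set)

    event.reset       # back to the unset state
    event.set?        # => false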
- # - # @return [Boolean] should always return `true` - def set - synchronize { ns_set } - end - - def try? - synchronize { @set ? false : ns_set } - end - - # Reset a previously set event back to the `unset` state. - # Has no effect if the `Event` has not yet been set. - # - # @return [Boolean] should always return `true` - def reset - synchronize do - if @set - @set = false - @iteration +=1 - end - true - end - end - - # Wait a given number of seconds for the `Event` to be set by another - # thread. Will wait forever when no `timeout` value is given. Returns - # immediately if the `Event` has already been set. - # - # @return [Boolean] true if the `Event` was set before timeout else false - def wait(timeout = nil) - synchronize do - unless @set - iteration = @iteration - ns_wait_until(timeout) { iteration < @iteration || @set } - else - true - end - end - end - - protected - - def ns_set - unless @set - @set = true - ns_broadcast - end - true - end - - def ns_initialize - @set = false - @iteration = 0 - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/fiber_local_var.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/fiber_local_var.rb deleted file mode 100644 index e90fc24f9ecb5..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/fiber_local_var.rb +++ /dev/null @@ -1,109 +0,0 @@ -require 'concurrent/constants' -require_relative 'locals' - -module Concurrent - - # A `FiberLocalVar` is a variable where the value is different for each fiber. - # Each variable may have a default value, but when you modify the variable only - # the current fiber will ever see that change. - # - # This is similar to Ruby's built-in fiber-local variables (`Thread.current[:name]`), - # but with these major advantages: - # * `FiberLocalVar` has its own identity, it doesn't need a Symbol. - # * Each Ruby's built-in fiber-local variable leaks some memory forever (it's a Symbol held forever on the fiber), - # so it's only OK to create a small amount of them. - # `FiberLocalVar` has no such issue and it is fine to create many of them. - # * Ruby's built-in fiber-local variables leak forever the value set on each fiber (unless set to nil explicitly). - # `FiberLocalVar` automatically removes the mapping for each fiber once the `FiberLocalVar` instance is GC'd. - # - # @example - # v = FiberLocalVar.new(14) - # v.value #=> 14 - # v.value = 2 - # v.value #=> 2 - # - # @example - # v = FiberLocalVar.new(14) - # - # Fiber.new do - # v.value #=> 14 - # v.value = 1 - # v.value #=> 1 - # end.resume - # - # Fiber.new do - # v.value #=> 14 - # v.value = 2 - # v.value #=> 2 - # end.resume - # - # v.value #=> 14 - class FiberLocalVar - LOCALS = FiberLocals.new - - # Creates a fiber local variable. - # - # @param [Object] default the default value when otherwise unset - # @param [Proc] default_block Optional block that gets called to obtain the - # default value for each fiber - def initialize(default = nil, &default_block) - if default && block_given? - raise ArgumentError, "Cannot use both value and block as default value" - end - - if block_given? - @default_block = default_block - @default = nil - else - @default_block = nil - @default = default - end - - @index = LOCALS.next_index(self) - end - - # Returns the value in the current fiber's copy of this fiber-local variable. 
- # - # @return [Object] the current value - def value - LOCALS.fetch(@index) { default } - end - - # Sets the current fiber's copy of this fiber-local variable to the specified value. - # - # @param [Object] value the value to set - # @return [Object] the new value - def value=(value) - LOCALS.set(@index, value) - end - - # Bind the given value to fiber local storage during - # execution of the given block. - # - # @param [Object] value the value to bind - # @yield the operation to be performed with the bound variable - # @return [Object] the value - def bind(value) - if block_given? - old_value = self.value - self.value = value - begin - yield - ensure - self.value = old_value - end - end - end - - protected - - # @!visibility private - def default - if @default_block - self.value = @default_block.call - else - @default - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb deleted file mode 100644 index 3c119bc32c2c4..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb +++ /dev/null @@ -1,43 +0,0 @@ -if Concurrent.on_jruby? - require 'concurrent/utility/native_extension_loader' - - module Concurrent - - # @!macro count_down_latch - # @!visibility private - # @!macro internal_implementation_note - class JavaCountDownLatch - - # @!macro count_down_latch_method_initialize - def initialize(count = 1) - Utility::NativeInteger.ensure_integer_and_bounds(count) - Utility::NativeInteger.ensure_positive(count) - @latch = java.util.concurrent.CountDownLatch.new(count) - end - - # @!macro count_down_latch_method_wait - def wait(timeout = nil) - result = nil - if timeout.nil? - Synchronization::JRuby.sleep_interruptibly { @latch.await } - result = true - else - Synchronization::JRuby.sleep_interruptibly do - result = @latch.await(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) - end - end - result - end - - # @!macro count_down_latch_method_count_down - def count_down - @latch.countDown - end - - # @!macro count_down_latch_method_count - def count - @latch.getCount - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/locals.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/locals.rb deleted file mode 100644 index 0a276aedd5251..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/locals.rb +++ /dev/null @@ -1,189 +0,0 @@ -require 'fiber' -require 'concurrent/utility/engine' -require 'concurrent/constants' - -module Concurrent - # @!visibility private - # @!macro internal_implementation_note - # - # An abstract implementation of local storage, with sub-classes for - # per-thread and per-fiber locals. - # - # Each execution context (EC, thread or fiber) has a lazily initialized array - # of local variable values. Each time a new local variable is created, we - # allocate an "index" for it. - # - # For example, if the allocated index is 1, that means slot #1 in EVERY EC's - # locals array will be used for the value of that variable. 
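Before the description of the per-execution-context locals storage continues below, here is a hedged sketch of the FiberLocalVar#bind API documented earlier in this hunk; the values are illustrative:

    require 'concurrent/atomic/fiber_local_var'

    v = Concurrent::FiberLocalVar.new(:default)

    v.bind(:scoped) do
      v.value   # => :scoped, visible only inside this block (and this fiber)
    end

    v.value     # => :default (the previous value is restored after #bind)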
- # - # The good thing about using a per-EC structure to hold values, rather than - # a global, is that no synchronization is needed when reading and writing - # those values (since the structure is only ever accessed by a single - # thread). - # - # Of course, when a local variable is GC'd, 1) we need to recover its index - # for use by other new local variables (otherwise the locals arrays could - # get bigger and bigger with time), and 2) we need to null out all the - # references held in the now-unused slots (both to avoid blocking GC of those - # objects, and also to prevent "stale" values from being passed on to a new - # local when the index is reused). - # - # Because we need to null out freed slots, we need to keep references to - # ALL the locals arrays, so we can null out the appropriate slots in all of - # them. This is why we need to use a finalizer to clean up the locals array - # when the EC goes out of scope. - class AbstractLocals - def initialize - @free = [] - @lock = Mutex.new - @all_arrays = {} - @next = 0 - end - - def synchronize - @lock.synchronize { yield } - end - - if Concurrent.on_cruby? - def weak_synchronize - yield - end - else - alias_method :weak_synchronize, :synchronize - end - - def next_index(local) - index = synchronize do - if @free.empty? - @next += 1 - else - @free.pop - end - end - - # When the local goes out of scope, we should free the associated index - # and all values stored into it. - ObjectSpace.define_finalizer(local, local_finalizer(index)) - - index - end - - def free_index(index) - weak_synchronize do - # The cost of GC'ing a TLV is linear in the number of ECs using local - # variables. But that is natural! More ECs means more storage is used - # per local variable. So naturally more CPU time is required to free - # more storage. - # - # DO NOT use each_value which might conflict with new pair assignment - # into the hash in #set method. - @all_arrays.values.each do |locals| - locals[index] = nil - end - - # free index has to be published after the arrays are cleared: - @free << index - end - end - - def fetch(index) - locals = self.locals - value = locals ? locals[index] : nil - - if nil == value - yield - elsif NULL.equal?(value) - nil - else - value - end - end - - def set(index, value) - locals = self.locals! - locals[index] = (nil == value ? NULL : value) - - value - end - - private - - # When the local goes out of scope, clean up that slot across all locals currently assigned. - def local_finalizer(index) - proc do - free_index(index) - end - end - - # When a thread/fiber goes out of scope, remove the array from @all_arrays. - def thread_fiber_finalizer(array_object_id) - proc do - weak_synchronize do - @all_arrays.delete(array_object_id) - end - end - end - - # Returns the locals for the current scope, or nil if none exist. - def locals - raise NotImplementedError - end - - # Returns the locals for the current scope, creating them if necessary. - def locals! - raise NotImplementedError - end - end - - # @!visibility private - # @!macro internal_implementation_note - # An array-backed storage of indexed variables per thread. - class ThreadLocals < AbstractLocals - def locals - Thread.current.thread_variable_get(:concurrent_thread_locals) - end - - def locals! 
- thread = Thread.current - locals = thread.thread_variable_get(:concurrent_thread_locals) - - unless locals - locals = thread.thread_variable_set(:concurrent_thread_locals, []) - weak_synchronize do - @all_arrays[locals.object_id] = locals - end - # When the thread goes out of scope, we should delete the associated locals: - ObjectSpace.define_finalizer(thread, thread_fiber_finalizer(locals.object_id)) - end - - locals - end - end - - # @!visibility private - # @!macro internal_implementation_note - # An array-backed storage of indexed variables per fiber. - class FiberLocals < AbstractLocals - def locals - Thread.current[:concurrent_fiber_locals] - end - - def locals! - thread = Thread.current - locals = thread[:concurrent_fiber_locals] - - unless locals - locals = thread[:concurrent_fiber_locals] = [] - weak_synchronize do - @all_arrays[locals.object_id] = locals - end - # When the fiber goes out of scope, we should delete the associated locals: - ObjectSpace.define_finalizer(Fiber.current, thread_fiber_finalizer(locals.object_id)) - end - - locals - end - end - - private_constant :AbstractLocals, :ThreadLocals, :FiberLocals -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/lock_local_var.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/lock_local_var.rb deleted file mode 100644 index ebf23a2414093..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/lock_local_var.rb +++ /dev/null @@ -1,28 +0,0 @@ -require 'concurrent/utility/engine' -require_relative 'fiber_local_var' -require_relative 'thread_local_var' - -module Concurrent - # @!visibility private - def self.mutex_owned_per_thread? - return false if Concurrent.on_jruby? || Concurrent.on_truffleruby? - - mutex = Mutex.new - # Lock the mutex: - mutex.synchronize do - # Check if the mutex is still owned in a child fiber: - Fiber.new { mutex.owned? }.resume - end - end - - if mutex_owned_per_thread? - LockLocalVar = ThreadLocalVar - else - LockLocalVar = FiberLocalVar - end - - # Either {FiberLocalVar} or {ThreadLocalVar} depending on whether Mutex (and Monitor) - # are held, respectively, per Fiber or per Thread. - class LockLocalVar - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb deleted file mode 100644 index 015996b06f04c..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb +++ /dev/null @@ -1,68 +0,0 @@ -require 'concurrent/synchronization/safe_initialization' - -module Concurrent - - # @!macro atomic_boolean - # @!visibility private - # @!macro internal_implementation_note - class MutexAtomicBoolean - extend Concurrent::Synchronization::SafeInitialization - - # @!macro atomic_boolean_method_initialize - def initialize(initial = false) - super() - @Lock = ::Mutex.new - @value = !!initial - end - - # @!macro atomic_boolean_method_value_get - def value - synchronize { @value } - end - - # @!macro atomic_boolean_method_value_set - def value=(value) - synchronize { @value = !!value } - end - - # @!macro atomic_boolean_method_true_question - def true? 
- synchronize { @value } - end - - # @!macro atomic_boolean_method_false_question - def false? - synchronize { !@value } - end - - # @!macro atomic_boolean_method_make_true - def make_true - synchronize { ns_make_value(true) } - end - - # @!macro atomic_boolean_method_make_false - def make_false - synchronize { ns_make_value(false) } - end - - protected - - # @!visibility private - def synchronize - if @Lock.owned? - yield - else - @Lock.synchronize { yield } - end - end - - private - - # @!visibility private - def ns_make_value(value) - old = @value - @value = value - old != @value - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb deleted file mode 100644 index 0ca395579fe09..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb +++ /dev/null @@ -1,81 +0,0 @@ -require 'concurrent/synchronization/safe_initialization' -require 'concurrent/utility/native_integer' - -module Concurrent - - # @!macro atomic_fixnum - # @!visibility private - # @!macro internal_implementation_note - class MutexAtomicFixnum - extend Concurrent::Synchronization::SafeInitialization - - # @!macro atomic_fixnum_method_initialize - def initialize(initial = 0) - super() - @Lock = ::Mutex.new - ns_set(initial) - end - - # @!macro atomic_fixnum_method_value_get - def value - synchronize { @value } - end - - # @!macro atomic_fixnum_method_value_set - def value=(value) - synchronize { ns_set(value) } - end - - # @!macro atomic_fixnum_method_increment - def increment(delta = 1) - synchronize { ns_set(@value + delta.to_i) } - end - - alias_method :up, :increment - - # @!macro atomic_fixnum_method_decrement - def decrement(delta = 1) - synchronize { ns_set(@value - delta.to_i) } - end - - alias_method :down, :decrement - - # @!macro atomic_fixnum_method_compare_and_set - def compare_and_set(expect, update) - synchronize do - if @value == expect.to_i - @value = update.to_i - true - else - false - end - end - end - - # @!macro atomic_fixnum_method_update - def update - synchronize do - @value = yield @value - end - end - - protected - - # @!visibility private - def synchronize - if @Lock.owned? 
- yield - else - @Lock.synchronize { yield } - end - end - - private - - # @!visibility private - def ns_set(value) - Utility::NativeInteger.ensure_integer_and_bounds value - @value = value - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb deleted file mode 100644 index 29aa1caa4f296..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb +++ /dev/null @@ -1,44 +0,0 @@ -require 'concurrent/synchronization/lockable_object' -require 'concurrent/utility/native_integer' - -module Concurrent - - # @!macro count_down_latch - # @!visibility private - # @!macro internal_implementation_note - class MutexCountDownLatch < Synchronization::LockableObject - - # @!macro count_down_latch_method_initialize - def initialize(count = 1) - Utility::NativeInteger.ensure_integer_and_bounds count - Utility::NativeInteger.ensure_positive count - - super() - synchronize { ns_initialize count } - end - - # @!macro count_down_latch_method_wait - def wait(timeout = nil) - synchronize { ns_wait_until(timeout) { @count == 0 } } - end - - # @!macro count_down_latch_method_count_down - def count_down - synchronize do - @count -= 1 if @count > 0 - ns_broadcast if @count == 0 - end - end - - # @!macro count_down_latch_method_count - def count - synchronize { @count } - end - - protected - - def ns_initialize(count) - @count = count - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb deleted file mode 100644 index 4347289f1ed10..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb +++ /dev/null @@ -1,131 +0,0 @@ -require 'concurrent/synchronization/lockable_object' -require 'concurrent/utility/native_integer' - -module Concurrent - - # @!macro semaphore - # @!visibility private - # @!macro internal_implementation_note - class MutexSemaphore < Synchronization::LockableObject - - # @!macro semaphore_method_initialize - def initialize(count) - Utility::NativeInteger.ensure_integer_and_bounds count - - super() - synchronize { ns_initialize count } - end - - # @!macro semaphore_method_acquire - def acquire(permits = 1) - Utility::NativeInteger.ensure_integer_and_bounds permits - Utility::NativeInteger.ensure_positive permits - - synchronize do - try_acquire_timed(permits, nil) - end - - return unless block_given? - - begin - yield - ensure - release(permits) - end - end - - # @!macro semaphore_method_available_permits - def available_permits - synchronize { @free } - end - - # @!macro semaphore_method_drain_permits - # - # Acquires and returns all permits that are immediately available. - # - # @return [Integer] - def drain_permits - synchronize do - @free.tap { |_| @free = 0 } - end - end - - # @!macro semaphore_method_try_acquire - def try_acquire(permits = 1, timeout = nil) - Utility::NativeInteger.ensure_integer_and_bounds permits - Utility::NativeInteger.ensure_positive permits - - acquired = synchronize do - if timeout.nil? 
- try_acquire_now(permits) - else - try_acquire_timed(permits, timeout) - end - end - - return acquired unless block_given? - return unless acquired - - begin - yield - ensure - release(permits) - end - end - - # @!macro semaphore_method_release - def release(permits = 1) - Utility::NativeInteger.ensure_integer_and_bounds permits - Utility::NativeInteger.ensure_positive permits - - synchronize do - @free += permits - permits.times { ns_signal } - end - nil - end - - # Shrinks the number of available permits by the indicated reduction. - # - # @param [Fixnum] reduction Number of permits to remove. - # - # @raise [ArgumentError] if `reduction` is not an integer or is negative - # - # @raise [ArgumentError] if `@free` - `@reduction` is less than zero - # - # @return [nil] - # - # @!visibility private - def reduce_permits(reduction) - Utility::NativeInteger.ensure_integer_and_bounds reduction - Utility::NativeInteger.ensure_positive reduction - - synchronize { @free -= reduction } - nil - end - - protected - - # @!visibility private - def ns_initialize(count) - @free = count - end - - private - - # @!visibility private - def try_acquire_now(permits) - if @free >= permits - @free -= permits - true - else - false - end - end - - # @!visibility private - def try_acquire_timed(permits, timeout) - ns_wait_until(timeout) { try_acquire_now(permits) } - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb deleted file mode 100644 index b26bd17a089ce..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb +++ /dev/null @@ -1,255 +0,0 @@ -require 'thread' -require 'concurrent/atomic/atomic_fixnum' -require 'concurrent/errors' -require 'concurrent/synchronization/object' -require 'concurrent/synchronization/lock' - -module Concurrent - - # Ruby read-write lock implementation - # - # Allows any number of concurrent readers, but only one concurrent writer - # (And if the "write" lock is taken, any readers who come along will have to wait) - # - # If readers are already active when a writer comes along, the writer will wait for - # all the readers to finish before going ahead. - # Any additional readers that come when the writer is already waiting, will also - # wait (so writers are not starved). - # - # This implementation is based on `java.util.concurrent.ReentrantReadWriteLock`. - # - # @example - # lock = Concurrent::ReadWriteLock.new - # lock.with_read_lock { data.retrieve } - # lock.with_write_lock { data.modify! } - # - # @note Do **not** try to acquire the write lock while already holding a read lock - # **or** try to acquire the write lock while you already have it. - # This will lead to deadlock - # - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock - class ReadWriteLock < Synchronization::Object - - # @!visibility private - WAITING_WRITER = 1 << 15 - - # @!visibility private - RUNNING_WRITER = 1 << 29 - - # @!visibility private - MAX_READERS = WAITING_WRITER - 1 - - # @!visibility private - MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 - - safe_initialization! 
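The implementation notes that follow explain how ReadWriteLock packs its entire state into a single integer using the constants defined just above. A small worked sketch of that encoding; the counter value is invented purely for illustration and mirrors the private helpers shown later in this file:

    WAITING_WRITER = 1 << 15
    RUNNING_WRITER = 1 << 29
    MAX_READERS    = WAITING_WRITER - 1
    MAX_WRITERS    = RUNNING_WRITER - MAX_READERS - 1

    # A made-up counter: 3 running readers and 2 waiting writers
    c = 3 + (2 * WAITING_WRITER)

    c & MAX_READERS                      # => 3     (running readers)
    (c & MAX_WRITERS) / WAITING_WRITER   # => 2     (waiting writers)
    c >= RUNNING_WRITER                  # => false (no writer currently holds the lock)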
- - # Implementation notes: - # A goal is to make the uncontended path for both readers/writers lock-free - # Only if there is reader-writer or writer-writer contention, should locks be used - # Internal state is represented by a single integer ("counter"), and updated - # using atomic compare-and-swap operations - # When the counter is 0, the lock is free - # Each reader increments the counter by 1 when acquiring a read lock - # (and decrements by 1 when releasing the read lock) - # The counter is increased by (1 << 15) for each writer waiting to acquire the - # write lock, and by (1 << 29) if the write lock is taken - - # Create a new `ReadWriteLock` in the unlocked state. - def initialize - super() - @Counter = AtomicFixnum.new(0) # single integer which represents lock state - @ReadLock = Synchronization::Lock.new - @WriteLock = Synchronization::Lock.new - end - - # Execute a block operation within a read lock. - # - # @yield the task to be performed within the lock. - # - # @return [Object] the result of the block operation. - # - # @raise [ArgumentError] when no block is given. - # @raise [Concurrent::ResourceLimitError] if the maximum number of readers - # is exceeded. - def with_read_lock - raise ArgumentError.new('no block given') unless block_given? - acquire_read_lock - begin - yield - ensure - release_read_lock - end - end - - # Execute a block operation within a write lock. - # - # @yield the task to be performed within the lock. - # - # @return [Object] the result of the block operation. - # - # @raise [ArgumentError] when no block is given. - # @raise [Concurrent::ResourceLimitError] if the maximum number of readers - # is exceeded. - def with_write_lock - raise ArgumentError.new('no block given') unless block_given? - acquire_write_lock - begin - yield - ensure - release_write_lock - end - end - - # Acquire a read lock. If a write lock has been acquired will block until - # it is released. Will not block if other read locks have been acquired. - # - # @return [Boolean] true if the lock is successfully acquired - # - # @raise [Concurrent::ResourceLimitError] if the maximum number of readers - # is exceeded. - def acquire_read_lock - while true - c = @Counter.value - raise ResourceLimitError.new('Too many reader threads') if max_readers?(c) - - # If a writer is waiting when we first queue up, we need to wait - if waiting_writer?(c) - @ReadLock.wait_until { !waiting_writer? } - - # after a reader has waited once, they are allowed to "barge" ahead of waiting writers - # but if a writer is *running*, the reader still needs to wait (naturally) - while true - c = @Counter.value - if running_writer?(c) - @ReadLock.wait_until { !running_writer? } - else - return if @Counter.compare_and_set(c, c+1) - end - end - else - break if @Counter.compare_and_set(c, c+1) - end - end - true - end - - # Release a previously acquired read lock. - # - # @return [Boolean] true if the lock is successfully released - def release_read_lock - while true - c = @Counter.value - if @Counter.compare_and_set(c, c-1) - # If one or more writers were waiting, and we were the last reader, wake a writer up - if waiting_writer?(c) && running_readers(c) == 1 - @WriteLock.signal - end - break - end - end - true - end - - # Acquire a write lock. Will block and wait for all active readers and writers. - # - # @return [Boolean] true if the lock is successfully acquired - # - # @raise [Concurrent::ResourceLimitError] if the maximum number of writers - # is exceeded. 
- def acquire_write_lock - while true - c = @Counter.value - raise ResourceLimitError.new('Too many writer threads') if max_writers?(c) - - if c == 0 # no readers OR writers running - # if we successfully swap the RUNNING_WRITER bit on, then we can go ahead - break if @Counter.compare_and_set(0, RUNNING_WRITER) - elsif @Counter.compare_and_set(c, c+WAITING_WRITER) - while true - # Now we have successfully incremented, so no more readers will be able to increment - # (they will wait instead) - # However, readers OR writers could decrement right here, OR another writer could increment - @WriteLock.wait_until do - # So we have to do another check inside the synchronized section - # If a writer OR reader is running, then go to sleep - c = @Counter.value - !running_writer?(c) && !running_readers?(c) - end - - # We just came out of a wait - # If we successfully turn the RUNNING_WRITER bit on with an atomic swap, - # Then we are OK to stop waiting and go ahead - # Otherwise go back and wait again - c = @Counter.value - break if !running_writer?(c) && !running_readers?(c) && @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER) - end - break - end - end - true - end - - # Release a previously acquired write lock. - # - # @return [Boolean] true if the lock is successfully released - def release_write_lock - return true unless running_writer? - c = @Counter.update { |counter| counter - RUNNING_WRITER } - @ReadLock.broadcast - @WriteLock.signal if waiting_writers(c) > 0 - true - end - - # Queries if the write lock is held by any thread. - # - # @return [Boolean] true if the write lock is held else false` - def write_locked? - @Counter.value >= RUNNING_WRITER - end - - # Queries whether any threads are waiting to acquire the read or write lock. - # - # @return [Boolean] true if any threads are waiting for a lock else false - def has_waiters? 
- waiting_writer?(@Counter.value) - end - - private - - # @!visibility private - def running_readers(c = @Counter.value) - c & MAX_READERS - end - - # @!visibility private - def running_readers?(c = @Counter.value) - (c & MAX_READERS) > 0 - end - - # @!visibility private - def running_writer?(c = @Counter.value) - c >= RUNNING_WRITER - end - - # @!visibility private - def waiting_writers(c = @Counter.value) - (c & MAX_WRITERS) / WAITING_WRITER - end - - # @!visibility private - def waiting_writer?(c = @Counter.value) - c >= WAITING_WRITER - end - - # @!visibility private - def max_readers?(c = @Counter.value) - (c & MAX_READERS) == MAX_READERS - end - - # @!visibility private - def max_writers?(c = @Counter.value) - (c & MAX_WRITERS) == MAX_WRITERS - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb deleted file mode 100644 index 6d72a3a097831..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb +++ /dev/null @@ -1,379 +0,0 @@ -require 'thread' -require 'concurrent/atomic/atomic_reference' -require 'concurrent/atomic/atomic_fixnum' -require 'concurrent/errors' -require 'concurrent/synchronization/object' -require 'concurrent/synchronization/lock' -require 'concurrent/atomic/lock_local_var' - -module Concurrent - - # Re-entrant read-write lock implementation - # - # Allows any number of concurrent readers, but only one concurrent writer - # (And while the "write" lock is taken, no read locks can be obtained either. - # Hence, the write lock can also be called an "exclusive" lock.) - # - # If another thread has taken a read lock, any thread which wants a write lock - # will block until all the readers release their locks. However, once a thread - # starts waiting to obtain a write lock, any additional readers that come along - # will also wait (so writers are not starved). - # - # A thread can acquire both a read and write lock at the same time. A thread can - # also acquire a read lock OR a write lock more than once. Only when the read (or - # write) lock is released as many times as it was acquired, will the thread - # actually let it go, allowing other threads which might have been waiting - # to proceed. Therefore the lock can be upgraded by first acquiring - # read lock and then write lock and that the lock can be downgraded by first - # having both read and write lock a releasing just the write lock. - # - # If both read and write locks are acquired by the same thread, it is not strictly - # necessary to release them in the same order they were acquired. In other words, - # the following code is legal: - # - # @example - # lock = Concurrent::ReentrantReadWriteLock.new - # lock.acquire_write_lock - # lock.acquire_read_lock - # lock.release_write_lock - # # At this point, the current thread is holding only a read lock, not a write - # # lock. So other threads can take read locks, but not a write lock. - # lock.release_read_lock - # # Now the current thread is not holding either a read or write lock, so - # # another thread could potentially acquire a write lock. - # - # This implementation was inspired by `java.util.concurrent.ReentrantReadWriteLock`. 
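The example above shows the downgrade path (write lock released while a read lock is kept). The upgrade path described in the same paragraph, sketched under the assumption that no other thread is already queued for the write lock:

    require 'concurrent/atomic/reentrant_read_write_lock'

    lock = Concurrent::ReentrantReadWriteLock.new

    lock.acquire_read_lock
    # ... inspect shared state ...
    lock.acquire_write_lock     # upgrade: waits for any other readers to finish
    # ... modify shared state ...
    lock.release_write_lock     # the read lock is still held here
    lock.release_read_lock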
- # - # @example - # lock = Concurrent::ReentrantReadWriteLock.new - # lock.with_read_lock { data.retrieve } - # lock.with_write_lock { data.modify! } - # - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/locks/ReentrantReadWriteLock.html java.util.concurrent.ReentrantReadWriteLock - class ReentrantReadWriteLock < Synchronization::Object - - # Implementation notes: - # - # A goal is to make the uncontended path for both readers/writers mutex-free - # Only if there is reader-writer or writer-writer contention, should mutexes be used - # Otherwise, a single CAS operation is all we need to acquire/release a lock - # - # Internal state is represented by a single integer ("counter"), and updated - # using atomic compare-and-swap operations - # When the counter is 0, the lock is free - # Each thread which has one OR MORE read locks increments the counter by 1 - # (and decrements by 1 when releasing the read lock) - # The counter is increased by (1 << 15) for each writer waiting to acquire the - # write lock, and by (1 << 29) if the write lock is taken - # - # Additionally, each thread uses a thread-local variable to count how many times - # it has acquired a read lock, AND how many times it has acquired a write lock. - # It uses a similar trick; an increment of 1 means a read lock was taken, and - # an increment of (1 << 15) means a write lock was taken - # This is what makes re-entrancy possible - # - # 2 rules are followed to ensure good liveness properties: - # 1) Once a writer has queued up and is waiting for a write lock, no other thread - # can take a lock without waiting - # 2) When a write lock is released, readers are given the "first chance" to wake - # up and acquire a read lock - # Following these rules means readers and writers tend to "take turns", so neither - # can starve the other, even under heavy contention - - # @!visibility private - READER_BITS = 15 - # @!visibility private - WRITER_BITS = 14 - - # Used with @Counter: - # @!visibility private - WAITING_WRITER = 1 << READER_BITS - # @!visibility private - RUNNING_WRITER = 1 << (READER_BITS + WRITER_BITS) - # @!visibility private - MAX_READERS = WAITING_WRITER - 1 - # @!visibility private - MAX_WRITERS = RUNNING_WRITER - MAX_READERS - 1 - - # Used with @HeldCount: - # @!visibility private - WRITE_LOCK_HELD = 1 << READER_BITS - # @!visibility private - READ_LOCK_MASK = WRITE_LOCK_HELD - 1 - # @!visibility private - WRITE_LOCK_MASK = MAX_WRITERS - - safe_initialization! - - # Create a new `ReentrantReadWriteLock` in the unlocked state. - def initialize - super() - @Counter = AtomicFixnum.new(0) # single integer which represents lock state - @ReadQueue = Synchronization::Lock.new # used to queue waiting readers - @WriteQueue = Synchronization::Lock.new # used to queue waiting writers - @HeldCount = LockLocalVar.new(0) # indicates # of R & W locks held by this thread - end - - # Execute a block operation within a read lock. - # - # @yield the task to be performed within the lock. - # - # @return [Object] the result of the block operation. - # - # @raise [ArgumentError] when no block is given. - # @raise [Concurrent::ResourceLimitError] if the maximum number of readers - # is exceeded. - def with_read_lock - raise ArgumentError.new('no block given') unless block_given? - acquire_read_lock - begin - yield - ensure - release_read_lock - end - end - - # Execute a block operation within a write lock. - # - # @yield the task to be performed within the lock. 
- # - # @return [Object] the result of the block operation. - # - # @raise [ArgumentError] when no block is given. - # @raise [Concurrent::ResourceLimitError] if the maximum number of readers - # is exceeded. - def with_write_lock - raise ArgumentError.new('no block given') unless block_given? - acquire_write_lock - begin - yield - ensure - release_write_lock - end - end - - # Acquire a read lock. If a write lock is held by another thread, will block - # until it is released. - # - # @return [Boolean] true if the lock is successfully acquired - # - # @raise [Concurrent::ResourceLimitError] if the maximum number of readers - # is exceeded. - def acquire_read_lock - if (held = @HeldCount.value) > 0 - # If we already have a lock, there's no need to wait - if held & READ_LOCK_MASK == 0 - # But we do need to update the counter, if we were holding a write - # lock but not a read lock - @Counter.update { |c| c + 1 } - end - @HeldCount.value = held + 1 - return true - end - - while true - c = @Counter.value - raise ResourceLimitError.new('Too many reader threads') if max_readers?(c) - - # If a writer is waiting OR running when we first queue up, we need to wait - if waiting_or_running_writer?(c) - # Before going to sleep, check again with the ReadQueue mutex held - @ReadQueue.synchronize do - @ReadQueue.ns_wait if waiting_or_running_writer? - end - # Note: the above 'synchronize' block could have used #wait_until, - # but that waits repeatedly in a loop, checking the wait condition - # each time it wakes up (to protect against spurious wakeups) - # But we are already in a loop, which is only broken when we successfully - # acquire the lock! So we don't care about spurious wakeups, and would - # rather not pay the extra overhead of using #wait_until - - # After a reader has waited once, they are allowed to "barge" ahead of waiting writers - # But if a writer is *running*, the reader still needs to wait (naturally) - while true - c = @Counter.value - if running_writer?(c) - @ReadQueue.synchronize do - @ReadQueue.ns_wait if running_writer? - end - elsif @Counter.compare_and_set(c, c+1) - @HeldCount.value = held + 1 - return true - end - end - elsif @Counter.compare_and_set(c, c+1) - @HeldCount.value = held + 1 - return true - end - end - end - - # Try to acquire a read lock and return true if we succeed. If it cannot be - # acquired immediately, return false. - # - # @return [Boolean] true if the lock is successfully acquired - def try_read_lock - if (held = @HeldCount.value) > 0 - if held & READ_LOCK_MASK == 0 - # If we hold a write lock, but not a read lock... - @Counter.update { |c| c + 1 } - end - @HeldCount.value = held + 1 - return true - else - c = @Counter.value - if !waiting_or_running_writer?(c) && @Counter.compare_and_set(c, c+1) - @HeldCount.value = held + 1 - return true - end - end - false - end - - # Release a previously acquired read lock. - # - # @return [Boolean] true if the lock is successfully released - def release_read_lock - held = @HeldCount.value = @HeldCount.value - 1 - rlocks_held = held & READ_LOCK_MASK - if rlocks_held == 0 - c = @Counter.update { |counter| counter - 1 } - # If one or more writers were waiting, and we were the last reader, wake a writer up - if waiting_or_running_writer?(c) && running_readers(c) == 0 - @WriteQueue.signal - end - elsif rlocks_held == READ_LOCK_MASK - raise IllegalOperationError, "Cannot release a read lock which is not held" - end - true - end - - # Acquire a write lock. Will block and wait for all active readers and writers. 
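The documentation for #acquire_write_lock continues below. A hedged sketch of the non-blocking #try_read_lock path documented above:

    require 'concurrent/atomic/reentrant_read_write_lock'

    lock = Concurrent::ReentrantReadWriteLock.new

    if lock.try_read_lock
      begin
        # ... read shared state without ever blocking on the lock ...
      ensure
        lock.release_read_lock
      end
    else
      # a writer is running or queued; fall back to another strategy
    end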
- # - # @return [Boolean] true if the lock is successfully acquired - # - # @raise [Concurrent::ResourceLimitError] if the maximum number of writers - # is exceeded. - def acquire_write_lock - if (held = @HeldCount.value) >= WRITE_LOCK_HELD - # if we already have a write (exclusive) lock, there's no need to wait - @HeldCount.value = held + WRITE_LOCK_HELD - return true - end - - while true - c = @Counter.value - raise ResourceLimitError.new('Too many writer threads') if max_writers?(c) - - # To go ahead and take the lock without waiting, there must be no writer - # running right now, AND no writers who came before us still waiting to - # acquire the lock - # Additionally, if any read locks have been taken, we must hold all of them - if held > 0 && @Counter.compare_and_set(1, c+RUNNING_WRITER) - # If we are the only one reader and successfully swap the RUNNING_WRITER bit on, then we can go ahead - @HeldCount.value = held + WRITE_LOCK_HELD - return true - elsif @Counter.compare_and_set(c, c+WAITING_WRITER) - while true - # Now we have successfully incremented, so no more readers will be able to increment - # (they will wait instead) - # However, readers OR writers could decrement right here - @WriteQueue.synchronize do - # So we have to do another check inside the synchronized section - # If a writer OR another reader is running, then go to sleep - c = @Counter.value - @WriteQueue.ns_wait if running_writer?(c) || running_readers(c) != held - end - # Note: if you are thinking of replacing the above 'synchronize' block - # with #wait_until, read the comment in #acquire_read_lock first! - - # We just came out of a wait - # If we successfully turn the RUNNING_WRITER bit on with an atomic swap, - # then we are OK to stop waiting and go ahead - # Otherwise go back and wait again - c = @Counter.value - if !running_writer?(c) && - running_readers(c) == held && - @Counter.compare_and_set(c, c+RUNNING_WRITER-WAITING_WRITER) - @HeldCount.value = held + WRITE_LOCK_HELD - return true - end - end - end - end - end - - # Try to acquire a write lock and return true if we succeed. If it cannot be - # acquired immediately, return false. - # - # @return [Boolean] true if the lock is successfully acquired - def try_write_lock - if (held = @HeldCount.value) >= WRITE_LOCK_HELD - @HeldCount.value = held + WRITE_LOCK_HELD - return true - else - c = @Counter.value - if !waiting_or_running_writer?(c) && - running_readers(c) == held && - @Counter.compare_and_set(c, c+RUNNING_WRITER) - @HeldCount.value = held + WRITE_LOCK_HELD - return true - end - end - false - end - - # Release a previously acquired write lock. 
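The documentation for #release_write_lock continues below. A short, hedged sketch of the re-entrant write path handled by the fast path at the top of #acquire_write_lock above:

    require 'concurrent/atomic/reentrant_read_write_lock'

    lock = Concurrent::ReentrantReadWriteLock.new

    lock.acquire_write_lock
    lock.acquire_write_lock     # re-entrant: the same thread may nest write locks
    lock.release_write_lock     # still exclusively held after one release
    lock.release_write_lock     # fully released; other threads may now proceed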
- # - # @return [Boolean] true if the lock is successfully released - def release_write_lock - held = @HeldCount.value = @HeldCount.value - WRITE_LOCK_HELD - wlocks_held = held & WRITE_LOCK_MASK - if wlocks_held == 0 - c = @Counter.update { |counter| counter - RUNNING_WRITER } - @ReadQueue.broadcast - @WriteQueue.signal if waiting_writers(c) > 0 - elsif wlocks_held == WRITE_LOCK_MASK - raise IllegalOperationError, "Cannot release a write lock which is not held" - end - true - end - - private - - # @!visibility private - def running_readers(c = @Counter.value) - c & MAX_READERS - end - - # @!visibility private - def running_readers?(c = @Counter.value) - (c & MAX_READERS) > 0 - end - - # @!visibility private - def running_writer?(c = @Counter.value) - c >= RUNNING_WRITER - end - - # @!visibility private - def waiting_writers(c = @Counter.value) - (c & MAX_WRITERS) >> READER_BITS - end - - # @!visibility private - def waiting_or_running_writer?(c = @Counter.value) - c >= WAITING_WRITER - end - - # @!visibility private - def max_readers?(c = @Counter.value) - (c & MAX_READERS) == MAX_READERS - end - - # @!visibility private - def max_writers?(c = @Counter.value) - (c & MAX_WRITERS) == MAX_WRITERS - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/semaphore.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/semaphore.rb deleted file mode 100644 index f0799f0f41a63..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/semaphore.rb +++ /dev/null @@ -1,163 +0,0 @@ -require 'concurrent/atomic/mutex_semaphore' - -module Concurrent - - ################################################################### - - # @!macro semaphore_method_initialize - # - # Create a new `Semaphore` with the initial `count`. - # - # @param [Fixnum] count the initial count - # - # @raise [ArgumentError] if `count` is not an integer - - # @!macro semaphore_method_acquire - # - # Acquires the given number of permits from this semaphore, - # blocking until all are available. If a block is given, - # yields to it and releases the permits afterwards. - # - # @param [Fixnum] permits Number of permits to acquire - # - # @raise [ArgumentError] if `permits` is not an integer or is less than zero - # - # @return [nil, BasicObject] Without a block, `nil` is returned. If a block - # is given, its return value is returned. - - # @!macro semaphore_method_available_permits - # - # Returns the current number of permits available in this semaphore. - # - # @return [Integer] - - # @!macro semaphore_method_drain_permits - # - # Acquires and returns all permits that are immediately available. - # - # @return [Integer] - - # @!macro semaphore_method_try_acquire - # - # Acquires the given number of permits from this semaphore, - # only if all are available at the time of invocation or within - # `timeout` interval. If a block is given, yields to it if the permits - # were successfully acquired, and releases them afterward, returning the - # block's return value. 
- # - # @param [Fixnum] permits the number of permits to acquire - # - # @param [Fixnum] timeout the number of seconds to wait for the counter - # or `nil` to return immediately - # - # @raise [ArgumentError] if `permits` is not an integer or is less than zero - # - # @return [true, false, nil, BasicObject] `false` if no permits are - # available, `true` when acquired a permit. If a block is given, the - # block's return value is returned if the permits were acquired; if not, - # `nil` is returned. - - # @!macro semaphore_method_release - # - # Releases the given number of permits, returning them to the semaphore. - # - # @param [Fixnum] permits Number of permits to return to the semaphore. - # - # @raise [ArgumentError] if `permits` is not a number or is less than zero - # - # @return [nil] - - ################################################################### - - # @!macro semaphore_public_api - # - # @!method initialize(count) - # @!macro semaphore_method_initialize - # - # @!method acquire(permits = 1) - # @!macro semaphore_method_acquire - # - # @!method available_permits - # @!macro semaphore_method_available_permits - # - # @!method drain_permits - # @!macro semaphore_method_drain_permits - # - # @!method try_acquire(permits = 1, timeout = nil) - # @!macro semaphore_method_try_acquire - # - # @!method release(permits = 1) - # @!macro semaphore_method_release - - ################################################################### - - # @!visibility private - # @!macro internal_implementation_note - SemaphoreImplementation = if Concurrent.on_jruby? - require 'concurrent/utility/native_extension_loader' - JavaSemaphore - else - MutexSemaphore - end - private_constant :SemaphoreImplementation - - # @!macro semaphore - # - # A counting semaphore. Conceptually, a semaphore maintains a set of - # permits. Each {#acquire} blocks if necessary until a permit is - # available, and then takes it. Each {#release} adds a permit, potentially - # releasing a blocking acquirer. - # However, no actual permit objects are used; the Semaphore just keeps a - # count of the number available and acts accordingly. - # Alternatively, permits may be acquired within a block, and automatically - # released after the block finishes executing. 
- # - # @!macro semaphore_public_api - # @example - # semaphore = Concurrent::Semaphore.new(2) - # - # t1 = Thread.new do - # semaphore.acquire - # puts "Thread 1 acquired semaphore" - # end - # - # t2 = Thread.new do - # semaphore.acquire - # puts "Thread 2 acquired semaphore" - # end - # - # t3 = Thread.new do - # semaphore.acquire - # puts "Thread 3 acquired semaphore" - # end - # - # t4 = Thread.new do - # sleep(2) - # puts "Thread 4 releasing semaphore" - # semaphore.release - # end - # - # [t1, t2, t3, t4].each(&:join) - # - # # prints: - # # Thread 3 acquired semaphore - # # Thread 2 acquired semaphore - # # Thread 4 releasing semaphore - # # Thread 1 acquired semaphore - # - # @example - # semaphore = Concurrent::Semaphore.new(1) - # - # puts semaphore.available_permits - # semaphore.acquire do - # puts semaphore.available_permits - # end - # puts semaphore.available_permits - # - # # prints: - # # 1 - # # 0 - # # 1 - class Semaphore < SemaphoreImplementation - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb deleted file mode 100644 index 3b7e12b5bb97c..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb +++ /dev/null @@ -1,111 +0,0 @@ -require 'concurrent/constants' -require_relative 'locals' - -module Concurrent - - # A `ThreadLocalVar` is a variable where the value is different for each thread. - # Each variable may have a default value, but when you modify the variable only - # the current thread will ever see that change. - # - # This is similar to Ruby's built-in thread-local variables (`Thread#thread_variable_get`), - # but with these major advantages: - # * `ThreadLocalVar` has its own identity, it doesn't need a Symbol. - # * Each Ruby's built-in thread-local variable leaks some memory forever (it's a Symbol held forever on the thread), - # so it's only OK to create a small amount of them. - # `ThreadLocalVar` has no such issue and it is fine to create many of them. - # * Ruby's built-in thread-local variables leak forever the value set on each thread (unless set to nil explicitly). - # `ThreadLocalVar` automatically removes the mapping for each thread once the `ThreadLocalVar` instance is GC'd. - # - # @!macro thread_safe_variable_comparison - # - # @example - # v = ThreadLocalVar.new(14) - # v.value #=> 14 - # v.value = 2 - # v.value #=> 2 - # - # @example - # v = ThreadLocalVar.new(14) - # - # t1 = Thread.new do - # v.value #=> 14 - # v.value = 1 - # v.value #=> 1 - # end - # - # t2 = Thread.new do - # v.value #=> 14 - # v.value = 2 - # v.value #=> 2 - # end - # - # v.value #=> 14 - class ThreadLocalVar - LOCALS = ThreadLocals.new - - # Creates a thread local variable. - # - # @param [Object] default the default value when otherwise unset - # @param [Proc] default_block Optional block that gets called to obtain the - # default value for each thread - def initialize(default = nil, &default_block) - if default && block_given? - raise ArgumentError, "Cannot use both value and block as default value" - end - - if block_given? - @default_block = default_block - @default = nil - else - @default_block = nil - @default = default - end - - @index = LOCALS.next_index(self) - end - - # Returns the value in the current thread's copy of this thread-local variable. 
- # - # @return [Object] the current value - def value - LOCALS.fetch(@index) { default } - end - - # Sets the current thread's copy of this thread-local variable to the specified value. - # - # @param [Object] value the value to set - # @return [Object] the new value - def value=(value) - LOCALS.set(@index, value) - end - - # Bind the given value to thread local storage during - # execution of the given block. - # - # @param [Object] value the value to bind - # @yield the operation to be performed with the bound variable - # @return [Object] the value - def bind(value) - if block_given? - old_value = self.value - self.value = value - begin - yield - ensure - self.value = old_value - end - end - end - - protected - - # @!visibility private - def default - if @default_block - self.value = @default_block.call - else - @default - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/atomic_direct_update.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/atomic_direct_update.rb deleted file mode 100644 index 5d2d7edd4f540..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/atomic_direct_update.rb +++ /dev/null @@ -1,37 +0,0 @@ -require 'concurrent/errors' - -module Concurrent - - # Define update methods that use direct paths - # - # @!visibility private - # @!macro internal_implementation_note - module AtomicDirectUpdate - def update - true until compare_and_set(old_value = get, new_value = yield(old_value)) - new_value - end - - def try_update - old_value = get - new_value = yield old_value - - return unless compare_and_set old_value, new_value - - new_value - end - - def try_update! 
- old_value = get - new_value = yield old_value - unless compare_and_set(old_value, new_value) - if $VERBOSE - raise ConcurrentUpdateError, "Update failed" - else - raise ConcurrentUpdateError, "Update failed", ConcurrentUpdateError::CONC_UP_ERR_BACKTRACE - end - end - new_value - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb deleted file mode 100644 index e5e2a6377d59a..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb +++ /dev/null @@ -1,67 +0,0 @@ -require 'concurrent/atomic_reference/atomic_direct_update' -require 'concurrent/atomic_reference/numeric_cas_wrapper' -require 'concurrent/synchronization/safe_initialization' - -module Concurrent - - # @!visibility private - # @!macro internal_implementation_note - class MutexAtomicReference - extend Concurrent::Synchronization::SafeInitialization - include AtomicDirectUpdate - include AtomicNumericCompareAndSetWrapper - alias_method :compare_and_swap, :compare_and_set - - # @!macro atomic_reference_method_initialize - def initialize(value = nil) - super() - @Lock = ::Mutex.new - @value = value - end - - # @!macro atomic_reference_method_get - def get - synchronize { @value } - end - alias_method :value, :get - - # @!macro atomic_reference_method_set - def set(new_value) - synchronize { @value = new_value } - end - alias_method :value=, :set - - # @!macro atomic_reference_method_get_and_set - def get_and_set(new_value) - synchronize do - old_value = @value - @value = new_value - old_value - end - end - alias_method :swap, :get_and_set - - # @!macro atomic_reference_method_compare_and_set - def _compare_and_set(old_value, new_value) - synchronize do - if @value.equal? old_value - @value = new_value - true - else - false - end - end - end - - protected - - # @!visibility private - def synchronize - if @Lock.owned? - yield - else - @Lock.synchronize { yield } - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb deleted file mode 100644 index 709a3822318dc..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb +++ /dev/null @@ -1,28 +0,0 @@ -module Concurrent - - # Special "compare and set" handling of numeric values. - # - # @!visibility private - # @!macro internal_implementation_note - module AtomicNumericCompareAndSetWrapper - - # @!macro atomic_reference_method_compare_and_set - def compare_and_set(old_value, new_value) - if old_value.kind_of? Numeric - while true - old = get - - return false unless old.kind_of? 
Numeric - - return false unless old == old_value - - result = _compare_and_set(old, new_value) - return result if result - end - else - _compare_and_set(old_value, new_value) - end - end - - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomics.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomics.rb deleted file mode 100644 index 16cbe66101b1e..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/atomics.rb +++ /dev/null @@ -1,10 +0,0 @@ -require 'concurrent/atomic/atomic_reference' -require 'concurrent/atomic/atomic_boolean' -require 'concurrent/atomic/atomic_fixnum' -require 'concurrent/atomic/cyclic_barrier' -require 'concurrent/atomic/count_down_latch' -require 'concurrent/atomic/event' -require 'concurrent/atomic/read_write_lock' -require 'concurrent/atomic/reentrant_read_write_lock' -require 'concurrent/atomic/semaphore' -require 'concurrent/atomic/thread_local_var' diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb deleted file mode 100644 index 7c700bd78ac4c..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb +++ /dev/null @@ -1,107 +0,0 @@ -require 'concurrent/synchronization/lockable_object' - -module Concurrent - module Collection - - # A thread safe observer set implemented using copy-on-read approach: - # observers are added and removed from a thread safe collection; every time - # a notification is required the internal data structure is copied to - # prevent concurrency issues - # - # @api private - class CopyOnNotifyObserverSet < Synchronization::LockableObject - - def initialize - super() - synchronize { ns_initialize } - end - - # @!macro observable_add_observer - def add_observer(observer = nil, func = :update, &block) - if observer.nil? && block.nil? - raise ArgumentError, 'should pass observer as a first argument or block' - elsif observer && block - raise ArgumentError.new('cannot provide both an observer and a block') - end - - if block - observer = block - func = :call - end - - synchronize do - @observers[observer] = func - observer - end - end - - # @!macro observable_delete_observer - def delete_observer(observer) - synchronize do - @observers.delete(observer) - observer - end - end - - # @!macro observable_delete_observers - def delete_observers - synchronize do - @observers.clear - self - end - end - - # @!macro observable_count_observers - def count_observers - synchronize { @observers.count } - end - - # Notifies all registered observers with optional args - # @param [Object] args arguments to be passed to each observer - # @return [CopyOnWriteObserverSet] self - def notify_observers(*args, &block) - observers = duplicate_observers - notify_to(observers, *args, &block) - self - end - - # Notifies all registered observers with optional args and deletes them. 
- # - # @param [Object] args arguments to be passed to each observer - # @return [CopyOnWriteObserverSet] self - def notify_and_delete_observers(*args, &block) - observers = duplicate_and_clear_observers - notify_to(observers, *args, &block) - self - end - - protected - - def ns_initialize - @observers = {} - end - - private - - def duplicate_and_clear_observers - synchronize do - observers = @observers.dup - @observers.clear - observers - end - end - - def duplicate_observers - synchronize { @observers.dup } - end - - def notify_to(observers, *args) - raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? - observers.each do |observer, function| - args = yield if block_given? - observer.send(function, *args) - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb deleted file mode 100644 index bcb6750d41109..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb +++ /dev/null @@ -1,111 +0,0 @@ -require 'concurrent/synchronization/lockable_object' - -module Concurrent - module Collection - - # A thread safe observer set implemented using copy-on-write approach: - # every time an observer is added or removed the whole internal data structure is - # duplicated and replaced with a new one. - # - # @api private - class CopyOnWriteObserverSet < Synchronization::LockableObject - - def initialize - super() - synchronize { ns_initialize } - end - - # @!macro observable_add_observer - def add_observer(observer = nil, func = :update, &block) - if observer.nil? && block.nil? - raise ArgumentError, 'should pass observer as a first argument or block' - elsif observer && block - raise ArgumentError.new('cannot provide both an observer and a block') - end - - if block - observer = block - func = :call - end - - synchronize do - new_observers = @observers.dup - new_observers[observer] = func - @observers = new_observers - observer - end - end - - # @!macro observable_delete_observer - def delete_observer(observer) - synchronize do - new_observers = @observers.dup - new_observers.delete(observer) - @observers = new_observers - observer - end - end - - # @!macro observable_delete_observers - def delete_observers - self.observers = {} - self - end - - # @!macro observable_count_observers - def count_observers - observers.count - end - - # Notifies all registered observers with optional args - # @param [Object] args arguments to be passed to each observer - # @return [CopyOnWriteObserverSet] self - def notify_observers(*args, &block) - notify_to(observers, *args, &block) - self - end - - # Notifies all registered observers with optional args and deletes them. - # - # @param [Object] args arguments to be passed to each observer - # @return [CopyOnWriteObserverSet] self - def notify_and_delete_observers(*args, &block) - old = clear_observers_and_return_old - notify_to(old, *args, &block) - self - end - - protected - - def ns_initialize - @observers = {} - end - - private - - def notify_to(observers, *args) - raise ArgumentError.new('cannot give arguments and a block') if block_given? && !args.empty? - observers.each do |observer, function| - args = yield if block_given? 
- observer.send(function, *args) - end - end - - def observers - synchronize { @observers } - end - - def observers=(new_set) - synchronize { @observers = new_set } - end - - def clear_observers_and_return_old - synchronize do - old_observers = @observers - @observers = {} - old_observers - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb deleted file mode 100644 index 2be9e4373a32d..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb +++ /dev/null @@ -1,84 +0,0 @@ -if Concurrent.on_jruby? - - module Concurrent - module Collection - - - # @!macro priority_queue - # - # @!visibility private - # @!macro internal_implementation_note - class JavaNonConcurrentPriorityQueue - - # @!macro priority_queue_method_initialize - def initialize(opts = {}) - order = opts.fetch(:order, :max) - if [:min, :low].include?(order) - @queue = java.util.PriorityQueue.new(11) # 11 is the default initial capacity - else - @queue = java.util.PriorityQueue.new(11, java.util.Collections.reverseOrder()) - end - end - - # @!macro priority_queue_method_clear - def clear - @queue.clear - true - end - - # @!macro priority_queue_method_delete - def delete(item) - found = false - while @queue.remove(item) do - found = true - end - found - end - - # @!macro priority_queue_method_empty - def empty? - @queue.size == 0 - end - - # @!macro priority_queue_method_include - def include?(item) - @queue.contains(item) - end - alias_method :has_priority?, :include? - - # @!macro priority_queue_method_length - def length - @queue.size - end - alias_method :size, :length - - # @!macro priority_queue_method_peek - def peek - @queue.peek - end - - # @!macro priority_queue_method_pop - def pop - @queue.poll - end - alias_method :deq, :pop - alias_method :shift, :pop - - # @!macro priority_queue_method_push - def push(item) - raise ArgumentError.new('cannot enqueue nil') if item.nil? - @queue.add(item) - end - alias_method :<<, :push - alias_method :enq, :push - - # @!macro priority_queue_method_from_list - def self.from_list(list, opts = {}) - queue = new(opts) - list.each{|item| queue << item } - queue - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb deleted file mode 100644 index 3704410ba0bd4..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb +++ /dev/null @@ -1,160 +0,0 @@ -require 'concurrent/synchronization/object' - -module Concurrent - - # @!macro warn.edge - class LockFreeStack < Synchronization::Object - - safe_initialization! - - class Node - # TODO (pitr-ch 20-Dec-2016): Could be unified with Stack class? 
- - # @return [Node] - attr_reader :next_node - - # @return [Object] - attr_reader :value - - # @!visibility private - # allow to nil-ify to free GC when the entry is no longer relevant, not synchronised - attr_writer :value - - def initialize(value, next_node) - @value = value - @next_node = next_node - end - - singleton_class.send :alias_method, :[], :new - end - - # The singleton for empty node - EMPTY = Node[nil, nil] - def EMPTY.next_node - self - end - - attr_atomic(:head) - private :head, :head=, :swap_head, :compare_and_set_head, :update_head - - # @!visibility private - def self.of1(value) - new Node[value, EMPTY] - end - - # @!visibility private - def self.of2(value1, value2) - new Node[value1, Node[value2, EMPTY]] - end - - # @param [Node] head - def initialize(head = EMPTY) - super() - self.head = head - end - - # @param [Node] head - # @return [true, false] - def empty?(head = head()) - head.equal? EMPTY - end - - # @param [Node] head - # @param [Object] value - # @return [true, false] - def compare_and_push(head, value) - compare_and_set_head head, Node[value, head] - end - - # @param [Object] value - # @return [self] - def push(value) - while true - current_head = head - return self if compare_and_set_head current_head, Node[value, current_head] - end - end - - # @return [Node] - def peek - head - end - - # @param [Node] head - # @return [true, false] - def compare_and_pop(head) - compare_and_set_head head, head.next_node - end - - # @return [Object] - def pop - while true - current_head = head - return current_head.value if compare_and_set_head current_head, current_head.next_node - end - end - - # @param [Node] head - # @return [true, false] - def compare_and_clear(head) - compare_and_set_head head, EMPTY - end - - include Enumerable - - # @param [Node] head - # @return [self] - def each(head = nil) - return to_enum(:each, head) unless block_given? - it = head || peek - until it.equal?(EMPTY) - yield it.value - it = it.next_node - end - self - end - - # @return [true, false] - def clear - while true - current_head = head - return false if current_head == EMPTY - return true if compare_and_set_head current_head, EMPTY - end - end - - # @param [Node] head - # @return [true, false] - def clear_if(head) - compare_and_set_head head, EMPTY - end - - # @param [Node] head - # @param [Node] new_head - # @return [true, false] - def replace_if(head, new_head) - compare_and_set_head head, new_head - end - - # @return [self] - # @yield over the cleared stack - # @yieldparam [Object] value - def clear_each(&block) - while true - current_head = head - return self if current_head == EMPTY - if compare_and_set_head current_head, EMPTY - each current_head, &block - return self - end - end - end - - # @return [String] Short string representation. 
- def to_s - format '%s %s>', super[0..-2], to_a.to_s - end - - alias_method :inspect, :to_s - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb deleted file mode 100644 index dc5189389da36..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb +++ /dev/null @@ -1,927 +0,0 @@ -require 'concurrent/constants' -require 'concurrent/thread_safe/util' -require 'concurrent/thread_safe/util/adder' -require 'concurrent/thread_safe/util/cheap_lockable' -require 'concurrent/thread_safe/util/power_of_two_tuple' -require 'concurrent/thread_safe/util/volatile' -require 'concurrent/thread_safe/util/xor_shift_random' - -module Concurrent - - # @!visibility private - module Collection - - # A Ruby port of the Doug Lea's jsr166e.ConcurrentHashMapV8 class version 1.59 - # available in public domain. - # - # Original source code available here: - # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/ConcurrentHashMapV8.java?revision=1.59 - # - # The Ruby port skips out the +TreeBin+ (red-black trees for use in bins whose - # size exceeds a threshold). - # - # A hash table supporting full concurrency of retrievals and high expected - # concurrency for updates. However, even though all operations are - # thread-safe, retrieval operations do _not_ entail locking, and there is - # _not_ any support for locking the entire table in a way that prevents all - # access. - # - # Retrieval operations generally do not block, so may overlap with update - # operations. Retrievals reflect the results of the most recently _completed_ - # update operations holding upon their onset. (More formally, an update - # operation for a given key bears a _happens-before_ relation with any (non - # +nil+) retrieval for that key reporting the updated value.) For aggregate - # operations such as +clear()+, concurrent retrievals may reflect insertion or - # removal of only some entries. Similarly, the +each_pair+ iterator yields - # elements reflecting the state of the hash table at some point at or since - # the start of the +each_pair+. Bear in mind that the results of aggregate - # status methods including +size()+ and +empty?+} are typically useful only - # when a map is not undergoing concurrent updates in other threads. Otherwise - # the results of these methods reflect transient states that may be adequate - # for monitoring or estimation purposes, but not for program control. - # - # The table is dynamically expanded when there are too many collisions (i.e., - # keys that have distinct hash codes but fall into the same slot modulo the - # table size), with the expected average effect of maintaining roughly two - # bins per mapping (corresponding to a 0.75 load factor threshold for - # resizing). There may be much variance around this average as mappings are - # added and removed, but overall, this maintains a commonly accepted - # time/space tradeoff for hash tables. However, resizing this or any other - # kind of hash table may be a relatively slow operation. When possible, it is - # a good idea to provide a size estimate as an optional :initial_capacity - # initializer argument. 
An additional optional :load_factor constructor - # argument provides a further means of customizing initial table capacity by - # specifying the table density to be used in calculating the amount of space - # to allocate for the given number of elements. Note that using many keys with - # exactly the same +hash+ is a sure way to slow down performance of any hash - # table. - # - # ## Design overview - # - # The primary design goal of this hash table is to maintain concurrent - # readability (typically method +[]+, but also iteration and related methods) - # while minimizing update contention. Secondary goals are to keep space - # consumption about the same or better than plain +Hash+, and to support high - # initial insertion rates on an empty table by many threads. - # - # Each key-value mapping is held in a +Node+. The validation-based approach - # explained below leads to a lot of code sprawl because retry-control - # precludes factoring into smaller methods. - # - # The table is lazily initialized to a power-of-two size upon the first - # insertion. Each bin in the table normally contains a list of +Node+s (most - # often, the list has only zero or one +Node+). Table accesses require - # volatile/atomic reads, writes, and CASes. The lists of nodes within bins are - # always accurately traversable under volatile reads, so long as lookups check - # hash code and non-nullness of value before checking key equality. - # - # We use the top two bits of +Node+ hash fields for control purposes -- they - # are available anyway because of addressing constraints. As explained further - # below, these top bits are used as follows: - # - # - 00 - Normal - # - 01 - Locked - # - 11 - Locked and may have a thread waiting for lock - # - 10 - +Node+ is a forwarding node - # - # The lower 28 bits of each +Node+'s hash field contain a the key's hash code, - # except for forwarding nodes, for which the lower bits are zero (and so - # always have hash field == +MOVED+). - # - # Insertion (via +[]=+ or its variants) of the first node in an empty bin is - # performed by just CASing it to the bin. This is by far the most common case - # for put operations under most key/hash distributions. Other update - # operations (insert, delete, and replace) require locks. We do not want to - # waste the space required to associate a distinct lock object with each bin, - # so instead use the first node of a bin list itself as a lock. Blocking - # support for these locks relies +Concurrent::ThreadSafe::Util::CheapLockable. However, we also need a - # +try_lock+ construction, so we overlay these by using bits of the +Node+ - # hash field for lock control (see above), and so normally use builtin - # monitors only for blocking and signalling using - # +cheap_wait+/+cheap_broadcast+ constructions. See +Node#try_await_lock+. - # - # Using the first node of a list as a lock does not by itself suffice though: - # When a node is locked, any update must first validate that it is still the - # first node after locking it, and retry if not. Because new nodes are always - # appended to lists, once a node is first in a bin, it remains first until - # deleted or the bin becomes invalidated (upon resizing). However, operations - # that only conditionally update may inspect nodes until the point of update. - # This is a converse of sorts to the lazy locking technique described by - # Herlihy & Shavit. 
- # - # The main disadvantage of per-bin locks is that other update operations on - # other nodes in a bin list protected by the same lock can stall, for example - # when user +eql?+ or mapping functions take a long time. However, - # statistically, under random hash codes, this is not a common problem. - # Ideally, the frequency of nodes in bins follows a Poisson distribution - # (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of - # about 0.5 on average, given the resizing threshold of 0.75, although with a - # large variance because of resizing granularity. Ignoring variance, the - # expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / - # factorial(k)). The first values are: - # - # - 0: 0.60653066 - # - 1: 0.30326533 - # - 2: 0.07581633 - # - 3: 0.01263606 - # - 4: 0.00157952 - # - 5: 0.00015795 - # - 6: 0.00001316 - # - 7: 0.00000094 - # - 8: 0.00000006 - # - more: less than 1 in ten million - # - # Lock contention probability for two threads accessing distinct elements is - # roughly 1 / (8 * #elements) under random hashes. - # - # The table is resized when occupancy exceeds a percentage threshold - # (nominally, 0.75, but see below). Only a single thread performs the resize - # (using field +size_control+, to arrange exclusion), but the table otherwise - # remains usable for reads and updates. Resizing proceeds by transferring - # bins, one by one, from the table to the next table. Because we are using - # power-of-two expansion, the elements from each bin must either stay at same - # index, or move with a power of two offset. We eliminate unnecessary node - # creation by catching cases where old nodes can be reused because their next - # fields won't change. On average, only about one-sixth of them need cloning - # when a table doubles. The nodes they replace will be garbage collectable as - # soon as they are no longer referenced by any reader thread that may be in - # the midst of concurrently traversing table. Upon transfer, the old table bin - # contains only a special forwarding node (with hash field +MOVED+) that - # contains the next table as its key. On encountering a forwarding node, - # access and update operations restart, using the new table. - # - # Each bin transfer requires its bin lock. However, unlike other cases, a - # transfer can skip a bin if it fails to acquire its lock, and revisit it - # later. Method +rebuild+ maintains a buffer of TRANSFER_BUFFER_SIZE bins that - # have been skipped because of failure to acquire a lock, and blocks only if - # none are available (i.e., only very rarely). The transfer operation must - # also ensure that all accessible bins in both the old and new table are - # usable by any traversal. When there are no lock acquisition failures, this - # is arranged simply by proceeding from the last bin (+table.size - 1+) up - # towards the first. Upon seeing a forwarding node, traversals arrange to move - # to the new table without revisiting nodes. However, when any node is skipped - # during a transfer, all earlier table bins may have become visible, so are - # initialized with a reverse-forwarding node back to the old table until the - # new ones are established. (This sometimes requires transiently locking a - # forwarding node, which is possible under the above encoding.) These more - # expensive mechanics trigger only when necessary. 
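# A quick check of the bin-occupancy figures listed above (editor's sketch in
# plain Ruby, not part of the vendored file): P(k) = exp(-0.5) * 0.5**k / k!.
def bin_size_probability(k, mean = 0.5)
  Math.exp(-mean) * mean**k / (1..k).reduce(1, :*)
end

(0..8).each { |k| puts format('%d: %.8f', k, bin_size_probability(k)) }
# 0: 0.60653066, 1: 0.30326533, 2: 0.07581633, ... (matches the list above)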
- # - # The traversal scheme also applies to partial traversals of - # ranges of bins (via an alternate Traverser constructor) - # to support partitioned aggregate operations. Also, read-only - # operations give up if ever forwarded to a null table, which - # provides support for shutdown-style clearing, which is also not - # currently implemented. - # - # Lazy table initialization minimizes footprint until first use. - # - # The element count is maintained using a +Concurrent::ThreadSafe::Util::Adder+, - # which avoids contention on updates but can encounter cache thrashing - # if read too frequently during concurrent access. To avoid reading so - # often, resizing is attempted either when a bin lock is - # contended, or upon adding to a bin already holding two or more - # nodes (checked before adding in the +x_if_absent+ methods, after - # adding in others). Under uniform hash distributions, the - # probability of this occurring at threshold is around 13%, - # meaning that only about 1 in 8 puts check threshold (and after - # resizing, many fewer do so). But this approximation has high - # variance for small table sizes, so we check on any collision - # for sizes <= 64. The bulk putAll operation further reduces - # contention by only committing count updates upon these size - # checks. - # - # @!visibility private - class AtomicReferenceMapBackend - - # @!visibility private - class Table < Concurrent::ThreadSafe::Util::PowerOfTwoTuple - def cas_new_node(i, hash, key, value) - cas(i, nil, Node.new(hash, key, value)) - end - - def try_to_cas_in_computed(i, hash, key) - succeeded = false - new_value = nil - new_node = Node.new(locked_hash = hash | LOCKED, key, NULL) - if cas(i, nil, new_node) - begin - if NULL == (new_value = yield(NULL)) - was_null = true - else - new_node.value = new_value - end - succeeded = true - ensure - volatile_set(i, nil) if !succeeded || was_null - new_node.unlock_via_hash(locked_hash, hash) - end - end - return succeeded, new_value - end - - def try_lock_via_hash(i, node, node_hash) - node.try_lock_via_hash(node_hash) do - yield if volatile_get(i) == node - end - end - - def delete_node_at(i, node, predecessor_node) - if predecessor_node - predecessor_node.next = node.next - else - volatile_set(i, node.next) - end - end - end - - # Key-value entry. Nodes with a hash field of +MOVED+ are special, and do - # not contain user keys or values. Otherwise, keys are never +nil+, and - # +NULL+ +value+ fields indicate that a node is in the process of being - # deleted or created. For purposes of read-only access, a key may be read - # before a value, but can only be used after checking value to be +!= NULL+. - # - # @!visibility private - class Node - extend Concurrent::ThreadSafe::Util::Volatile - attr_volatile :hash, :value, :next - - include Concurrent::ThreadSafe::Util::CheapLockable - - bit_shift = Concurrent::ThreadSafe::Util::FIXNUM_BIT_SIZE - 2 # need 2 bits for ourselves - # Encodings for special uses of Node hash fields. See above for explanation. - MOVED = ('10' << ('0' * bit_shift)).to_i(2) # hash field for forwarding nodes - LOCKED = ('01' << ('0' * bit_shift)).to_i(2) # set/tested only as a bit - WAITING = ('11' << ('0' * bit_shift)).to_i(2) # both bits set/tested together - HASH_BITS = ('00' << ('1' * bit_shift)).to_i(2) # usable bits of normal node hash - - SPIN_LOCK_ATTEMPTS = Concurrent::ThreadSafe::Util::CPU_COUNT > 1 ? 
Concurrent::ThreadSafe::Util::CPU_COUNT * 2 : 0 - - attr_reader :key - - def initialize(hash, key, value, next_node = nil) - super() - @key = key - self.lazy_set_hash(hash) - self.lazy_set_value(value) - self.next = next_node - end - - # Spins a while if +LOCKED+ bit set and this node is the first of its bin, - # and then sets +WAITING+ bits on hash field and blocks (once) if they are - # still set. It is OK for this method to return even if lock is not - # available upon exit, which enables these simple single-wait mechanics. - # - # The corresponding signalling operation is performed within callers: Upon - # detecting that +WAITING+ has been set when unlocking lock (via a failed - # CAS from non-waiting +LOCKED+ state), unlockers acquire the - # +cheap_synchronize+ lock and perform a +cheap_broadcast+. - def try_await_lock(table, i) - if table && i >= 0 && i < table.size # bounds check, TODO: why are we bounds checking? - spins = SPIN_LOCK_ATTEMPTS - randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.get - while equal?(table.volatile_get(i)) && self.class.locked_hash?(my_hash = hash) - if spins >= 0 - if (randomizer = (randomizer >> 1)).even? # spin at random - if (spins -= 1) == 0 - Thread.pass # yield before blocking - else - randomizer = base_randomizer = Concurrent::ThreadSafe::Util::XorShiftRandom.xorshift(base_randomizer) if randomizer.zero? - end - end - elsif cas_hash(my_hash, my_hash | WAITING) - force_acquire_lock(table, i) - break - end - end - end - end - - def key?(key) - @key.eql?(key) - end - - def matches?(key, hash) - pure_hash == hash && key?(key) - end - - def pure_hash - hash & HASH_BITS - end - - def try_lock_via_hash(node_hash = hash) - if cas_hash(node_hash, locked_hash = node_hash | LOCKED) - begin - yield - ensure - unlock_via_hash(locked_hash, node_hash) - end - end - end - - def locked? - self.class.locked_hash?(hash) - end - - def unlock_via_hash(locked_hash, node_hash) - unless cas_hash(locked_hash, node_hash) - self.hash = node_hash - cheap_synchronize { cheap_broadcast } - end - end - - private - def force_acquire_lock(table, i) - cheap_synchronize do - if equal?(table.volatile_get(i)) && (hash & WAITING) == WAITING - cheap_wait - else - cheap_broadcast # possibly won race vs signaller - end - end - end - - class << self - def locked_hash?(hash) - (hash & LOCKED) != 0 - end - end - end - - # shorthands - MOVED = Node::MOVED - LOCKED = Node::LOCKED - WAITING = Node::WAITING - HASH_BITS = Node::HASH_BITS - - NOW_RESIZING = -1 - DEFAULT_CAPACITY = 16 - MAX_CAPACITY = Concurrent::ThreadSafe::Util::MAX_INT - - # The buffer size for skipped bins during transfers. The - # value is arbitrary but should be large enough to avoid - # most locking stalls during resizes. - TRANSFER_BUFFER_SIZE = 32 - - extend Concurrent::ThreadSafe::Util::Volatile - attr_volatile :table, # The array of bins. Lazily initialized upon first insertion. Size is always a power of two. - - # Table initialization and resizing control. When negative, the - # table is being initialized or resized. Otherwise, when table is - # null, holds the initial table size to use upon creation, or 0 - # for default. After initialization, holds the next element count - # value upon which to resize the table. - :size_control - - def initialize(options = nil) - super() - @counter = Concurrent::ThreadSafe::Util::Adder.new - initial_capacity = options && options[:initial_capacity] || DEFAULT_CAPACITY - self.size_control = (capacity = table_size_for(initial_capacity)) > MAX_CAPACITY ? 
MAX_CAPACITY : capacity - end - - def get_or_default(key, else_value = nil) - hash = key_hash(key) - current_table = table - while current_table - node = current_table.volatile_get_by_hash(hash) - current_table = - while node - if (node_hash = node.hash) == MOVED - break node.key - elsif (node_hash & HASH_BITS) == hash && node.key?(key) && NULL != (value = node.value) - return value - end - node = node.next - end - end - else_value - end - - def [](key) - get_or_default(key) - end - - def key?(key) - get_or_default(key, NULL) != NULL - end - - def []=(key, value) - get_and_set(key, value) - value - end - - def compute_if_absent(key) - hash = key_hash(key) - current_table = table || initialize_table - while true - if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) - succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key) { yield } - if succeeded - increment_size - return new_value - end - elsif (node_hash = node.hash) == MOVED - current_table = node.key - elsif NULL != (current_value = find_value_in_node_list(node, key, hash, node_hash & HASH_BITS)) - return current_value - elsif Node.locked_hash?(node_hash) - try_await_lock(current_table, i, node) - else - succeeded, value = attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) { yield } - return value if succeeded - end - end - end - - def compute_if_present(key) - new_value = nil - internal_replace(key) do |old_value| - if (new_value = yield(NULL == old_value ? nil : old_value)).nil? - NULL - else - new_value - end - end - new_value - end - - def compute(key) - internal_compute(key) do |old_value| - if (new_value = yield(NULL == old_value ? nil : old_value)).nil? - NULL - else - new_value - end - end - end - - def merge_pair(key, value) - internal_compute(key) do |old_value| - if NULL == old_value || !(value = yield(old_value)).nil? 
- value - else - NULL - end - end - end - - def replace_pair(key, old_value, new_value) - NULL != internal_replace(key, old_value) { new_value } - end - - def replace_if_exists(key, new_value) - if (result = internal_replace(key) { new_value }) && NULL != result - result - end - end - - def get_and_set(key, value) # internalPut in the original CHMV8 - hash = key_hash(key) - current_table = table || initialize_table - while true - if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) - if current_table.cas_new_node(i, hash, key, value) - increment_size - break - end - elsif (node_hash = node.hash) == MOVED - current_table = node.key - elsif Node.locked_hash?(node_hash) - try_await_lock(current_table, i, node) - else - succeeded, old_value = attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) - break old_value if succeeded - end - end - end - - def delete(key) - replace_if_exists(key, NULL) - end - - def delete_pair(key, value) - result = internal_replace(key, value) { NULL } - if result && NULL != result - !!result - else - false - end - end - - def each_pair - return self unless current_table = table - current_table_size = base_size = current_table.size - i = base_index = 0 - while base_index < base_size - if node = current_table.volatile_get(i) - if node.hash == MOVED - current_table = node.key - current_table_size = current_table.size - else - begin - if NULL != (value = node.value) # skip deleted or special nodes - yield node.key, value - end - end while node = node.next - end - end - - if (i_with_base = i + base_size) < current_table_size - i = i_with_base # visit upper slots if present - else - i = base_index += 1 - end - end - self - end - - def size - (sum = @counter.sum) < 0 ? 0 : sum # ignore transient negative values - end - - def empty? - size == 0 - end - - # Implementation for clear. Steps through each bin, removing all nodes. - def clear - return self unless current_table = table - current_table_size = current_table.size - deleted_count = i = 0 - while i < current_table_size - if !(node = current_table.volatile_get(i)) - i += 1 - elsif (node_hash = node.hash) == MOVED - current_table = node.key - current_table_size = current_table.size - elsif Node.locked_hash?(node_hash) - decrement_size(deleted_count) # opportunistically update count - deleted_count = 0 - node.try_await_lock(current_table, i) - else - current_table.try_lock_via_hash(i, node, node_hash) do - begin - deleted_count += 1 if NULL != node.value # recheck under lock - node.value = nil - end while node = node.next - current_table.volatile_set(i, nil) - i += 1 - end - end - end - decrement_size(deleted_count) - self - end - - private - # Internal versions of the insertion methods, each a - # little more complicated than the last. All have - # the same basic structure: - # 1. If table uninitialized, create - # 2. If bin empty, try to CAS new node - # 3. If bin stale, use new table - # 4. Lock and validate; if valid, scan and add or update - # - # The others interweave other checks and/or alternative actions: - # * Plain +get_and_set+ checks for and performs resize after insertion. - # * compute_if_absent prescans for mapping without lock (and fails to add - # if present), which also makes pre-emptive resize checks worthwhile. - # - # Someday when details settle down a bit more, it might be worth - # some factoring to reduce sprawl. 
- def internal_replace(key, expected_old_value = NULL, &block) - hash = key_hash(key) - current_table = table - while current_table - if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) - break - elsif (node_hash = node.hash) == MOVED - current_table = node.key - elsif (node_hash & HASH_BITS) != hash && !node.next # precheck - break # rules out possible existence - elsif Node.locked_hash?(node_hash) - try_await_lock(current_table, i, node) - else - succeeded, old_value = attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash, &block) - return old_value if succeeded - end - end - NULL - end - - def attempt_internal_replace(key, expected_old_value, hash, current_table, i, node, node_hash) - current_table.try_lock_via_hash(i, node, node_hash) do - predecessor_node = nil - old_value = NULL - begin - if node.matches?(key, hash) && NULL != (current_value = node.value) - if NULL == expected_old_value || expected_old_value == current_value # NULL == expected_old_value means whatever value - old_value = current_value - if NULL == (node.value = yield(old_value)) - current_table.delete_node_at(i, node, predecessor_node) - decrement_size - end - end - break - end - - predecessor_node = node - end while node = node.next - - return true, old_value - end - end - - def find_value_in_node_list(node, key, hash, pure_hash) - do_check_for_resize = false - while true - if pure_hash == hash && node.key?(key) && NULL != (value = node.value) - return value - elsif node = node.next - do_check_for_resize = true # at least 2 nodes -> check for resize - pure_hash = node.pure_hash - else - return NULL - end - end - ensure - check_for_resize if do_check_for_resize - end - - def internal_compute(key, &block) - hash = key_hash(key) - current_table = table || initialize_table - while true - if !(node = current_table.volatile_get(i = current_table.hash_to_index(hash))) - succeeded, new_value = current_table.try_to_cas_in_computed(i, hash, key, &block) - if succeeded - if NULL == new_value - break nil - else - increment_size - break new_value - end - end - elsif (node_hash = node.hash) == MOVED - current_table = node.key - elsif Node.locked_hash?(node_hash) - try_await_lock(current_table, i, node) - else - succeeded, new_value = attempt_compute(key, hash, current_table, i, node, node_hash, &block) - break new_value if succeeded - end - end - end - - def attempt_internal_compute_if_absent(key, hash, current_table, i, node, node_hash) - added = false - current_table.try_lock_via_hash(i, node, node_hash) do - while true - if node.matches?(key, hash) && NULL != (value = node.value) - return true, value - end - last = node - unless node = node.next - last.next = Node.new(hash, key, value = yield) - added = true - increment_size - return true, value - end - end - end - ensure - check_for_resize if added - end - - def attempt_compute(key, hash, current_table, i, node, node_hash) - added = false - current_table.try_lock_via_hash(i, node, node_hash) do - predecessor_node = nil - while true - if node.matches?(key, hash) && NULL != (value = node.value) - if NULL == (node.value = value = yield(value)) - current_table.delete_node_at(i, node, predecessor_node) - decrement_size - value = nil - end - return true, value - end - predecessor_node = node - unless node = node.next - if NULL == (value = yield(NULL)) - value = nil - else - predecessor_node.next = Node.new(hash, key, value) - added = true - increment_size - end - return true, value - end - end - end - ensure - 
check_for_resize if added - end - - def attempt_get_and_set(key, value, hash, current_table, i, node, node_hash) - node_nesting = nil - current_table.try_lock_via_hash(i, node, node_hash) do - node_nesting = 1 - old_value = nil - found_old_value = false - while node - if node.matches?(key, hash) && NULL != (old_value = node.value) - found_old_value = true - node.value = value - break - end - last = node - unless node = node.next - last.next = Node.new(hash, key, value) - break - end - node_nesting += 1 - end - - return true, old_value if found_old_value - increment_size - true - end - ensure - check_for_resize if node_nesting && (node_nesting > 1 || current_table.size <= 64) - end - - def initialize_copy(other) - super - @counter = Concurrent::ThreadSafe::Util::Adder.new - self.table = nil - self.size_control = (other_table = other.table) ? other_table.size : DEFAULT_CAPACITY - self - end - - def try_await_lock(current_table, i, node) - check_for_resize # try resizing if can't get lock - node.try_await_lock(current_table, i) - end - - def key_hash(key) - key.hash & HASH_BITS - end - - # Returns a power of two table size for the given desired capacity. - def table_size_for(entry_count) - size = 2 - size <<= 1 while size < entry_count - size - end - - # Initializes table, using the size recorded in +size_control+. - def initialize_table - until current_table ||= table - if (size_ctrl = size_control) == NOW_RESIZING - Thread.pass # lost initialization race; just spin - else - try_in_resize_lock(current_table, size_ctrl) do - initial_size = size_ctrl > 0 ? size_ctrl : DEFAULT_CAPACITY - current_table = self.table = Table.new(initial_size) - initial_size - (initial_size >> 2) # 75% load factor - end - end - end - current_table - end - - # If table is too small and not already resizing, creates next table and - # transfers bins. Rechecks occupancy after a transfer to see if another - # resize is already needed because resizings are lagging additions. - def check_for_resize - while (current_table = table) && MAX_CAPACITY > (table_size = current_table.size) && NOW_RESIZING != (size_ctrl = size_control) && size_ctrl < @counter.sum - try_in_resize_lock(current_table, size_ctrl) do - self.table = rebuild(current_table) - (table_size << 1) - (table_size >> 1) # 75% load factor - end - end - end - - def try_in_resize_lock(current_table, size_ctrl) - if cas_size_control(size_ctrl, NOW_RESIZING) - begin - if current_table == table # recheck under lock - size_ctrl = yield # get new size_control - end - ensure - self.size_control = size_ctrl - end - end - end - - # Moves and/or copies the nodes in each bin to new table. See above for explanation. - def rebuild(table) - old_table_size = table.size - new_table = table.next_in_size_table - # puts "#{old_table_size} -> #{new_table.size}" - forwarder = Node.new(MOVED, new_table, NULL) - rev_forwarder = nil - locked_indexes = nil # holds bins to revisit; nil until needed - locked_arr_idx = 0 - bin = old_table_size - 1 - i = bin - while true - if !(node = table.volatile_get(i)) - # no lock needed (or available) if bin >= 0, because we're not popping values from locked_indexes until we've run through the whole table - redo unless (bin >= 0 ? 
table.cas(i, nil, forwarder) : lock_and_clean_up_reverse_forwarders(table, old_table_size, new_table, i, forwarder)) - elsif Node.locked_hash?(node_hash = node.hash) - locked_indexes ||= ::Array.new - if bin < 0 && locked_arr_idx > 0 - locked_arr_idx -= 1 - i, locked_indexes[locked_arr_idx] = locked_indexes[locked_arr_idx], i # swap with another bin - redo - end - if bin < 0 || locked_indexes.size >= TRANSFER_BUFFER_SIZE - node.try_await_lock(table, i) # no other options -- block - redo - end - rev_forwarder ||= Node.new(MOVED, table, NULL) - redo unless table.volatile_get(i) == node && node.locked? # recheck before adding to list - locked_indexes << i - new_table.volatile_set(i, rev_forwarder) - new_table.volatile_set(i + old_table_size, rev_forwarder) - else - redo unless split_old_bin(table, new_table, i, node, node_hash, forwarder) - end - - if bin > 0 - i = (bin -= 1) - elsif locked_indexes && !locked_indexes.empty? - bin = -1 - i = locked_indexes.pop - locked_arr_idx = locked_indexes.size - 1 - else - return new_table - end - end - end - - def lock_and_clean_up_reverse_forwarders(old_table, old_table_size, new_table, i, forwarder) - # transiently use a locked forwarding node - locked_forwarder = Node.new(moved_locked_hash = MOVED | LOCKED, new_table, NULL) - if old_table.cas(i, nil, locked_forwarder) - new_table.volatile_set(i, nil) # kill the potential reverse forwarders - new_table.volatile_set(i + old_table_size, nil) # kill the potential reverse forwarders - old_table.volatile_set(i, forwarder) - locked_forwarder.unlock_via_hash(moved_locked_hash, MOVED) - true - end - end - - # Splits a normal bin with list headed by e into lo and hi parts; installs in given table. - def split_old_bin(table, new_table, i, node, node_hash, forwarder) - table.try_lock_via_hash(i, node, node_hash) do - split_bin(new_table, i, node, node_hash) - table.volatile_set(i, forwarder) - end - end - - def split_bin(new_table, i, node, node_hash) - bit = new_table.size >> 1 # bit to split on - run_bit = node_hash & bit - last_run = nil - low = nil - high = nil - current_node = node - # this optimises for the lowest amount of volatile writes and objects created - while current_node = current_node.next - unless (b = current_node.hash & bit) == run_bit - run_bit = b - last_run = current_node - end - end - if run_bit == 0 - low = last_run - else - high = last_run - end - current_node = node - until current_node == last_run - pure_hash = current_node.pure_hash - if (pure_hash & bit) == 0 - low = Node.new(pure_hash, current_node.key, current_node.value, low) - else - high = Node.new(pure_hash, current_node.key, current_node.value, high) - end - current_node = current_node.next - end - new_table.volatile_set(i, low) - new_table.volatile_set(i + bit, high) - end - - def increment_size - @counter.increment - end - - def decrement_size(by = 1) - @counter.add(-by) - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb deleted file mode 100644 index e0cf9990c52e0..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb +++ /dev/null @@ -1,66 +0,0 @@ -require 'thread' -require 'concurrent/collection/map/non_concurrent_map_backend' - -module Concurrent - - # @!visibility private 
- module Collection - - # @!visibility private - class MriMapBackend < NonConcurrentMapBackend - - def initialize(options = nil, &default_proc) - super(options, &default_proc) - @write_lock = Mutex.new - end - - def []=(key, value) - @write_lock.synchronize { super } - end - - def compute_if_absent(key) - if NULL != (stored_value = @backend.fetch(key, NULL)) # fast non-blocking path for the most likely case - stored_value - else - @write_lock.synchronize { super } - end - end - - def compute_if_present(key) - @write_lock.synchronize { super } - end - - def compute(key) - @write_lock.synchronize { super } - end - - def merge_pair(key, value) - @write_lock.synchronize { super } - end - - def replace_pair(key, old_value, new_value) - @write_lock.synchronize { super } - end - - def replace_if_exists(key, new_value) - @write_lock.synchronize { super } - end - - def get_and_set(key, value) - @write_lock.synchronize { super } - end - - def delete(key) - @write_lock.synchronize { super } - end - - def delete_pair(key, value) - @write_lock.synchronize { super } - end - - def clear - @write_lock.synchronize { super } - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb deleted file mode 100644 index ca5fd9b48e37b..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb +++ /dev/null @@ -1,148 +0,0 @@ -require 'concurrent/constants' - -module Concurrent - - # @!visibility private - module Collection - - # @!visibility private - class NonConcurrentMapBackend - - # WARNING: all public methods of the class must operate on the @backend - # directly without calling each other. This is important because of the - # SynchronizedMapBackend which uses a non-reentrant mutex for performance - # reasons. 
- def initialize(options = nil, &default_proc) - validate_options_hash!(options) if options.kind_of?(::Hash) - set_backend(default_proc) - @default_proc = default_proc - end - - def [](key) - @backend[key] - end - - def []=(key, value) - @backend[key] = value - end - - def compute_if_absent(key) - if NULL != (stored_value = @backend.fetch(key, NULL)) - stored_value - else - @backend[key] = yield - end - end - - def replace_pair(key, old_value, new_value) - if pair?(key, old_value) - @backend[key] = new_value - true - else - false - end - end - - def replace_if_exists(key, new_value) - if NULL != (stored_value = @backend.fetch(key, NULL)) - @backend[key] = new_value - stored_value - end - end - - def compute_if_present(key) - if NULL != (stored_value = @backend.fetch(key, NULL)) - store_computed_value(key, yield(stored_value)) - end - end - - def compute(key) - store_computed_value(key, yield(get_or_default(key, nil))) - end - - def merge_pair(key, value) - if NULL == (stored_value = @backend.fetch(key, NULL)) - @backend[key] = value - else - store_computed_value(key, yield(stored_value)) - end - end - - def get_and_set(key, value) - stored_value = get_or_default(key, nil) - @backend[key] = value - stored_value - end - - def key?(key) - @backend.key?(key) - end - - def delete(key) - @backend.delete(key) - end - - def delete_pair(key, value) - if pair?(key, value) - @backend.delete(key) - true - else - false - end - end - - def clear - @backend.clear - self - end - - def each_pair - dupped_backend.each_pair do |k, v| - yield k, v - end - self - end - - def size - @backend.size - end - - def get_or_default(key, default_value) - @backend.fetch(key, default_value) - end - - private - - def set_backend(default_proc) - if default_proc - @backend = ::Hash.new { |_h, key| default_proc.call(self, key) } - else - @backend = {} - end - end - - def initialize_copy(other) - super - set_backend(@default_proc) - self - end - - def dupped_backend - @backend.dup - end - - def pair?(key, expected_value) - NULL != (stored_value = @backend.fetch(key, NULL)) && expected_value.equal?(stored_value) - end - - def store_computed_value(key, new_value) - if new_value.nil? - @backend.delete(key) - nil - else - @backend[key] = new_value - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb deleted file mode 100644 index 190c8d98d906c..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb +++ /dev/null @@ -1,82 +0,0 @@ -require 'concurrent/collection/map/non_concurrent_map_backend' - -module Concurrent - - # @!visibility private - module Collection - - # @!visibility private - class SynchronizedMapBackend < NonConcurrentMapBackend - - require 'mutex_m' - include Mutex_m - # WARNING: Mutex_m is a non-reentrant lock, so the synchronized methods are - # not allowed to call each other. 
- - def [](key) - synchronize { super } - end - - def []=(key, value) - synchronize { super } - end - - def compute_if_absent(key) - synchronize { super } - end - - def compute_if_present(key) - synchronize { super } - end - - def compute(key) - synchronize { super } - end - - def merge_pair(key, value) - synchronize { super } - end - - def replace_pair(key, old_value, new_value) - synchronize { super } - end - - def replace_if_exists(key, new_value) - synchronize { super } - end - - def get_and_set(key, value) - synchronize { super } - end - - def key?(key) - synchronize { super } - end - - def delete(key) - synchronize { super } - end - - def delete_pair(key, value) - synchronize { super } - end - - def clear - synchronize { super } - end - - def size - synchronize { super } - end - - def get_or_default(key, default_value) - synchronize { super } - end - - private - def dupped_backend - synchronize { super } - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb deleted file mode 100644 index 68a1b3884df10..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb +++ /dev/null @@ -1,14 +0,0 @@ -module Concurrent - - # @!visibility private - module Collection - - # @!visibility private - class TruffleRubyMapBackend < TruffleRuby::ConcurrentMap - def initialize(options = nil) - options ||= {} - super(initial_capacity: options[:initial_capacity], load_factor: options[:load_factor]) - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb deleted file mode 100644 index 694cd7ac7cdec..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb +++ /dev/null @@ -1,143 +0,0 @@ -require 'concurrent/utility/engine' -require 'concurrent/collection/java_non_concurrent_priority_queue' -require 'concurrent/collection/ruby_non_concurrent_priority_queue' - -module Concurrent - module Collection - - # @!visibility private - # @!macro internal_implementation_note - NonConcurrentPriorityQueueImplementation = case - when Concurrent.on_jruby? - JavaNonConcurrentPriorityQueue - else - RubyNonConcurrentPriorityQueue - end - private_constant :NonConcurrentPriorityQueueImplementation - - # @!macro priority_queue - # - # A queue collection in which the elements are sorted based on their - # comparison (spaceship) operator `<=>`. Items are added to the queue - # at a position relative to their priority. On removal the element - # with the "highest" priority is removed. By default the sort order is - # from highest to lowest, but a lowest-to-highest sort order can be - # set on construction. - # - # The API is based on the `Queue` class from the Ruby standard library. - # - # The pure Ruby implementation, `RubyNonConcurrentPriorityQueue` uses a heap algorithm - # stored in an array. The algorithm is based on the work of Robert Sedgewick - # and Kevin Wayne. 
- # - # The JRuby native implementation is a thin wrapper around the standard - # library `java.util.NonConcurrentPriorityQueue`. - # - # When running under JRuby the class `NonConcurrentPriorityQueue` extends `JavaNonConcurrentPriorityQueue`. - # When running under all other interpreters it extends `RubyNonConcurrentPriorityQueue`. - # - # @note This implementation is *not* thread safe. - # - # @see http://en.wikipedia.org/wiki/Priority_queue - # @see http://ruby-doc.org/stdlib-2.0.0/libdoc/thread/rdoc/Queue.html - # - # @see http://algs4.cs.princeton.edu/24pq/index.php#2.6 - # @see http://algs4.cs.princeton.edu/24pq/MaxPQ.java.html - # - # @see http://docs.oracle.com/javase/7/docs/api/java/util/PriorityQueue.html - # - # @!visibility private - class NonConcurrentPriorityQueue < NonConcurrentPriorityQueueImplementation - - alias_method :has_priority?, :include? - - alias_method :size, :length - - alias_method :deq, :pop - alias_method :shift, :pop - - alias_method :<<, :push - alias_method :enq, :push - - # @!method initialize(opts = {}) - # @!macro priority_queue_method_initialize - # - # Create a new priority queue with no items. - # - # @param [Hash] opts the options for creating the queue - # @option opts [Symbol] :order (:max) dictates the order in which items are - # stored: from highest to lowest when `:max` or `:high`; from lowest to - # highest when `:min` or `:low` - - # @!method clear - # @!macro priority_queue_method_clear - # - # Removes all of the elements from this priority queue. - - # @!method delete(item) - # @!macro priority_queue_method_delete - # - # Deletes all items from `self` that are equal to `item`. - # - # @param [Object] item the item to be removed from the queue - # @return [Object] true if the item is found else false - - # @!method empty? - # @!macro priority_queue_method_empty - # - # Returns `true` if `self` contains no elements. - # - # @return [Boolean] true if there are no items in the queue else false - - # @!method include?(item) - # @!macro priority_queue_method_include - # - # Returns `true` if the given item is present in `self` (that is, if any - # element == `item`), otherwise returns false. - # - # @param [Object] item the item to search for - # - # @return [Boolean] true if the item is found else false - - # @!method length - # @!macro priority_queue_method_length - # - # The current length of the queue. - # - # @return [Fixnum] the number of items in the queue - - # @!method peek - # @!macro priority_queue_method_peek - # - # Retrieves, but does not remove, the head of this queue, or returns `nil` - # if this queue is empty. - # - # @return [Object] the head of the queue or `nil` when empty - - # @!method pop - # @!macro priority_queue_method_pop - # - # Retrieves and removes the head of this queue, or returns `nil` if this - # queue is empty. - # - # @return [Object] the head of the queue or `nil` when empty - - # @!method push(item) - # @!macro priority_queue_method_push - # - # Inserts the specified element into this priority queue. - # - # @param [Object] item the item to insert onto the queue - - # @!method self.from_list(list, opts = {}) - # @!macro priority_queue_method_from_list - # - # Create a new priority queue from the given list. 
- # - # @param [Enumerable] list the list to build the queue from - # @param [Hash] opts the options for creating the queue - # - # @return [NonConcurrentPriorityQueue] the newly created and populated queue - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb deleted file mode 100644 index 322b4ac2d95ae..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb +++ /dev/null @@ -1,160 +0,0 @@ -module Concurrent - module Collection - - # @!macro priority_queue - # - # @!visibility private - # @!macro internal_implementation_note - class RubyNonConcurrentPriorityQueue - - # @!macro priority_queue_method_initialize - def initialize(opts = {}) - order = opts.fetch(:order, :max) - @comparator = [:min, :low].include?(order) ? -1 : 1 - clear - end - - # @!macro priority_queue_method_clear - def clear - @queue = [nil] - @length = 0 - true - end - - # @!macro priority_queue_method_delete - def delete(item) - return false if empty? - original_length = @length - k = 1 - while k <= @length - if @queue[k] == item - swap(k, @length) - @length -= 1 - sink(k) || swim(k) - @queue.pop - else - k += 1 - end - end - @length != original_length - end - - # @!macro priority_queue_method_empty - def empty? - size == 0 - end - - # @!macro priority_queue_method_include - def include?(item) - @queue.include?(item) - end - alias_method :has_priority?, :include? - - # @!macro priority_queue_method_length - def length - @length - end - alias_method :size, :length - - # @!macro priority_queue_method_peek - def peek - empty? ? nil : @queue[1] - end - - # @!macro priority_queue_method_pop - def pop - return nil if empty? - max = @queue[1] - swap(1, @length) - @length -= 1 - sink(1) - @queue.pop - max - end - alias_method :deq, :pop - alias_method :shift, :pop - - # @!macro priority_queue_method_push - def push(item) - raise ArgumentError.new('cannot enqueue nil') if item.nil? - @length += 1 - @queue << item - swim(@length) - true - end - alias_method :<<, :push - alias_method :enq, :push - - # @!macro priority_queue_method_from_list - def self.from_list(list, opts = {}) - queue = new(opts) - list.each{|item| queue << item } - queue - end - - private - - # Exchange the values at the given indexes within the internal array. - # - # @param [Integer] x the first index to swap - # @param [Integer] y the second index to swap - # - # @!visibility private - def swap(x, y) - temp = @queue[x] - @queue[x] = @queue[y] - @queue[y] = temp - end - - # Are the items at the given indexes ordered based on the priority - # order specified at construction? - # - # @param [Integer] x the first index from which to retrieve a comparable value - # @param [Integer] y the second index from which to retrieve a comparable value - # - # @return [Boolean] true if the two elements are in the correct priority order - # else false - # - # @!visibility private - def ordered?(x, y) - (@queue[x] <=> @queue[y]) == @comparator - end - - # Percolate down to maintain heap invariant. - # - # @param [Integer] k the index at which to start the percolation - # - # @!visibility private - def sink(k) - success = false - - while (j = (2 * k)) <= @length do - j += 1 if j < @length && ! 
ordered?(j, j+1) - break if ordered?(k, j) - swap(k, j) - success = true - k = j - end - - success - end - - # Percolate up to maintain heap invariant. - # - # @param [Integer] k the index at which to start the percolation - # - # @!visibility private - def swim(k) - success = false - - while k > 1 && ! ordered?(k/2, k) do - swap(k, k/2) - k = k/2 - success = true - end - - success - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/deprecation.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/deprecation.rb deleted file mode 100644 index 35ae4b2c9df68..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/deprecation.rb +++ /dev/null @@ -1,34 +0,0 @@ -require 'concurrent/concern/logging' - -module Concurrent - module Concern - - # @!visibility private - # @!macro internal_implementation_note - module Deprecation - # TODO require additional parameter: a version. Display when it'll be removed based on that. Error if not removed. - include Concern::Logging - - def deprecated(message, strip = 2) - caller_line = caller(strip).first if strip > 0 - klass = if Module === self - self - else - self.class - end - message = if strip > 0 - format("[DEPRECATED] %s\ncalled on: %s", message, caller_line) - else - format('[DEPRECATED] %s', message) - end - log WARN, klass.to_s, message - end - - def deprecated_method(old_name, new_name) - deprecated "`#{old_name}` is deprecated and it'll removed in next release, use `#{new_name}` instead", 3 - end - - extend self - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb deleted file mode 100644 index dc172ba74d93d..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb +++ /dev/null @@ -1,73 +0,0 @@ -module Concurrent - module Concern - - # Object references in Ruby are mutable. This can lead to serious problems when - # the `#value` of a concurrent object is a mutable reference. Which is always the - # case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type. - # Most classes in this library that expose a `#value` getter method do so using the - # `Dereferenceable` mixin module. - # - # @!macro copy_options - module Dereferenceable - # NOTE: This module is going away in 2.0. In the mean time we need it to - # play nicely with the synchronization layer. This means that the - # including class SHOULD be synchronized and it MUST implement a - # `#synchronize` method. Not doing so will lead to runtime errors. - - # Return the value this object represents after applying the options specified - # by the `#set_deref_options` method. 
- # - # @return [Object] the current value of the object - def value - synchronize { apply_deref_options(@value) } - end - alias_method :deref, :value - - protected - - # Set the internal value of this object - # - # @param [Object] value the new value - def value=(value) - synchronize{ @value = value } - end - - # @!macro dereferenceable_set_deref_options - # Set the options which define the operations #value performs before - # returning data to the caller (dereferencing). - # - # @note Most classes that include this module will call `#set_deref_options` - # from within the constructor, thus allowing these options to be set at - # object creation. - # - # @param [Hash] opts the options defining dereference behavior. - # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data - # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data - # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing - # the internal value and returning the value returned from the proc - def set_deref_options(opts = {}) - synchronize{ ns_set_deref_options(opts) } - end - - # @!macro dereferenceable_set_deref_options - # @!visibility private - def ns_set_deref_options(opts) - @dup_on_deref = opts[:dup_on_deref] || opts[:dup] - @freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze] - @copy_on_deref = opts[:copy_on_deref] || opts[:copy] - @do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref) - nil - end - - # @!visibility private - def apply_deref_options(value) - return nil if value.nil? - return value if @do_nothing_on_deref - value = @copy_on_deref.call(value) if @copy_on_deref - value = value.dup if @dup_on_deref - value = value.freeze if @freeze_on_deref - value - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/logging.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/logging.rb deleted file mode 100644 index 568a539ebf0b8..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/logging.rb +++ /dev/null @@ -1,116 +0,0 @@ -require 'logger' -require 'concurrent/atomic/atomic_reference' - -module Concurrent - module Concern - - # Include where logging is needed - # - # @!visibility private - module Logging - include Logger::Severity - - # Logs through {Concurrent.global_logger}, it can be overridden by setting @logger - # @param [Integer] level one of Logger::Severity constants - # @param [String] progname e.g. a path of an Actor - # @param [String, nil] message when nil block is used to generate the message - # @yieldreturn [String] a message - def log(level, progname, message = nil, &block) - logger = if defined?(@logger) && @logger - @logger - else - Concurrent.global_logger - end - logger.call level, progname, message, &block - rescue => error - $stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" + - "#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}" - end - end - end -end - -module Concurrent - extend Concern::Logging - - # @return [Logger] Logger with provided level and output. 
- def self.create_simple_logger(level = Logger::FATAL, output = $stderr) - # TODO (pitr-ch 24-Dec-2016): figure out why it had to be replaced, stdlogger was deadlocking - lambda do |severity, progname, message = nil, &block| - return false if severity < level - - message = block ? block.call : message - formatted_message = case message - when String - message - when Exception - format "%s (%s)\n%s", - message.message, message.class, (message.backtrace || []).join("\n") - else - message.inspect - end - - output.print format "[%s] %5s -- %s: %s\n", - Time.now.strftime('%Y-%m-%d %H:%M:%S.%L'), - Logger::SEV_LABEL[severity], - progname, - formatted_message - true - end - end - - # Use logger created by #create_simple_logger to log concurrent-ruby messages. - def self.use_simple_logger(level = Logger::FATAL, output = $stderr) - Concurrent.global_logger = create_simple_logger level, output - end - - # @return [Logger] Logger with provided level and output. - # @deprecated - def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr) - logger = Logger.new(output) - logger.level = level - logger.formatter = lambda do |severity, datetime, progname, msg| - formatted_message = case msg - when String - msg - when Exception - format "%s (%s)\n%s", - msg.message, msg.class, (msg.backtrace || []).join("\n") - else - msg.inspect - end - format "[%s] %5s -- %s: %s\n", - datetime.strftime('%Y-%m-%d %H:%M:%S.%L'), - severity, - progname, - formatted_message - end - - lambda do |loglevel, progname, message = nil, &block| - logger.add loglevel, message, progname, &block - end - end - - # Use logger created by #create_stdlib_logger to log concurrent-ruby messages. - # @deprecated - def self.use_stdlib_logger(level = Logger::FATAL, output = $stderr) - Concurrent.global_logger = create_stdlib_logger level, output - end - - # TODO (pitr-ch 27-Dec-2016): remove deadlocking stdlib_logger methods - - # Suppresses all output when used for logging. - NULL_LOGGER = lambda { |level, progname, message = nil, &block| } - - # @!visibility private - GLOBAL_LOGGER = AtomicReference.new(create_simple_logger(Logger::WARN)) - private_constant :GLOBAL_LOGGER - - def self.global_logger - GLOBAL_LOGGER.value - end - - def self.global_logger=(value) - GLOBAL_LOGGER.value = value - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/obligation.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/obligation.rb deleted file mode 100644 index 2c9ac12003502..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/obligation.rb +++ /dev/null @@ -1,220 +0,0 @@ -require 'thread' -require 'timeout' - -require 'concurrent/atomic/event' -require 'concurrent/concern/dereferenceable' - -module Concurrent - module Concern - - module Obligation - include Concern::Dereferenceable - # NOTE: The Dereferenceable module is going away in 2.0. In the mean time - # we need it to place nicely with the synchronization layer. This means - # that the including class SHOULD be synchronized and it MUST implement a - # `#synchronize` method. Not doing so will lead to runtime errors. - - # Has the obligation been fulfilled? - # - # @return [Boolean] - def fulfilled? - state == :fulfilled - end - alias_method :realized?, :fulfilled? - - # Has the obligation been rejected? - # - # @return [Boolean] - def rejected? 
- state == :rejected - end - - # Is obligation completion still pending? - # - # @return [Boolean] - def pending? - state == :pending - end - - # Is the obligation still unscheduled? - # - # @return [Boolean] - def unscheduled? - state == :unscheduled - end - - # Has the obligation completed processing? - # - # @return [Boolean] - def complete? - [:fulfilled, :rejected].include? state - end - - # Is the obligation still awaiting completion of processing? - # - # @return [Boolean] - def incomplete? - ! complete? - end - - # The current value of the obligation. Will be `nil` while the state is - # pending or the operation has been rejected. - # - # @param [Numeric] timeout the maximum time in seconds to wait. - # @return [Object] see Dereferenceable#deref - def value(timeout = nil) - wait timeout - deref - end - - # Wait until obligation is complete or the timeout has been reached. - # - # @param [Numeric] timeout the maximum time in seconds to wait. - # @return [Obligation] self - def wait(timeout = nil) - event.wait(timeout) if timeout != 0 && incomplete? - self - end - - # Wait until obligation is complete or the timeout is reached. Will re-raise - # any exceptions raised during processing (but will not raise an exception - # on timeout). - # - # @param [Numeric] timeout the maximum time in seconds to wait. - # @return [Obligation] self - # @raise [Exception] raises the reason when rejected - def wait!(timeout = nil) - wait(timeout).tap { raise self if rejected? } - end - alias_method :no_error!, :wait! - - # The current value of the obligation. Will be `nil` while the state is - # pending or the operation has been rejected. Will re-raise any exceptions - # raised during processing (but will not raise an exception on timeout). - # - # @param [Numeric] timeout the maximum time in seconds to wait. - # @return [Object] see Dereferenceable#deref - # @raise [Exception] raises the reason when rejected - def value!(timeout = nil) - wait(timeout) - if rejected? - raise self - else - deref - end - end - - # The current state of the obligation. - # - # @return [Symbol] the current state - def state - synchronize { @state } - end - - # If an exception was raised during processing this will return the - # exception object. Will return `nil` when the state is pending or if - # the obligation has been successfully fulfilled. - # - # @return [Exception] the exception raised during processing or `nil` - def reason - synchronize { @reason } - end - - # @example allows Obligation to be risen - # rejected_ivar = Ivar.new.fail - # raise rejected_ivar - def exception(*args) - raise 'obligation is not rejected' unless rejected? - reason.exception(*args) - end - - protected - - # @!visibility private - def get_arguments_from(opts = {}) - [*opts.fetch(:args, [])] - end - - # @!visibility private - def init_obligation - @event = Event.new - @value = @reason = nil - end - - # @!visibility private - def event - @event - end - - # @!visibility private - def set_state(success, value, reason) - if success - @value = value - @state = :fulfilled - else - @reason = reason - @state = :rejected - end - end - - # @!visibility private - def state=(value) - synchronize { ns_set_state(value) } - end - - # Atomic compare and set operation - # State is set to `next_state` only if `current state == expected_current`. 
- # - # @param [Symbol] next_state - # @param [Symbol] expected_current - # - # @return [Boolean] true is state is changed, false otherwise - # - # @!visibility private - def compare_and_set_state(next_state, *expected_current) - synchronize do - if expected_current.include? @state - @state = next_state - true - else - false - end - end - end - - # Executes the block within mutex if current state is included in expected_states - # - # @return block value if executed, false otherwise - # - # @!visibility private - def if_state(*expected_states) - synchronize do - raise ArgumentError.new('no block given') unless block_given? - - if expected_states.include? @state - yield - else - false - end - end - end - - protected - - # Am I in the current state? - # - # @param [Symbol] expected The state to check against - # @return [Boolean] true if in the expected state else false - # - # @!visibility private - def ns_check_state?(expected) - @state == expected - end - - # @!visibility private - def ns_set_state(value) - @state = value - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/observable.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/observable.rb deleted file mode 100644 index b5132714bfbbc..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concern/observable.rb +++ /dev/null @@ -1,110 +0,0 @@ -require 'concurrent/collection/copy_on_notify_observer_set' -require 'concurrent/collection/copy_on_write_observer_set' - -module Concurrent - module Concern - - # The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one - # of the most useful design patterns. - # - # The workflow is very simple: - # - an `observer` can register itself to a `subject` via a callback - # - many `observers` can be registered to the same `subject` - # - the `subject` notifies all registered observers when its status changes - # - an `observer` can deregister itself when is no more interested to receive - # event notifications - # - # In a single threaded environment the whole pattern is very easy: the - # `subject` can use a simple data structure to manage all its subscribed - # `observer`s and every `observer` can react directly to every event without - # caring about synchronization. - # - # In a multi threaded environment things are more complex. The `subject` must - # synchronize the access to its data structure and to do so currently we're - # using two specialized ObserverSet: {Concurrent::Concern::CopyOnWriteObserverSet} - # and {Concurrent::Concern::CopyOnNotifyObserverSet}. - # - # When implementing and `observer` there's a very important rule to remember: - # **there are no guarantees about the thread that will execute the callback** - # - # Let's take this example - # ``` - # class Observer - # def initialize - # @count = 0 - # end - # - # def update - # @count += 1 - # end - # end - # - # obs = Observer.new - # [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) } - # # execute [obj1, obj2, obj3, obj4] - # ``` - # - # `obs` is wrong because the variable `@count` can be accessed by different - # threads at the same time, so it should be synchronized (using either a Mutex - # or an AtomicFixum) - module Observable - - # @!macro observable_add_observer - # - # Adds an observer to this set. 
If a block is passed, the observer will be - # created by this method and no other params should be passed. - # - # @param [Object] observer the observer to add - # @param [Symbol] func the function to call on the observer during notification. - # Default is :update - # @return [Object] the added observer - def add_observer(observer = nil, func = :update, &block) - observers.add_observer(observer, func, &block) - end - - # As `#add_observer` but can be used for chaining. - # - # @param [Object] observer the observer to add - # @param [Symbol] func the function to call on the observer during notification. - # @return [Observable] self - def with_observer(observer = nil, func = :update, &block) - add_observer(observer, func, &block) - self - end - - # @!macro observable_delete_observer - # - # Remove `observer` as an observer on this object so that it will no - # longer receive notifications. - # - # @param [Object] observer the observer to remove - # @return [Object] the deleted observer - def delete_observer(observer) - observers.delete_observer(observer) - end - - # @!macro observable_delete_observers - # - # Remove all observers associated with this object. - # - # @return [Observable] self - def delete_observers - observers.delete_observers - self - end - - # @!macro observable_count_observers - # - # Return the number of observers associated with this object. - # - # @return [Integer] the observers count - def count_observers - observers.count_observers - end - - protected - - attr_accessor :observers - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concurrent_ruby.jar b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/concurrent_ruby.jar deleted file mode 100644 index a4bda412818117a8592d49e1f5f50b5e887aab10..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 135855 zcmcG$WpG?;vMp$f(PCz1W?5jdO3cj6%#0yd)j<Z( z*1iM1gfuNcdd9g74Ca%_E!+ee>skadiesM_TE>l|qvQ>U|?8(Qf*ItD9h%c2ON^3GLTHwP=g z`b4P6uLfu%6OfT7i8qwS$q5lNn7DBF&0ol{SOLatuLm&zYXbyZ(aam_<4-xm{Tp1S z4pZx?o)<3GI=#<0|DYqQw4`l(;9Hy4TnG8G>F*Cj%}X+@H~sJiR)Rc=&8LnBP}-fw9rWJaa~ zvTopucA2>(PFrEDr$P!^lf=J73P8zz?}-7P1%D^nwxp_Fg5d!tB9wT`ysFMDc$0Wu zDh)5`lg~=jIpGxsf4rlDVqK2JvNKC}Uy=v9Q$|I8`AOAm-KI5N-|5j$p-s;>VwVD= z^0q7uBwLlW9q$*iT_gY?F*Uq%8W-sofE<4x5w1>Oa@S$}xrov^yK%ZcFO?Q%zw>mh zTSd_Dd!~nZf}&p;djYO;k8BWU;9`2zY@&IprwcKMDo0J6-MS-qm9)0Bco-SPgNgaq zRE+kNj|jbJmR+v_g=nnZhyjOas@>pDFlIzhIXc5&2~wAVJ?}l&>n1gBIGNJc?98Yd zNkbU8)l9itFf|^^u>1wJw`;uQ+B5hcCtlsm?0o$E>5~`e-+2)GhX)zjS~D2_mlx?e ze7F&Vv6~ZvfRnAYxuJrwiSdVb85&EO8#w4YxRWydO%?lch8)yO| z1v++U1myv!!4j2ngCl?bc|9l27+Q^)sSclY6Y z=nv{Kdr|@g(V?ZdQydu2^?*d36p0Oxa3eb!fkes+ivjut)QKg**19RMQwUHaZUQ;2 zrXv+-ewB0AA-o`N=z6WUySLhqCMam2{iqdbFjl%=Zl-O}xD$SkqrRX2K8n%gr ztm9ku*Zd?v5xUA&G}l9-dV*v1c*25SndiHP7||e{5J#xs59Ey%s3%H|>Q#;t8ukvR zSBxV)KBGZS#z~(N=leZ?aqPueG;A2;1d~rnVTJ)E8N(ryst~$J@yDtY{M=p+A66~> zh)@4HF8ohemHFSS+Ptn&zVHk$Fc{*IOY=mm1V#muSe8TlK-z9zLA44tTd~wVTO1HT zuq75&uZbj2z#7wmZIyOYHyLshP_mYpG=1n{qa4;Y?tfdFlFTJu)@$ol($UKH2|Dx_RyTmlyTfw=201 zv*N>gmgCTyz}m$cF739R+gG|79c6XOxD9sR`AjuCpeA32UC5mfi02k1FpgLXQxv5d zF1=7ppbIh4ufux(ynT{2QF~fWdz1*7Qo93_%XpkBo7VBjpnC zjI2R*-%S`1sN4Ab$F3#um*q|$b}j!~y9)jH(JJvLEBqIR|1xr=in=4JGU}U+$?(Ok%5d-ODn%~Xvx*N2^ zf3=6(3eeI?s28BZQLv+u8L3H#95CmiPBwIK;fliCNUp!aBxfwuYr~$f_X?$PxQ`4d zhv5r1WZ+Y|4Er`dbLb@2?9kS8rN5dg%fLB!cHW@az+)&cYZ?JO#!~UH%b2Y_3&^@* z2AL_H3ES}Z@o&>_HY>z{=Sa?~)3TN+b~tPst8L{{y$2gWlx~9?yPSj~jZg0ruTHIV 
zNT$HetMR}dUI#WH0Z3)}jGBPP1MD%4FM%j0Y8&Q#jqQx+!Oe_wyX~LUntY%p`1b2Y z?Ec%-TKXT}ng1Q{|A(2#ivA1i;LlQP1A_&BR9wCq9jyYnew1AD5`Y200M!*5N$b#z znv78QUpTMdcoLhTHfw_$9Wlbw=$^x;6Z3$w6Nek{z99_EAj>O3vJ5B_Q=9v zQfUWxg!TcCLN^^V^WXYR8A3hLvc>foCIn{sP0~WwsOGsr?V6-6S0`M`dD9wYZ$C1c zNV7Y%bXD+ChXOt0IvH;`Z^|N!NA^u84f3l84M#L)O&@Y|Dwrh%lMrPz%z2e0PThOI z)ai83zp3@_V_>nq=LVFONG=O5y=FUOvn?)UMZ9Nuur-|Kk3JuBZo>Y7x^0FX_4*6Q z*1v^X=Kmw8|D#_1uUO-7J%96m*d+)eXatkyFH_=Ta!b0X>kn=QZxg4lKI{R#&h3{5 z#F3JjJox!(_>uPV`tb}z*V{etBZ5#M##CJaVKQVa)s|hdIedAJBbneQ)5ri|kES)j zz@32P8f@PR5tNG;N7e=tB0prCHGzMZ1SIwT21%`_>`^H~(4xWtq>T=)`=gh)dXN?t zA_{@G;jZhhFzQ+S$W!Kkh;GzS{I7zkQ$c=p%TyXgWO$iX`%fM6(j#zI5scpLtFFKq zULQNKK$x;T8hu#pGtWimZ8cR_nn%~uo$0a|ioN_RH{L&B8zS>q+Wlc`-pUF!H&Of7>Roic~dHYID<5)QW2$L`FBZb1k zrS8NyVFT!$VZiz*9(dB{P;Ze?G9xc7uOoKIH^PPDNw1~I#INcD!oemcrLSsE<$P2S zBd#N~(%;p4LTI}8(o#}1_A?XIv@$iuhw=O_MMOXh8P1Ijjr5H4488$~pooB&fS}<0 zL>@LY&;z{1qxw=e(oKwXL;mSrcr>HAE&r=~;Wv}3cKn5I$ls#-Pr0UlNB2LXg^aZ2 zpV6QrQG+}cT3}zlko(y}FoH%UAu?zsF0zhfXY~sFjld1*Qs2su55JF+*(iaYRmCmi zO(;g&&7-B?pnd?YJn1}>V9wb`C((8|+(d6l^@-)sniYuT=MTbCV99GyyN%QF+rs51 zI*~R-dy2m=zGj0`)W71yTG1t+Z8EvM8Ff;}(V_d~-O!9N`t1z9l+a^uf6SgZ>KS%| zZ8EWlE_6uzd)y^%xQcoORjzx#My-Ovl)|5Ie0g;R7U0X|0VDu?AQvSkJaN-%i0l@P z&4L@n4PwR#C2u0o(7yeb**Nv7ndG0z)*pWh>^~JN{X4M#DK`8EvVVrI-T-iBB~u7O z8w$w;%!O}q?ll&T#i8w4&q)eDzTz<8>(|s31h<)N=U6+s$!7c2&nen(+zsBDFU@%K z?#YFky~a$^&zz>@s`MGv2;{eI<>2^g6@~+My<+~T)S{g9t+*RFYq-(G0?pGw@VE9^ zt$q7igR9)0~k_ZE=KsS|M7B`aD^o``>wZfu}ZKPZ)_ zPIKiLegl;+APb8dAl}wzggwR3PB-)3(=eFt#)Vr za1YKTZPgC@w$xr5#+bcdHV$)>&R!HwafP zM*PkltXA7-B45%<-4k0WNh}j!qj(It=@cr?WTa`&I~_PhImGz@)3sOBr@Y2x=@NL6 zQVFoWse2j{SG97io)VMmlUBjJ&bO9F(zjG({*7~5gY47Kla=&sadi_6Q&G(D%Mqji zii#o8y2*0&ebo}130KPgDg#^AHd&$RYojzMx0QPUeYYh0X}@>fwkr#NmWS4 zgPFZ`Pv4qDFg>3;iazqmAiJS7piW*&AEa;VPu!2fz*qU>*01oz+Wqfsg8pe_Qna%& z|1ZMaLivAB8Y{%hE4g0)aPI$Htwl;ic+^T+DkJeI;+M{E6#j zl_frIdXWClBd#g;>@D`GG4`X)PG4t`ZkAMv)pasqxg4O2cqcG2Hat;KVt18z8Sz%4 z6BIJRrVNs)5&4)4=icENeB>#}{d`s7LMAS%B--Jb2%Gp6B`kYGX8+k=vKCIr?-acd zR)nu5{ue!VUPN%wL%oJY=Kb-ad1MuBL!;rquD*7}mFiuYHC$vvXAVD)l z&syXj1cZOzYco#hX>CvUj}Lvq%>9O_H8ISGV|nr?F>8e@)oD97a!5{sS^z_VAG4!Y z4`<-{%*2{|dPYR?O55>!Rm0_xD=TPW!tv4&r(Wtw1$iMSmW{J1sFD4V(Y0|tedmk6 z^XJbeAs7`2!l+xNz1QgwgG=>nRqPJXfgtVFRTW@y_oO^xtR;b@uT&(hRTn@9AJkHt zgF3?pAg->J)e2qG2gSTHU08k|uojSVG#3#+F{C7}?G@G1T147uyX2GV6LCJMazyp3 zC7S4L-lhZrY+E;2bDLNmXv4``;cvGI? 
zvvh;S*j+&J<5mYd#kE2cHY7?Yuf3>MqsMYZ+chTJ?=xThp2SQ`Le4GH)}r(Q?W+dt zF?HcC#5%=?1>DE=WXXSuQ+bnI6mu`2;>^a20G(~;n6RXxd0rc}4Z-5-i4(~dyj-Lq z6ur|8v<^qWb(%e>nR%mL9$16h&AONPmF%m%`U#R*lK@-vR;+(UEK=ePLsMvofLU4_ z!f_~fTAdjj;FxGqyM9%jp1a)+rd*z<;Gtmm$}A0|JXyM5#pC(&9S9iJ*T0qA5^}B5 zhz{NGYk40aOEj~ zpu>lxbbe5z{GdYl>^O3nb8Knx4M`f<0tas&c=cR)w!b5SN$tj2`*&F~wg`->rV70m z)NA`#)H2ui&^Osw)$>I?7S83SwK#Y@H3K5DJgb16K6F~BlGl* z33t>CmF}pu@fCFQJe<1Z5f_UT<~!U!M=WSn=*x?@;fiy^t79oUMLO961ng1H`dOEW zI*nbXZZz*Wg$9szcfW7KZS57L4j%0yOoooJQC96XHV1R=a`>RKUWcUi{M;>44g+(z z;#Zb0%A0qF)u{n{QkNvVvvN#P#%3pd2scu3$8<(w_$Sd6DeMg!4q;5* z@n0PucZ^nq>93B@Fx21O*#9&!Q*tmi7BK%ei-8F>NGGL(tWWQ!-&5-?t~LT-Kn~zB z;duCr`fg@{JLr5w5d86eApF?zkWhi>scF}C-d&4jn!ldOo@siOA}kx=N;GhPft=;9 zrnFsFoh{B~x31!>&DoS=2|RvoaJu*CDYqV-vnO1=zi)kReYUw@EHh34ruw@&Rj}BT zq)TNpl@HR_f>#E=FL##De+%(8M0 zrp6o=n2^4NAX%K%&|M)cJrWgHWRzFRoaBIzCaG9i-Xxl7oi_aHWTBv(gqXHe-$a+J z)5O+j{xVO$v0nuFyx6a3ZnE3X!= z%_Dbi)@!n{D0$JzD`Kv0qodA0sbs1@orNx7U7RstRcE82g*kf{JZY(f(V}wF+}^-k zhw_wLu8StwA-%(pc~j<6?I}7giB3k^7JEgh z77hg;1opeC=;|5!>S$a0*m}tIXH>M5sgFHbt=U?|A#aXLQ*@SJkM=nUOF!Jjvhx&o zNu5WxeyZv-nhFh`D9={NHWsC40^2897rNWX$_ao;y+A}#j}Kx&!fx|J!tUr}$EZEC zxM;c|c~YGVFZR09&Gj0Vxoa`Y)9FA@nnFQJ*39?1*2d%-434X#WOwswx*=GjWcSXY zbcVd6oX+U&d|P$KYOn*_YPyEXUbx8&!rgX+&G3PnLxt5+OjSe#7R6v6w%X#E9 zMcp2VtKo+Jo9Yd$TJ5-7AF|q@GYEH&8M|834bWC2wJ=b}ACl?~=t#p2(pK}iG~R@m z20qoEVp3u7cUsU-N|>+JveZ<)29UqhsOa9yXR|P6!H&7}>(`$xCi*pBOP8^`*0K#BxR3dXpf_ zR%BH<6*yZd_{i5#?}LOuI^KlQ^^a#wRMsZn+Z(mYhmRanDOp?PJGKiuWWKJZPq21QWlrKF1Bqu8}M<%(U7& zXaPT=ruYZs<1Ku4h=MPz%v38eJ0dIEgw^W6`qnmetC~8U+RCpcIyBa_R~;GvliY^% zrM(pry&xV?De-SxWt*plTPkH8+%T8<*z@{*HY?nPS>Qx%>)2hDvw>ZZ z^en;fUYrRQp!Ewuq0I`#LFK8TGy@R}(I;pw3(}0EBC(eX&_zM4Ox~WK!FR>d@}cyQ zg7zk!nm|vEZB*!^v_~iuitLSp?#w+ZL*dFyNR(A=&E51u>FWm*kV(=Dtya--T;T?l z-g9<|<^Z$T86)Ft>^P-KUd#6V3X!czDm6+WR3DJ15e}%v5iZrsFfY45GVAOej}RG0IDae+)v2u_}U-4K1`7F z%9lEYiI0;e`vAt6*5!ylRO4r;zQ8C)F{EEWC8`!Zlvekh(Fp9$nAl<;Xnqvz^ngh9 z*!~*3Pb^Ej$5)%|hwKm%&f^=~;<4hQv?6R;LWxl`1+bdO9%L!fu?l&t;)omt@NJnB zO1aViMb-p`v~f~Vt%SUaIY&8DfO7f(#e_-HVU2`iSEhvUDR!#jiPIw&C`5!hiVwVs zF(<@ov}6JBWl-TJ@Qo>tPPb$9hS(%+HD67hgD}LWvd+r!jxYcAc!rFEb zwqV&CaeIf9khJmSV9CM{c;m(aDBPWWD#$ey%4sxa&h4fN7G&Y589mVOv2(niE6{@F zx28LY)>6$CLZ2*7Q3oW#kNnL{_0tQ#R;`v+@+yZ&ozcdV)q2&P0D?z2B9k7@LqNvk z$ENXU`zhSv8%IU7@Kv`hS`c8zl$WV#h1=SprbZUrbXazXTTpo8&aOKn_W)ZqA69gb za0l?PogMq-!R(<7@txGT^hFobB7{&W$=)*=H#$F&q@)CM zKKT#y4PmZA_gsOlfR1kV8BsBwJ&92ncz40B9es5|v6rKBFUTsPl~G`~{c7^DK>G4| z8uipUgV&(q2v_OjxoMtOI#I1&c|^d^ov<}?Omq?7E*w{Rim>+msEyyA&bcuF8eRS4 zEEy&CStfrVt#S9UPltm$P?C&{Y5$fEk~NpLDbT#MO{?3kks%q%nq6LURUC4l`F`q4 z(+r1tV4z_dLiRdd?ErDW-*P zENuHJxbZWHie#np9Zl0~P&pHTSf3evDf%`G3bu2V3dEgi{`R?qcFo!MK>xTGvMrn? 
z*_8^l+?;BHNy^o8ioj9bVE8O|1QRZn)gZP#EdSa>DzPa?Vl&))UDc6ScQe@WfYq+F z9lQJh+ioAmYWuG8)@I4T7z|4=H(F$|)xIMdxrk*K4CMIyBqD72Y$g=x%<&|+kFjp!Fpo=B7oXC0>e{J-$PHoRd1)sIS8dF z0;Pw?Ouy9(emO|F9s4{4r3Xs`NW#rCIaDK@rBC<=Br0%+o&0uhP~mp3#a!8U8Y(pI zn(ilDq3MqC&`J-&(hI=e#aB%D!wXYA@Fud7hUT3jLO=7&yL8O07_+XZ-qsWUZ<-j}{Fm_cQ zI#OK*>+*~~w}a*r)gP##k>U!9@kG}jxYh=4(K_FPS0u<9)LZpc-jc85{~UyA!39m- z#CM0A8WoJ3o7!=H0USI&LeJOj54k0F8pVZ>?xgas6GR&6Gbr&=hDzZ@F&$nKF7y(V zin9$rxTUU@=%h|RkR1SdVXT$@(9$CI#kYy=LTVaz?a#W^w*UFYdQ98H|I1|vA@oNn*&`(P!A2b&@_(^GEk;X6cIm7TV6mt{`r z1o+W8kuNbvxvZa1#y|Nfjon%re%};U>zhNY%ybx{n#u8yulS~^_{Gk>jg zgB_5NVSV6YHD)5;&R1q_gs{-Y%O;x%6*OvxVO?gmU$wWMDTp!Al< zqHIYkk-z)V@fXa>D?m~k{u4Z_CiV^)%u2Gym9w45u}$`6kg+B{>kPCjoZQpGnkxN6 zn~;D2x2g45+s;q z9-q^i-hGRlH7B$HWG-~%arS2=v8)P{^ZVxJ`_hcg?ELETLM*K~kMdvv0rC7mmEHOd z7jx5PB7LUVrni=VEGTt&oT%do9 z12w`q%keKysJZNBWBH|#+;R}=S>M*Ve=QF1`E4zhyR2DkaQxFE!&mact37H;xh8s`qcy*9x}$$C4& zie>E`0^?>%fFzs_UEy6ot36f`fbUA7-!c4tC;?@J)5nA8GeHlsVfag6gp(_Ury1p^ z2kXNp~GJ)gH8hHsQ+67J`#ZMxjD2L%QDM#j*?=ppl(=!Q56{Q(aOdv3tH>OHD_O~ce zqs(&2OG5TDaHv+~1m%TU2tj6%Hck`R6H}u!%^=hU(7-*-D%kTbM39#9W91m{!fBJ~ zb$flSBH)v4ilUz_aG@LYuWJuaugI6wLm<2TY zYc!0RS$zMD8-YS?a>pEI^FYS(h5nFyt zlUC%p&(f;8DX|s*G%e%KyFrSZ``b7fCsuY^@Yb_IvvCe*h2Qf)GcoB?Vk@pIZ)o54 z#QueIwYV&~8N-oN-}B(dckP;utN6|EnPL~L0lv!^eT%9(tbvpD*J!=%?H>W*@7iw? z3wP@WtI8KDtTVoCxCaQr=61DR*CiA`c5+3N&~{{$JVnt_5jT zhB868o`gk%9fMaStJ8?Z2MkLXzMYO6m|DksHpZV#T1otkFVxO*feW~BqlP`EQ20Hb zQ0M7v^Hf9W^CNe2pF24#7^jTd_0GlB+8Ba1GG&ldW7801m5;f7H1yU$QVG;!I~OOz z+j;qzEb6e-6kcc)V=(?b&sXxl`zWa6q6n-@ zr8LWd5Q2OS93!Xft)nRxPfUow3&2>#C>2yv>6C|Yv5)pmYXoH6{qNC)iUOA5@Hz-d zyz$7r1$s%;iLLMjQc0)-p%#V_VxZzfeE|RbqiVq23!|Et^ht%qX~N)`jI4Fyh}woiB~4*OC=YtzJ%WV4on2CXYbH(eeZcoi zc=GMn3LcRl>x|B@TrZ;p9YSF_)WxoW$PsZ~p|(tccd}J0zxznrpmZqjf&s zzPlofKDs%hcXSV$N^U_@?7WXAvEuT zfNQ)!nb*bN$D+_h@5*&HGpF#I%ftA~H0t+XmNsa3zI?DNI9iT$-ZY1g?vcg8;9%A5 zkkxN913@Q&t$VwN(*$ZWm}`P#tPQxi&M$DA353tdh9R*oSdn@Vv(m$!vHRYE`+y4I zKcHJ00EvP*u4WIl*ad~w8Z+OcFP8z{dUL=4!l!)@41b$pmfpC|=wj{@V%$xEjp@?i z;d0WicKhH{$p;tzxRlG;vB>dJpuP7D)7>nkTv%chJHjgTit&1gjrJJ!96dpD!i6`D z%1c?KdkF~L<2bpQ@knUL*SLCp``5ZPka^skvoF%H=5N*7|8xT8uk+>jpIuqjWWF4f zw2PHMf=|@o_w)Qw`)3J&;e|BH)sVrOKx@J}Q*$%Bx9JoQ{a+23bPV4Bq@>=y;WE)9 zFwd~!zM=YdqdEUP@l$)%vq(SL{GD0bfNMjR~`-%yTEMiMQajtYN3E^ASGV}cH9faGpbg26V;eX6nLM5n3a zWlh{?Oqzz%WosKx)}-likjd0sf^t7tL7P3;Zc$?!s*Th+LDrM8+dtdPMc6XSbZx(x?ZRPBhdwX`HgdP{0%n(m=H>wP@YAU%%v@{<&22rSh zT;fLP#9vSW$H&PhxE8csDKbcN2sS7!l0xV*iq{ez-FU`BMGhQYBRP>7Bu(|fhisXr z_e|XM?ez*R43=pU0gOF3S*6$dNsKI*9TGg^b%F?^Jv1~LBi!(wd3+g#~r zA{0Bxb5JocwhvslDV2Ias7OZk^1Z~sDKiB*==o4XpGt?}CvHLh#%*5uMR}A_z9B2&4bB^KK2~C)#pJqUUf-Nv~^H-S`=|{-)E`Z1|+P=Va7JY}rkBUVz z+OwJF0`M8B&CvRDtQah`yN~F`p~+P<4?E@ACTu)BAv_+KWjQJE9r(l^X31~@+~(Fq zf<)zeVbC?K1@!00EaD}uGkhLkQBFR%Urv&GFrcZc&zgY^=2tEg%&@Vb`K;{oTy{CXe3n6Cm zxVehnkv0$D9`mMVxs1+W?|e!EXRcTU*T3!YhHC=+5y{Ec1iEPO-?)Yq@zz4!;N8iF z>kkR{%P~uSYx#S?=d6M|4_A+N<>-Kv5YE8c%lZ>w6OpT3hTHOy7rKThkoiXw9*)sC z#ak6RCw1Bn^)lZFor%Pk;o!T2Ps>3hLM^OApEd$50dG}-@|R_0$CnSl;)FC^hd z&<`DHS5;TpDI*7|97jI3z*s>VaEwViP%yk5IyXHr5U^mP!)*k9V6H{S4wY`eDK|)m z){>I)?T*XVv#O!ivzDr-TAZ4|H(li?sjrO|@4%m*ygswlZLOyZ<*LeCPp8Tc&%1G3 zyx`vgR)Ddp8G1rgBgU9_Z#C^(7IVg&cO5#K*Y$dl-3W)az&#S0t@fKo7|VYBO4>7- zGBY5q+prP<-z2!SIdIDWCvO&}YuosScmhf=b02AXNEU}ZewQRZ1)y5Ny2ZM zV5loRkQ$`(hcBZ(%ZD~sC;3)&;i3EDD9h_Lb+m1U9ZDx5%_rUQZI#$Zar2v`)+S=g z)4%~B{z=I0E&Nc7bV;vke_t27kAVPlXWtDk$voI=DY&zL{NNez`+A@Vz6qgE?p}(& z4>66d-Xvdw^YLj z@Dy-uoF0`)KgRvYlX1h|Bq{nGilX>yDMZw&z9W(j#u7S4=Vt}E>Sndf|H0Tf1!)qr zS$NvEZQHhO+cv(oZQHi(Y1`(sZFf&kZ|}dcw;Ov=5m}Lu5mguYW>)1n&w1y3%x=ge 
zJEJAEEP@(O;*>d|4Q(d0sDxa7%NqSiEiD}7QY$=u($rN!sUpzP+nqSbzFrIa=DQ*C@mC| z5eafKm{d{Lr%+R`*cY5rKDV3GU-aHeW~%P<(W>Yp5`Fbi_oZYli@KN~k2^5Z zwljkiLJDd2Q*CRh>!Ma>7c8xstUgtAO0smn=LF4>MT@JgwX&qGw@N{)PQ}@i>pD*I zBcb${TCf@+YnC~Qr^`}eZSzpaI%X(4ayq0yoyp^-NxCXrz#&{we0djRu|Q9qVW7+7 z(4QVBK)RB=IbRx`&EsK0m&MiRy7*K{+M~_vJL^%jxqNO?9)ZK(Ug4pB-x66; zCM8yyd=gKUrzX9DJyltb7?#W5TI6o^9krPiRrcX^5&8Zs2c!OiP-}VBUsC92C?r(z zy*}P8bqp6>8Gn+qtwZU8d0$syS7n82J@=)>Zrp;ZrXF@7))`i%R@fN3LVCjG3``Vr zlY(ov6yJeesupcMiIaU%sh1p=ymP0e5;l{?VoTb%k~RUgr-iij$5_*1#M%UjEDhgT z3ED`uyhFSg_deTaTec|F#_@#B%FgsuJOc)H*k;g)DdJ{)V@W!iJVrt?XLDY4$=^y% zMC^Nm4el6bMDe9l#Pee?6i&FD9}p3izf!~`R(c5LnkrA7$2gTKB=%%1_TnU4c;Gk2 z@|Dtvyop?D-*zw{Xs-wh5%k%r`vB__&Fb|e)ikN|A}1x9xOXo4swwz zZ0ND_htQILj?-?HbBt6GQ<)UY%_CoY;wB6^LGSnVDAvtAqry&?AHb#&WV4O!vyYw@ za)Gh1&hwfc+iTF8db|Puk3fRw^sg?@my$?(MOIE0X?vkutlG*~B$}Mn?8C&2eL6p{ z1T1{kP1@6_^u&Q|!arLzIkjS*M+wtc9(s%!gGNqOC-}x(EM)XpbGf#P1=41|;$1o9 zGjXdP{+5Eus;&o4yUqLYsf|eBzoEhOjVnp*NELct-@SB>m+p#O%x2Xh$IA+vjtfd8 zNX(l6Tl!dTz_210Ten9LD}`ujS!~w)16AE24|%r>zhKOz4u_BQ{(^|hkyA7Jt_tcN zQ)}z;1NOOG?95HcBQnWbCmy8Vc27JJSG_f6e@mTHX%5VhEoEY)5c#gQw!VgjTC-)1 zBZL`oEjVAasDfLhg?nZ4L}AJ6qEx=V0DVhCPi2;K`N)J^W-hS4wWf*Z4=Eig1~F#L zBkxHnS2u|Rs;{A{OgSxrVG?!UGkhvx*=>;ATMyKgrY@SjUOP8h6AG>I1+DOnb0&}H z!>lwA5&*=N^!3+Z%$fk?y&TX4`H6{D$Ugx6?`btRFmS=p10|9N}fd@u*?iMM3qZuDD>THJ&V37}Kzx)+) zOw{=`RqGJyE8O{3#|}R9Tn^ri@mA=(3u}p#oM8|+c_B%B4X6*V)T)P@%22>rprL0L zH_JYv=0m6(`7f{~^eCdZp_sQo8OXcrL`SoLeDyD{iE_WZEUix38V4H zG&*(&g^oQ>u3=r2eZqJoX0fcH5}n3qiFUe_d)<|N$hf>I6P*T4?PUlThV2Ml`>c7L z7B&LrSv;|VB>(Ld)LS;AyR?b(|zU@EzIZPFPP>^Jx)x8CP&29)ENsD#E-9oUf7U4rQn!fR!W|3 zN?%3fM(8ANI_o$Jh};5TMo0IZlsUO}jm~X?t_q=so<*Thn1YxKo%?_6v!UEm(P5zM z=u37 z8!DFMxa9Ez)37jOOC>?U zjupw6`#0GUM2#WMF0|@&G)*!5pSw&bulrXAw;>AK7zlB33EFt~9K>ifRjnjb5vSS$mQscr;eihm5mXv~4kT?d#Cu4b4%x7L6-cJ&d#y zSw&Tn5z@8w^{J1SK7&z{52Ft?qp5C%HDOT?7YQkayw!E?o)!Gn?ndf|588M-xkf18n&5+X z@@-Rg%H;_xT}&q2dl_xwvRAOloDx4`?e~;4SFrqTE+$Q-(x(iiRtD%aupLo^Gy8EyJqyYHG06n)3^PO*Hmn8 zVQvg6F=?xifA-jbJOjLpq=x}7-z4}39(@;5Q&Ae`f$mnz<|tIF99yc=h?wLS6<2v% zSC3L%>j4-k#h9LZnd*3|OhrXmldGqv-ntLksL7qoP;%sQzzee3H**-V_`a28Q5p>~ zzA(aR1f`cU?*iY{WOXj5=N2Wj+1N(;cPnt_zO|@oT^Mt6R-DCPXc3Q|kB1~oyWXd*4myE4>bP!+uhhZ|MUx7-@gjM! 
zfFe?7cI%(@2<~QDxv+HQm^Bw!4{FQuxXw96-CJLT22ZU5`v-vE(LIAeKp(1|nyPGgT zxSRqImw6TUFllzoX51VO(~)qoP=O-MmTFAlb+u5a2f&iD8~@Hs7P)?;*>FDOu04EX zD6}kXKIY=!Hpd0XBy*B=Wm>h|)iM+M>M+l``fs)hs&HGQz z3$Eb2Z#)%$jW^N@Ph`Gtii-^dg~$qu9RnKT2GM+^dyOKDNKMS+Pw|t_w7mY{Z}GhT z@>7C8W8;qsPXI)CvAk~yXZ$eqE(H`MG$|cC>Z19iq8bZgSF}WJLDe%V0#<4`L?6_a zoF}+=V1swcv*Ydo59sOICtxKX-a;j@fys!-ux+|5iOQg|Vf2})J+NJftyJD*=?lva zvQL#NPKUhhx+D6AHZkZdVS06VDO(^k_JvDye9=^78A&-A7}8pK7|m*iE=$av>}m$- zhk8%Oww@*g+G8q7-F7YnMx&PAu==v@(+oXu~@R zoe*7-=uA{f3RPGs7=%)uNL^G6LI%fXm^D4XDiO2l`k`WzHfQBNQU$IyfuQhIxYHu@ zr--D=FvyI5T7I1-KEO@_v~K9n`XHLb6XyBKj&H+L-`^FeQ?YD92BK=O!bjSK57N-i zq_A{Co=9D^3SU@DY(e{L`M{{KhKo+sowjVWT`v0yJ4K zSKAF2KL*h(^?^K9$&*P@Fl}A~asVQLtJe<~JDgD;2MP|3A3{%*q!Mf|y48_Nyxt8+ z&ciDY@j$-SgfN4>>o^yFH6|EaXnw$oZBez>imW~b27^&6ycC{UeQ5^HhCJt~s3C$& zo|Q6y5VSGRWKY-T2w83aQIO|Twf1eWOI?QQD+C@>-EHX4(O;m`QA%(9#m}fh)okOa zINIFBo43GtqI=r<_r*B;avN|p2zqf);=>cj9aPsuvUm-Oo%OHBlZWW@*Bk2ZrI*_v zza+QWZF5OV*p63sLxcHE$lnDHBz4F`d|(wu?=u}OSQt>9UTD@c9_I4}Oo*!X#Ynt|qmCE@P5 zSSTuN1F7!?4`~_*W|7-V@Z~8N?P}`POr@&_0k_4EA7&ahXMMxFi6|G)g>AVe|~u5DWVr7>L~- zLDp}b4AEuz%{5pNm}HZwrzq;=gEXl`vmnvyKm0tN2JBQFHlhghv|}xaeIFdl3vCz?it~FSXYyC1)o6ENGS0~Nr-ncZCGW;76`w< zVUKlyJ8vP?`@(l85fbYELV~2^Th8KqfZ z6w9cAu~$=XmsvaJPwoq;gvIhTHk(E{HHW6Thd0fU{H@FTRTHfVU<^vk5_HER@4u7_Ww^wwzXJ$jFS9!b_ ze-`Z#yN(A`fZPeWA>OD$9OAoC^4I0XKrE}=iMfIHRmKhbR~Fwja0k6FC(?@HjXbOn zvkT%4g;khn6>`VPsZumfST_zJ@j@-e&GV+Q^lQMFE~-jcvw`Ml-4F*oYWKL0sV4s?`!>T&8*17k2UpM@lq9s&a2~ABd2rYi+q@0NhlCF- zYM8Gn2x5R35mR&&UZCwf^A}_CodV3EL0By~8>%=X3&$K&U@*tQuKD(5QBjLnwVo;~ zDYg1$n4lO6Om7<%QKCl&ezRz(t0Ud;;+c4`|K zWSCDVL!kpqe*<$WkWuD9Enr}*rI)m52X@GofhbXfIPvvYriOn?5M+ChG@&C8sEcT7 zXZL=IIpZqL`nFeXg0x{g{%@L{5&ZgSKK)2ODA8-sNR7aC=H8-7(k?S(y%-r(Op0k> ztEsk`FVnS|+U3%~1wfFP7e-w4+!7(V;sc4^hP)u-j4Z z$lYiJC}=tlBKz`Ff&J_^_yxcnpH%yyeBw2u0zaj$>;U^r>1{$cb#_z3*l{^Iq7bok0Op=d7UJ+w!k- zi(fLX--REH|8#so1vJ_ZI4;=#)_+g}t-Fsd@5rjXA|~*SCavr>(_iSH|Mh`-L6St3 zoS#|)ey9A(-!K$AsrDgD((6c)Bs01o)C4Z;!VTPw>o#cwU zdm?p9Xa!lmchA_dCIcu&`_e6lzcp7HbtqY&Y=c?}Zu`h~7^(GR+OLnBvomA=C@KbK zwBWbraocqfYV67^AUp2691okV+_MLR$K9K`1rHItRsAq5Yrfv9?2zVpF@Umq7t?1^ z`&iQjji3!S(T0Gahn`EVW}y^h2B!QTTzY^{h&h)ewpE<^DeWf?_{%%y=x%-%DJZQP zkXnX_bE+@g_WSz=`s&)=4CH9h^!IV6K{0x?-P{VIchRGLyBlujC*H8=Ggag z$oo9F19@4()inj)LQ6Kz1wSK<@DhLLJo=rfX_bRIL z5jTCM={GGnMZa(r)cXe-FVZ=czMv_p_mC#PFy<8dDM~(()+z<|M=pqZ)V~OJ3-Cu{ zFOaq+sy|LyhcQ^g7;K>X$|~R5%Jwa=#~j%3;w!>4i{7=G(gqe=e$fZ{3I5vw2O7^xX%a8^HX#4ao$^3m|%ZLx)lX_O>%(_sP~L1>?1Nth&XpdZ$eP4TOnZy9q>`mw1K=uLGq6 zztW1U>b>q&3Xi zIL`<}>qt3Mu0c!r8rSBFA!mAY6xvXIX&aN6>&*L&-^7pyJjW>In+KYBvzc9804#V3 z(zS}wqs{)->X%S-98`NOq(DayW=%0@S}QQU0zSZNhe*2!E)vTQiroUtsel8GVM8&5 zwn^p4P|7E4)lGGcCGDNJxVsegJA4!JEuI~je@*~B#!+A_;SM!D<& z-S8x zeY4GTk6d^1o5k6|vDSA}|4FC*wqFK|)OYdPko{}u2j4AX9-=zau(eU|Eo8I>D-Az* z-^@?j+x0z2kc;4UaA>4KMkE&%{l(JmTy2JgCjqJgQB% zJgd#;Jg!Z+JeC%@y!TCWg;MwCa_#OBmHM3vtunhr_G^)GtI(8jYuxT4oHWq8*1;6W zllET6w0lB0R3pFmi2ca)(%gM6VH1QBpbMbLu_Ig?Uq9d%lsl0@^V!H@flV}5xw?p4 z$mV1x+GcOt#6ul@TMNK9go*CdR-j;#PEUaJMdJeiRf#KY{7ie#qgPT|#?9TFF&EYe z+#LYtM4oyj?c!?WD!ZXl;0fKqI@(nm9^1Hkb#Axh!Z~a!ZDWfYkj@<9`be*jJPF@;tdU{}@_=kvBEfnl=gF4f!R1n5Gd%P=Zda&ywXt#ivs3GYF z4JeQwD!Y$;T^6x{N*GowG%!y3HHtd-ERCOfeW27WZ*!V_J&cuK-zYa91A#b#xR!;^ zD#VREu+*w7E2NO=CeUn|TuG{q(adPe=T}#Pbk4M?$Gr`$tR*?fq4>;GZ$*Ct(^)KYNmzmo z_9BaIuV&RkLjI3EMHDJ~k3!8|%qJ`RI0f0#B}LalRK*`VGQ>{RI=Cy_(dY8>o* zEEMc;B&=*h)oDenvyh4sb=@HvaX5e-gA??YzI+=X$uhs8Sbyzhu^1CP%=IpY9fCoO z(}Qa91^jt=(76!OF*xtVFumQ>8OsDvDc>dLc7#);J+6$4J%H_GHVQ zIQYPgLak`-(%MiqMhpuGe^}m>#r6A=T^V?Z7MT!aoG2Dp?t!EL2lhf62ZRs_4hmOfFpxb49P~QKU 
z(us?^1ug!Hn+TgKj*8ykahCpq%XwmA-f=Z|ephvPgfM@3PplXYCN^a_WxcT|Z0ZkO zuj5Ta1qM7fZ9K|eD4GWAlhOXcXzleUJ^O+%to(~hw{bX7+tl%BenIHb^TxPc{1=I! zm?_bU@bjh=k^Hs3qN{Hv+Hn7})q}r(bF|Fe>0P_{7<>ZvFZT_iU*j}Q?lH@J#;J$w zH49%$uxo!t$}PY*VQmtzEAULuE^Ggoz(drmYHI>XbmZt~wdNPx(KOq#%2_8@p?no7g6<%CRKLdGJT##=MxIzKXs7_lfi56bukM?=z zJ>BIo7M>@M_<7K~GTH|;MD3}+xSCiZGS}N;ik{8GetnP-&Y}lOIquQe0*sru#rHMS z+BG<<(ED|9m%QiMddq(vffHG>OoNYBJy3@^86n7E)6f_h4UM%F%MCzrH8juG#B(o@ z@uV`X%w87|X9%%XW0eli68?>6CtDjg^%qoXdA+V~VwyO41H_w~T%{Kjw$VK{1R~yw zd85ZN*-A3m3ZKw*AsD5s9HC-XvrP^0)Zp4loorFA@u6?Dfd`GHm* zcVqpSo%?Qgi$+x4_1qFim1JV~v4-T59W0sZ-jKGH-N>Ks{QdQ?(Fk@{7qQ?XxY%<{ zI2A7%bBJhOmeu)|RH!bqIPIemCRV!acq>a+O$3AFJqn-xp&)!TeB8(A!C(?2?wMS_ z5J@gpBWJ6JQzX};rI>4kOrKv*`yfV~1fZopjMMuRLRbXA7I(>oQTf1AoaYvH8qR4nY_*U*sN`w)0F;@aAg+46x0pM}ct zp7E8UEm!zvavm42ap9@{A-qzMsCPLy`Np!0GoVDznMjJfRnT+viyqi0I~sF#CI~Ks z?A|vm9#<)tU!sDH7b21_@Q~G?%T~ zn6+Vu6p~orAX&p<#}e~w<rt!2-656Y{Jd`Me5b5yYeEu)e)Yl0E-D$vhREb&uEC zt*rhBMe`d6pfJMF*th!vQ=@Q)3VH#V1+Ml-q1z?Y|fOJ}5jABXPXfn{B{ohnu5P#xS z-~+0HkZ_9QC8}4;xGbnHB}bc}|Gl7MUJHR&w2rk|(abG*U}rAn?`;c+kh8Xd<6lDN zf(On4j#|?BxYZ&^-ZF>{IawsuZJp6mP$5(ONQ+Q0&z&8z@lM4pRK(Ia`oD%#&e7D! zSLj@pfeDRBO2AMc0o}vY(!ft+GjtqgSZfX;`1|QI3EJywW^=l9o_(OI?8-a8-1ikWRPntYMPm$bVxD(*e&l3jdjW`w_ zM0R@8qqk!8{)V}nD z52q{f7KR1ff1od?3I~LJ2;q0-PN?4?@<($AuK#d}Uj1@r5DdBefiB;o*9Q#9QT@Rf z4(bm^{<8Umd}9F~*6yADVQi1>4G7%z?^pl5eY5yS?l-(QIQPEgk?_#=@S2g|KNs{D zggxPAY~Gh?hv5%FBjR_4;^$sXod2Nu-FJi9Uln(v@2J{SgJzkx_ABXsc-@g_=tZv& z-%Sv2ueLPbf!F&2(xd_iO>u%@!jdbej5)(bqA(Tk@~whF-DF3`?^gDBXP?T6&ktwNs<77foG=Qybr z>rZhm!&0dRHSo;BaDEV&IZ4ylWzu5YehO#3DS^B`T~TZQiFzCyT1=VTKR=(mfH|w0 zq)A^3xJd{g;lg0g6?&b z!;fJcaKU_-#|<_Gd>F;ijOi2v8a4`Mgf>+|80K+y>aZV}6G8xFT`nySEbogM*@Lfx zoN#pZpe!!aoM`T1%oRZb6S*=K0=OI~o-(pBE9>fT6O$WJlCpnmRE+cJ5M?|Z*_ZyH z6{r-YTT=&ZrSR@yMlR%Eg9|AHlkfxTyojGxl8plLfQOz4W7brRqU?uY`QW4@XGy%u z^588lcTAU*E3fH{$yQ*xF^mj}pM&;sM24T$ddm^ z7-m`GPqyKu*iDV0?6^#GC~sA~?x#MLK;69EMH0ESIltg%a1w9#)h<3vJx-_9@@lCU z*asy|!#?z~WD8ADDw<^Xh?};jdJ~dSR47BJ6bD-?suknZa>cGz1eaTw?t+mCslh76 z!Wz*w2O621KB7tl3#n-d`7c~`48HDI_eh@ESV{!@+zyv=#m2BNJ%H>OP=ZX%MVxnKF7**;6yNRZw@EPO-rJt7% zqJ7A&*USZVKW*G4@I$y?n%#BqBfKE(C_I6E!Fp&3sJuuRNZz4GKNhb8V*xWhEj*MQQSN~IWjKTbex4L_Z%B9MkldESUO`A~5raVR0Q@TW`AxM|ZOO9|*~ z&wu)Kg5|JMQIT9xlbYeWfM7YKOFU0P%kCfL(jlBh+Nqo$%ee$kqv*k*1e@_Fn-exVtBVn z9`fmBZjL~diI2n=OYUis+HE5T6HNwQG?9*>TcW9wP0}Hs(EB2fT&d6PV6cSZieF|M zS}_*cfi&f_N2eD0G9QP2&>1=Z_uRuau0UH*iFPDp;x$jrm=zy`zGDvFD!6+6r@fa; zo_AkTz=@D%0qYl+{A*>RIKET71hiQK$Eq-F>oYd8!h$o~cIg9s#U6CuvsX1H-ORko z^z~DLJ55OwaIWa0cCfYA+!oz8^J7ARm~hc)gt?32tF0=_6*yct4GD%}{R zj<5@OF8zs=7i|rx7w=Z2iR+EfKG_w|9`U>`ogLhKo>mtU-xq14U}RpbK8V#9YNNn~ zP^P|2s|mH~S$lkGE;RYs3$u_zuyGes<@p%&jK(mr4>|3u`j74xuV!g|7|iqIzO*l# zZhd_zmM4+z%Gz&~4=)e(JwcXxV-LN4(A{~4P{7mPK>zd9zRDN$n|ePo{KXpKS-%*9 zQnY1|A6?nmni0kqxa^X{aNx)79jPx#+#<(5rZ0>9s>7(t^VXfMFAV*n!5B0zKeX*WT z5}9SUhWfq~#b~gX^|$81Pkgo=|1fdNzw_jl@(oKrlCNZa33N&XM$*m{n$TVCmZamX9das)zl!?QgLOrZ+!=XdcdKd6cD(02JF9jiwhof+W8mdt3|ahy zC^)Z&HxDTW*weE3C3@UwJBMfT@Fp%9`60Qpd3(&1)D3Dhb6b-I2pkf6x|Y4AQm*0B zlUKA>434?vsjA0M7gwfqmI+SKs5UAN3FlD=+jP}NKaO@iE<$1-h`>3_Q!3i#u5NTq zFKvIwD{g?6)uJbf9g2G7QCd|4hANwAYY|~!oz_y-mqo9H4-%yw+oHRVSfCQwn0h0H zq*bvotaz2@hPck#$O*am9&NM7e6Cm(e7KAIfvf&ezdZ>gg?htzo%sjhe$5>T^}vff z^$$>`u&aOL$S!`%>ksQ3k@m{LFY>UK{{xcy=x)&drqC_!hiF&%fh$7XRs18mS`kQE ztwD@YA4g=uALARnTX!O+$2wZ|o^UaJas`!l>iMP<5DHkNdNstZk6T6H(H?WHd>+>@ z*vDCGNqi{hoMhl?0}c-H0QFJUu~!A~N$A+gL{d6H{I2$+m=KRa`@M%M(eMdvH)Yup zBOD!VcPV_`)uF(^nys@(T*s83iFUD!)HR@X)5JPVVKruK6=5vSn^rZnn2>8d>}t(t zh5IyDp&|g${|_wiI2|;_07=jfF8Im8u;m|JXUN`f)31KTvTsO>vmYP*1KRRsxKZh@ 
zFrF7E{ff>$d{KNt`F+WL;3+JaKD+8$apCW+<=j0SZ04b1(V=LoFftx*`te$f_T~{{ z`jIc=;I5AdT_EnA%kmpO;xyZXDTYTv(_{f(&4NHGJUf-q-A&{PJUnFgR=((Q7ny=Y z$6lsQ7Ulz&gGpIu?%M$`oe9*v6{qZEEWKNy#_CJlJmt{%##z3mV;?<0+}|$w7)&g& zB(V{$O5uf170KH$PBb$`iHKiGfjqx&?8C&PQ=Wv2JD+`Z%3G)&Hp8iteQ+s;U2pA@ zeS$cS%e@b9R|rC3y3I*k(W(P)h#8qTPgENlw(crsLJ@FZHC z!BwAm_AX%fnV1f-i3@bhjt9$$^KT13;TOGPQ49u;{#rMKEh6H_e8Hjk{Wyx~v4aVH zi&H*;Ax`wy5H^@()KN7CsRAc7!!$;Gi*gXw1os5{&?$ilsC?ner^`U{)h1TTBasP< z15@SB*@Rl(B4xa?5v~gtNCip#1d^^oXmd@)^h(Fc1VZO%Kp`=iER{S0)|dd80cDT> zK~j8--i(u7`-mlkY{g`EtTC_89ri#=mMkaSOUNf&2T^Me{#SfVIHCsc;k+|YXxPlUvtET90fSbi(5mH?IDB4o87>w zgpZ<)k8J8+9M?k>NX+j-rF*IpoO~gILl;a-AEDv{nacZL9@^)^X(JLz*&~!<&L9Vj zt8fV=?KXIK4jD;|;6FNjr}f+sP(FzJ56UdJ1SH?j?R z`BmM0XxiOCI$Qrw8JSGZK^mfrlDumgVrlzpn9lF)zD)I*cF}XHM|ee+aEcO#jvByB zDw+m@nsO>CWv^dzF)xW)X9}uDffxsA#YlyG82B`PK?Wbu#1)v8xx1PQW!_ zuI&*YYDJaPg&e2k6YG=$!^EH$7e=U~!ACQQ1k>R48&Me3-Et%IUncgUwUTm|}Ofp7`v>rX+Y|l!xXQOIdnZJ-a|ECzz$Rdx9 zcnCcqi($?vg(X*uD)4Yt6}!v;;i!BQ*hblj*>2J1dqQ_wqNU)w`zvjOvU7nKJ{*<; zaVQeg6)O$;7h#qY>X6m@tb1qD-YB=!5gP;X>Se^U*Wv*9#4ZH>Wy5v}Hr>ETr6sbO zYD}^oovb670#8-Zm&9}SQjkq$)@4Q3r5PHw+I}h?C)uD2-f^;k7UGNxLynrbi}k;j zzj}`DS`&VflT*DoJqN{wyEr=iiug}0(N0+aMKOS~7(isc2cj!;Cc$PG=KbWW>A=1~ zlWrLgch)+z04Kzaf00+lrzDP(ZIKV78yJqeOz<~m-~&MLeYK+g2`>|$JQJTNvn^3J zK1p^zm$>MgDZULaR8p` zMpEv{9_*=Qy!9OZd>RkIB-(|#iq3sDMk;POmBRefoBpJWrjvBeS`y*MpR{agHJSi{v!0i&bq^s}flFx2OL2r#baD2z8*nKU*OlwdL5one< zn?ozteK7d*KyVxvw5+K~ded3g8*hKaLAuvoGIYBc9}I2$fv4Uj8?9G%PWp7vFb{;> zC0%fhoaE``or#bLv!#gz>QQbXhf$Or{~Y7umG; zp$e};h0Pj7liz`fW#!J6egHnb4l;9b+4+!huKW-tn^SkbA*y%e@(exjZbEZ;1SS#f z>-p>0mBf&g>wIvkHU0&uNRtNEB-(efI`%VoX@_2O<0jm*#+k7OEWuJuP=f}PuSRTs zO&hjcQ;l&`8Pi!Dw5WyNhT-;iphP11D+CHdxJ$O`o z@vjm?h6|5QUpV|81#VGg5r_}pKB)l%<~|P)`P;_;tARnr;Uz@*r}H0M1Nsmj&!*?2fJJ361fep^ z+6+s*k;?GLl1O1)xs{xFn;dbk4@B??!CiLb4`Ql_=F09_z2O_+Dat|NQ2lxa)nhdj z6LN_hNgkn`(Nv1rju>wjY@w{$h=>Z}x%k#T(xM<2J}_%_=m7Q%pZsUV!SLV%VfC|haM0BW}p*+Y7F z&`B0>{iBFZT5SXa>5CxsM(C4pz6YYt;!e;9`5Coe1)-Lo0P&O^kQ~*#oy=d}wK&|Z z^$mB@DA9%Gl~gcZit*!@*8ch1?I;75d5+0oR6b}pg+MG;yI^q9#&A={BM6$=^zz_U zZ>BRi&>Q6JCZ2-ozj&=jxQP<{Q!YmQ@5P{@>MoZAYuT(p?%*qO#x%YHoxdI^MvfF> z63YcR9z=`@A|D}i7*%j}fAeS1Rq*)nhpAZp%>j*afW+)8OgNzlf!$*~yTJjXcoP?{ zb3lc?j}24mBhJ{z0crL@F|2bS&e_ifee_K>N_2o;ev>ZP<$$BVn+b^aEgv-N%WRm? 
zfY$M8F5KXNxOtrkwd-3lY_EsKf1L^b>RZ}J+rzqxyOn(#2cYA=%!I4&ZJgbw7N6(@ zq}2nf_@ri>>xA_BL^mQVphUtysTl=2K*7J}2BY|{aBj4tDmRcYyCuYg5K_}7TGATf;yv1@Hlo9-8-TGeg9_3uzTLS=hV0m&ef4y7*iyQK>n zCKm)|6$Gam3L-BYg@{T@Nd`TIn7$@J;*9Mtst5iP9XXy9M0z*8)HPxz@jIdKXv<9v z2T)#W94X-CK6e#2Uv%&<4#U18qRSDo>CeJ>G-k6*xh=U_WEKri`9kSmoZQ^hy?Sgp zH~XGUagR^OO~P{jBR_EH-#6D6MRtIoo8U;DC*?T*&~qH3?{MO&NsjM_5r~t!P!pyK zS>~`?hwpOfcEX&Ix%fR}c7(du!$2ojsoRX?Lrip@w&Qr&C-~aJ=cse+{%5NMlEjeJ z*4f(?7IfVAA1v$lscBSS0O)u6{K_|)>!UxM@7JDS+_w(q!8g44n}5KKrLHvjQ_kyOlm5HAACW55NCo zU`A~fJ<{g=^@}e2|6+GA{67rL{~eVeWNP}q0PZ$Ew$xI58t;L?p3*YUSXP(P0;PFxbfdI$@OCv}#Tm+~_tQ=8KC~C;qoixH8 zSIv?37g#;`eE6YIl%EkJ;jqo_`(VC@w_XH_5Uep}gi#En6LSOyBXv+Na`O`OETxCa zZyn?xy<+O(B1xE8IrE9*oDr0AeMnVZj1^DTd5qBt7H!7cWgs{cR`6h41 zpv%~tc+l3{Sq!+2;sT83xzz-Z#IF+(ab+x8#H#F}QoYEhVpCm9N*;nzonr!rOA<|I zB~ADhs8Ob>!JB4XSB=C)vaK;NgC*$6W39qtHlbPjT*M3<_cU*IP3T|L6AFe5$!*AD zZ&D1KyW%0?&%2DS5#y-!2hh=^a#RXUdZ9r~s7Di(87mKX7m>A-GAlKiO~{+m#K`Bg zH->GeJI;p5?QAr7?K`eyalCw9(@e7X5(UKlt*Tx!%8cAf*|yki(Nq}GOjTv0Z5^U* z(JTCrbksP7ZHL^!w9uHQDLV=diq`+owo!MK9N^tQ(95EwH|hy5`Cr#YykOZzb9(%R zDS_|e;C7?(M?0n`krQ{%z43OJ4}o!*y$Frq0~vWtX{dA^(+&Dykq405Ttv(ENP#zG z@)T}t2PG|E$pcQ%XQ}ckScvRJBkwK>nZ>qTYyp)F9DS$X9}j);(N)^o((gKx9nOob ziJF`|dGT_>`BbQ=6NW-sVJ0*13YS?TJG-O1rvW;kUXykdv{pmRt*(Vcs_8Fu^hqIz zTeHr-g*WGHw1wgvqb;_ZT%8>^AodvJ5^w^oA!IewocWrnPtL?Pb4{b5Tg1TF>#=Kr zfp5$A{(NIzeo)j6Kr{`=NVY{~fmvSTcg`s!aGQ8>$m}W~oWML(8*tERaJZqXIA8FK zJ4IFrLV+5XL?Z` zHVCL@iX7`<6s@m_n?t}OdqA9quk&y-MGZ;*fy`c4>KR_a#%(*`i;Bn%pI4!q&B}wqL4Wki_uHQ$^aseFcjs? z$kNg$I1@*lu?5DMOUAe`0OAp0d5SEWEfb+mM+p6CiS6k0)d#5oqdHkV{fz(EHy8Z{ zq<)B9|Ih8DM~Iey`CDQ=Hhw`|4}za=zgj1VA!!W5rM_qVmi%O#Km(qpf2_)3O8Sue zxQ@6B4$P*$fxfh7{?XA-2LRZdptiz-& zfwLRJi!$_Zz3=@mQ><^ABq7yr*hYNr23=~r=e{z)-5WoIR2`X6RRtnv?_ zNePj!U2k2vy$v0-{|=?FbTBG|ACia&Qi#q{`S~|S^A7wm`;?yZEq$PWCUGMr%{?Ea4c8@V?RE?_om3a1Ybg0v=AIkBgHszL-%8-O^yMkr1S_|mx z(*>weJGTb>ur29Z`<49DqWjQ(LXqf(lOzSql|)I_BEk*ZbK9CZ&Uf=ap^WCzFcO~Arx@+wKG$A$8o zHaqUj+U_|;XqPUHoZlv^BiLw@3%e0sh2s;-wLXCY9nd^Oy1@JOy`}Qq&$_$x!U%g6 zQNv6txJgBp(T>1!NuN`T^MBLLv9mG(k3K0+->fU_@Gi2MbJ`f5_mThv7My9ljH72* zNK}4~H>DkOh8R}~EVzbcuSvA0veV|GaNqU@?r~xg54CJ; z=QNyML_K3XBOObzYXmKmuX95y3BeKZ!n0rH{T4W|{atMb+Ai0}ce#go@|z}Y(jvO; zE9}nJ4bItI&e%)PvW!i7kE2wfCj{&4s`SrZgUFko34*^~L4I$;UzULJWS{Cr%YRf4 zWBMO$LqQ`8i~qY5m9!M+%Udmf~Sg{`5d1X{)tuEE5lt~yo=kuKUkhkX5}rYVY216eRqcdI#qgPBgQ;A9RS8T;@S)ue&G$W4USI_1~6DK<%&H&UCgB~IMW*b2S-H-#z}&eZ?n93;n+pg1j^ zP#IU)ma90Vt|P!hnQ}fTv_JHV`)H#~%_%h-wAa!aV?#6bs4Gc@`<}%WU4{Ly<{a~})M~M^Z;tbHupyVIGS!MqZ!yVZnk zDae_;B{sj3S@_G{22mhC^-{0_K0g$EXA*&%U%XS8Ca%>k%w;%4=pCU1{sZFgbAlIu zC^YzaPU?XEA1KQ2PmyPvoy$Vfr6wa_9MN-MT^I-x#V% z1WVvc$X+C-co$iYjcb}+TM)=w5}m)6unNH;&TWngJsBvU@$AVEhT~GKtHhql(Fh0P zni_GeCY=9zB_%w|!TpN!Ys)RUpA(jLiyn$6D`c0u&m!63jZR*#vO#IZIRmuIy(Q^6 zL&>XTso(FLg@QTN44# zTqw3iXF#a!*V~vRUmtq#RSe-T`+?|L?4X@!N;2NrAG#I{+&w?Mp9nvnx(P%4XU7P&WgWm~pPB!jzROp4djvDM@ z_jrpvoBd@v=k3j}Cn#OeFp?pu(nDY^gH()eKxHL8;iaqh2@Qa zJ51Kp=7;)7$|eUbQ)SuIq}-CrN{{>%li}$en691Ff%8PC#UrpjLbqD;MvIH*ScwdG z#Q9Z)E8_wBX60Ae9)$f!RcsxMooOg65*}epeq>|_eTYP~#*a*`0VD*dAG=vttIsFq z`tif><{3G3PS6&!eWLK;bken;=Cs0;{^2(I9k5QaUmOD$kG#)jB#bu-%IrzjU1jvsP;J2W^324cdc*;~9vDb| z_3?)T#CPzqS`T|ekizEVT8o|*XcK5^!2Gu4Q7>0=Qf!)E689t#90>6iCK1PBdCoI$ z7Pl%rS6{KlVRTqeyEaxAUSg1;yF5R>L(aKb3 z;1}Qm7rvgp;4!rLnCKV>eKCG?7z#BoMG*;pVH9BvevqJkJGH@T#P3MqTfuY>!H9b3 zQKM|7c-{sKID|;&8`cI}bmNK5nG)#a=uwF2jJO3Tgg7Pzca{>{ zkz#gqXyPOU=yOa6$3f##n_|;wGN7>enXQ{QW>uEfRcmRx zYL)55O)<>u^G(P~i`0!QEnl?!lcyH7eTLUF771akmx8G97*xid5bO;? 
z>h(zOo@n|Y1WPSZdn>;XWAeE{X26TqRr>4u+#u(8F8E zY{e`b3jOtYS1jSRPkG)v7?A<_-*=>uAIdB1#sdQ~rt^nU_j;vQsK-JYoiM;4s zyd18AtOL)|q+1qRr5_iX(0hf^^Sy_pSHOo1Z3*#fkCk7=&@5qE5aFO4N8;Wq?c1Fd zrV&@cj+uKz)k`B6e4?7R0y}6A4Wm;qBcrZR8e#7YRg@8228d1T{DI2-|2Z-Lp$C@%lYPG;1t1_3Lb%_hgCds^qxdB6G z-cH+7X6WG+m+FKRTip&><^c0VJ~>_%LHu}i-GqQ?>kCKLZoHr*&SYbHk~H*j;Y@n4 z6YgRVM{LNKUfc(5pKim1zOH9>Q|0eWl+aVeT4)z z2F1TgLQ``Vm8S>f!YvcEvU-T+HMM`;TRs~YYFE=-K4yp`ILl9|HU74i{?dd4+3Cj8 zG7{X+zB#j)41cIOzxn0BLbY%P_psP6k&W|?sDesu=-5_M`y~WIzkIS~a>Z6M;T4T$ z*LgJrI2YTciBAfquqv#?P#%W>1^U}71N|6PYtK$nh~D7*fEU*ZpXq@XvZB5@tfokh z>jvD#p)&uMDii!|`1p6sW|(F~e;L^*P*S=55^@2Q^z&r?frjwQB;A-NemqeK(Hgk7KGo1S?TXR%pM| zke9ARG2aJfSa%!3?=yu{lATpFLot^dqqlwCwaMyr`Zk=wtl)tC_#Cz@b{(d+M>`uz zP1unWTgbX~U>~A$o~X9W0%C-8Q9ZQ!%)P|)SW-fmH$QG8Vk;6S(?=YTFVA)8{3|0# z?rzJRSguqlJkJG-xM&NlKw-l1OC}?noTle9m1d$<*Fm*|YnwIAaQRP4wZRe@Xq)7rxF!qC0u0c!S+P=JFsFVF^R#a8DKYANHpcOwHKzp6R4P(K z*Dcasce`E3Ltou+pXvcMdF%d>;Q}=DeDFCPH_Vf70cq@-7I;W4mV%JGwA%J2Nr$olZWfG%u%R&qt z6ebl1=h@G}Ccj6A9s?#!qq&;;w%?IF9~Ci0ZIDE4=vA$;oy=Wr`iGf@=Gl)G#~1>w zM1;ej2mF*-aSRJTuO#PbzQEI0_sG($)pO8uwIwm?xgqXuDPTqmfnUwcmsADwV zU+YAX()axGXie(0r||(~K?Pcl)G&`;jON$$&S8F96-bn(95{;}Z;Q_qfbBWtNW%&m zhLlllul}*^{$BW%D}pafw`f4F7*JRVoK`s}-CjrfQ5Sq6J9CQi2}j5&RJRRS8c6uo z`l?=`f2s{DcgQ6-WJHDA$MvvLi{I?hXgsUicWIk<$rc#$N5{V8P|Xnoica001yxlI zG_5-qU!6;-D}|2+-VDj$JVZhxU8c&DDd>SbD$1-3WDqQHnxUy8R4`(GNI`jjh?Xne z{^HTseDLX(HIUq(42Q#_yGQikEvpV+2oX(JcxA-m&tf*zZN2@lYS-_LA%2@De(G-&uw`cAda`*WFZ5M&a*btI7gG-tL zW3*KFX)a@$BC{xfeKUr>YFm7@*;8wHR7u)gLe$swQfnc!I)fldiF)Kl4 zdI$aOae|+Yws3<10Jy^Z$uklE7h(-7Yey@4J!2zUL2D}mM?1UE(Z9c)PH8CFW z(;JsqhN89lqCM?)EAz+H%g`yNHzSn(S^qNOoy*)jWiP*z*0sr`d1d06lf3vgP1K>k zp$ZMM^M;r}{QCZPM$CQGw9{Sbu;|HpRsoq&4lFjRdV~$MlArNwLzo-mQ?jda;T{6` zcZi6*n2&?BDpH+nD(dzeSQ3Txs)@W}4?`(=n#~GjayMyrRq?Tf#lI&kgLI9-i+s4E z!GmGf3u^VEwDLdtp5Q^8gbgff8~Cn!ldnpJdd9m0{^m#>)P^SG{K;cAeIa{H(YHYXW?R z=^KSrMj{TZ@rY?+<^44qyEVrw^s2BTLI2#W8Gco5@8N~41abPJ!pis0!5l8kyquNN z_zz%7x$ee0COiCX1EC|TV|ID6fr2AS$MM@yv6jK?Vs%2x=ruusaVe z-I2J|sOh?-UJ=WNq+y!vL1x{~1e*a8Xbz%4l3|vSUtq>ws0k}~Q6}Z{CGgmW3jq6P z{?11Q(m)UiV>s>|_Q$2y`}@$z%Vn24a(B>`e|S{u_JGIz)mAVjpvV)$Xj0U*(YUy~ zOTCht>YzOOW1G!5G%F9yE(7VDhH!DfD-73*is;vP;D&|~r4E-3n zI=a1;zYVSqjuZaSqig384jS8}BhA$upsJmKw}G}6Tr|!w+HFX9jtN9z$?>9~v7Q}$ zPDUewz3XkHp@QU*2>(>soJMZZ=zP(E3FtyVOBmsPbYM#nonEuRA!DC z_DrLvgaC_|sb`Ny{Ut1zjj>&-`6_E@cR~Y`VgnR`1qtXG6>_-+&5TY?T&wOxkWzS> zT})eI>cTA`;0&@jPG14&g7D<3lLa@Ua({4vF;1b)ndS&y zRqhN%dP4zW$;{;aox|Po5t;b3z{Mnj=KiRSx=0HGFw;7ijiZN~RsUkfm@Sfu_q4&D zT|U?MBd1{MhD|o1*Otz3?~1@l%_0w(0+=NmmvR-jh8@Q!G+@TOdt+C1>k4^3lqu802MGr4w5Dc3}hy< z3lBrPg$$H2!_m^5R>3zlpTjB?G7@reI9KX=a~+1;yg1^3d}CgZ1!IuT+M!;l>nx#s z#_x>O=sQ7qm2UX;Eqr=uSu*Q`gXsn)r|WCm`5Nfa4{Tw~b%O^j{;%h@^>ZzLGMuJW zdF$A1{-0A~h}pEW)4aLsCF=b=N^;5ho7QYag=wT%-2xaN2(VC>g53)UZup;EiVz~g zgeVATXljA3dofBN`=zP1($-oMUtD(zH>xJ6E1-A2=XQUqfRIF8FFE+KI_FMm`C^-0 zE`257FI7L4)Gz|7k+$`#mr=K>oBKkQ#;0TPmm$Y1MSDgr;c+;1PL3jkgz7Zemf254 zo4_n#g0sgA9dh`Gm#MssOG(si@Sp+F{*Vv{IVhdMLK%88kaUv_K-C$ckJHeCBn#}y zY&HKhUW6s#HCr47T6Rb$bQ%$ql`*iG1iBXvr&`sTG%aDyAJF0t913TcY{Q))UmmEj z^fm|I&Y-2LFLe_=U`>``r=?q+a^Eoo^h1;+7`Srr3@G1V04s3RA6kAKTN%tL%Cu)A zvfL9rtwXS^$2cwvWlDu!mwmbB+H-+_e?kD1T*iWs{0M5axivfh00 z-iUuKSAosy7^J@i;=NHHEPrF`@eG>Pa$M_tyrB6ul-ems|B z`?<7*90<08;F$O6P8y&;Gu*AEtnEX(Se%XByRq(X$u-19A`I}qIEjWDuq|uzPjCLT z`daP3B~+^BR`e{u%*I;)eP&&3;8nEs?xXS`yM_@cz588w_yZIAjMr5EQi1;iwQZT-F?Gw>FHTzSx~C*I!dWZC0QEsxmn7ZhNi}e zrCCWjsjBJ8iKS5kXvj&gv-#;tMG|1xv9LMl=>0b0S1t$p1t{!?L@205D2xnX-am7) z;*ygpFSFtkQZ;*rbp=EGmeJ7B(38>lyki1ld{TUXhxPS!f#WQoH2G4)zT^jH_{2pZ 
z{sndVODX&0Co=Ir`c(4&J5c{cm?9u3(5TX=A|RDpud}&(N=@VGN+KZOhp+8>v$?tR z?Fdo*J5ZbbzI2cs<^8onP9QiUAeVcuns0{9p-;K9`4VsQp$Ko~I|PD^bxLpTue0qt z{q;&7RD;FMdr2hw`GI?arXnCb4qtGl+S}&|4%7R0cINd(K)ytG{9+GrJ^uABu3j=i z91g`2&*mKw;}hc(@&yP@{Szp$}pvkAg{o%``rBmC>%LI#eTJqvd@Toby z0GK{sUv?zL&uJUUA`@BHZ{mU&7e~_zBh?C#M_a+gam5-JW@rmGS@>uZtLo12dgZJ} z2P#mECTYM!F^w<%8)c(;y-H#da2onNF@kECrJqFQih8~(NZ1zQyp$*^|rv*5@p5_ zp4-P6i6rxm+4&xV4O^yPuN24hr9Vs3Q6q+K$7UVg`~jbpDNh;Z2Q8P>Wt(T=ts-O7 zj?XEny}Pamgrc1Sp2H_!VT?v4Y^I4)!PD@V!sV?p!TZp2Z)Rzm)yMEIxi^CYgP<}7 zV50yQ^OZv$Amb>QF9=P6Hfm+P>Y1T<^G0Igeo4WvAAMxS)$U`ynG`5j+&ffmCEF2{ ziR3qc{d5fIo(V^3sJHK@lab~M=g?EPlUHN1tPJO6-7ig>SQ%xh)brIVb!Qy92HVAo z17mc+CSi$_L>(_^CRQE3#{&#>5g)JrHK*A@Yez&^z#wVokR6wYnS@3%$_N7$w|~+a zh$LtqGs(z=mBj)gt+kn--&d%QnpdmZ(U4ove_(IuFC43;hS(_Hr_w^8f-cb}4S(6; zL2yEl8r$XG;I)AG^)AE`Xd#?umH*ew8y~Ptj6>!FL;>`pwQmPr$acNS06=F1Bav$M z!mZh(M{CqCC9GdZ5@VbA-J36-he!U#Q;(Q}1p>m~pNJ`V^Nb;t?bE={Eb!S-H~bs7 zpULt$YJolgBD{pSKm2KCW2`%1fB3}am_6tv!O0yo^H(p}mwYzCc8Q3A_h%ytepd9P z!_=u3ZvbYst;$X{q!n`#aY>_XwVCed%nWwTI6g<>y!R1Sg1iK~S7Wt7(@IBv4!-PB%@BFX?9rYT3 zy9ch}mc-5Muzpkc6!&)wUv6Qr>8akO=;#n??42$2~CrhYEk{Bc#}70CSR0y9{yF|q&yOW z@%~l~Q{K18Z%7LXT`IVC>E6PS%;AWwn2(9(?r6l1tm&k#mz=GoLVwEJw@JZx{^{t5-q!h^F zg3^>C6cf`&rxPzG5&0;oG)H|KlMoBGj(j#|>9%*1i>;8xSKJa&Ad(d&iQG1p`P zb(CXcP|`eB{$}1&6pu_O1m>Huo~)*GnGtb3&nxNz4kR z&`jDHjkKP-${*IHIO@NUheTY#BqifTis12?YJx=KO%n) zcNy>ppAHo5HM96|(~Q6-OMPx2)tdN_hgIC5_>I8Ur`jAEtUKs8_vIRBPC<7o(6{$0 zygSQYcw7{x>Ei-y()01lZ2BYQ3%=Y<>0tKYUJ#%cZbV-{mfPwZt>GIGjeXheI9@@u zcdWR4PMH_a3&Wa4=^Sa8k1=emgSRW?oq}G6btf2;opi zlIJZFipZPqd-8jZ;WCja)r9kJkzR%Hy*mMcBZ#Whr+Ea97;-?H5gEMc_f8&;pvKVUFknKT{Q=kK1IB=UR+v79w%$Rlw;0az=7X>ixnz0QQz;0cX5KtetEC_I>Ip{Me z%L1H7GIG>H{2n!EYgx&K62^K_fTJ`dw`5%JvR^uy-VtE5% zsDO<~@CP9b084zy6&^b|)MIa3tcTq#EAIn8IW9dJ*`gQoNS-*#xbg@|K>Hkhtq zpN+W`Q9`@F{SJ51**70E=Ioswm$Ixkzl0N15CnR*5ak8y^7T+7H=rqHmpIU{2Nf{q zZZ$$6``yq{BymDqP@pN=MB&F$Xb96z&}9r`JOmmqCvI~_R=0~rcD3h)Sjv8L&WNd@ z&rxr*Zuw=9Gl2ymjG1 z2?e6WK5F4jK#7AmU8?gP^>%Zw-WK{8!en^K@6st0inwD=V;b-f^BUk|lcjrVEk?)h z%0HUhX`pDy$j+=%g`;vj`OgfwwvRpwJdF_F5=OFg)AUV;U0bjHt$Z)`%EY{W;-rFv zfuYgBzoeL#%nI=aC=@W5FCt;~ixs7pcbPk8=@OP~mIRE{l^{)C7kqd%m8m4pi-96^ zsxmdGk8MV)3a@dr8_3x=UZUw&lC7<~!E8tS+iUX0X|A#j()Ua&PjVWh>MOIF*>Tag zSq4js(;AQ>M|Zn790U;tfJeR@=^fE5)ter=8&IRgA^uuF+CJhV?bCGRP%$I_7`MJ{ zk=B#j8&{lxl0M|dUqDjde_T zh`u8Medy+i5tg+g5B>6ro;m`PQ{Zm~N!LM2pTi)Mp#6@e_fF8=FbC~O-YuOcQk%>C zjLys}*A=x|!0rF3P^2z#!M{SSN2C}|(4!-;T?KboMJ_33!IgcUYiJ4xxT+Z~Lp!th zCZu>nnafMuCUJHTsz8-ys3XTJhhoZ~477naxS1Q!B?#&jW%a%-R!j`{SxxtSNFXD= zW1LqC!u@ex=l&XOmQ9*ihji0Stj}y%F_r^py?bIdlaKQJd@o85EJwiqiiJC+t6mBPx=Ln4)m+BCsgpV>cv&wCQ}+INsfG zJKlJ3dbdcrkbB-W*96j6u|`Q}pOUY<(VN})NlGp5+tEtIB#~u1dkCugiM-Lc0x_af z5-4a6VhQdknHMP+@4x6fUWRyiKLE3Fxq^+uL8kqq1{hHK*?8b36?%o>l+hAw7=z8F zB$6Qrg2ipwuzR751<|_G)f5m8{PH{C1h7&32ZX_!i1LZ4=;v2{&MI;DQGa8M*gpmb zVd;~LV}f3jhbGNCalM#2e_ObkSc-uM`KW}WV;Z+CX*1VUjhJ}q9YmSp=!5d9Z!{~G z*~o?5D1Z_#MM%wI&x1O?(p>x*X(COS)!37 zn`3#qMGF75&?a9K@O3*F(}|hgNok@QJ+;p$yGp^Bs?+i!wj{Ma&zP6=RQPBiwU2s% zo;ombN&66VSSZQ2bc$)rfR~;l3G?AqvyzF zFqcO(8eE;MsncJ%CFq7H{S_~2NmqO#sI5lD%h55yq|Dct&#^IrUE#2{8G5y|!^7G+ ztiGfvAx-jDZbSR#5{lOjUNhaCFB@osl0C7VtIOfzr$4}K8?|A1_I1b=I{3Ws1Jj`M}QVK0M(tT#NB?*R10*{ zK(@`l>#5Qdlsl$PJ<6VYoKa`OlVZ$$KZ3n-S5>G$5FnOUYLU=HgVzWMy*kIoz(ck^ zLwA2EnjbWcNUVfKs#-#=dhrU{`vH;VyxHJU<4d2j^lw1VrMtCmfEA6x2=D#kUYz zZ3l47G_M424r^OFUe>F(EwY$~R_w@v-7Nz&A z5&V~Zl#_%tQ^<(QN(< zK8F9;0;TwOP3G^`SOM4HUnT3`-~YFBykBrU@IP{n)BYvrIP10Dp-2J_JrAh195kB#Dk;S#Af6MFd0ygaiam*HF(;*I4&U zZ8Dl#4-w{9)aV0!fD%2EEyG=)zmfv}60*l*pQO0|=-~bB>*v2xEQp{F>*Yi8KQxnK 
zqEP$wm8#yB9j1SWWBX31M$W=WD{~{D8Ph3l_w_rXq06x8jTj*JPw zr71`;NHHP>gZK_%&#BB0MS*6f6y?`vM8+I$1`VUdS{c$=k13;CoCm^Z%CAgd21YOf z>zJ5nmQlVR3h?YNENIo!By%zsYFsJDffOLNZ__yPW&(f>E z@&W(YgcU04)`+T~kBP=Rw&`inY#_1q{*C_glyeT^0{Y_!2w>{pL@RqCQ`wsIdJWGa zP2k!eM3r_xl{~h6B&6l|^<8rmKKLIWd}w$9By{Aa?!%#y&G;pVPtzHP9yt#=$22Ec zqpW`dUcn?9i0RftbEC!WR3q)qvgY993IgRRV1?x3-OH0lnz7BmM2eHkjMgb)6dD}G zn#gX~2ckpQL5%|2rr`55>1i@Kj8!k)4=g~-JCUNTh90z9HfItZE#FYN#)kzGpU2j- zMoz?R z@}h_LNU)b6Qq+qxmo7ZH4Vi6;)pPB29-$YWQWVN6iItj&?yEr38meDP%X`*d z5x=f6QDY8V$Ip<;FghY@6?q1nfr5x!#4ro$#nX$=6VE+Vw8*H{3JTq2Y@|*XA1ht} zGSV5DisX%=3V4F}V6mBErp|DIS!CpDpsLXyupiZ5jlcULxauXg!wy0>RD=10?N`1F zaFrcVjQ;Z4&nCo7vkLGa0~|=GvtT9^_Ss=mHT@&-?LJ!oKP*87!x#fJ7X*i%y7Gch zv$o!pRwWeWrSfHrjl(AZ;lPp%i$%PV?A;ksV^w>|(-#8gsbZCYj=|_3J*HX5eL{23 zx-IXg?n8{!`oov`V){Dw#ep?Rtxy6iNY*phX#keQNUcm}pu-!IO}3D3;t$;SKV?7W zjF~bY`3bqb-weA%R5I%&IAS_s5+^Nkh%?rGV<#>7-+W)8s^Wh7sd#%qCU^z0ymNm( zJRNQGg3xdBHT_J)+u`Y@o2@veow2KcCm_gpEdsEM#uLneD?q=o2XKwUtKZm*izad< z%9k`=d5Fp9X<7Au50I-r{9;bT2mMpXaG!$-7b~L4lJGz@*eb6hrKbwXZ^H*~un5Vo z)u&)w>xz;suH|bdL#1Z`Oe@O4<+cs(N`@irPeB?=M)$zU@+`9@N`Q{~a}w|LZ79>-_ayW36r@sDotchZj9M_aK;h!cZwbFq0Zu6M9edJ!zgRmiR! z;#J6iyPokS)2AC;ua z-7a(?dTv)0&ez&nQPa)Xvj&(l9 zd_sl0+2L8~wjgK?1ad;|SEl8ja-zoRGuH2{k$H>uO$A({`!OlphZik}?~}Wk_AFiR zZ|jc$EZdIgjt~UJw1U7GK&~>;BK(aIv-x--hsYK}b-tFS@cGIX?b&<+X+JrwY*`O?YjFXNgGrc^5cP~;Akbl8eq*q z>xE<(>q0U-{4||U%uc-%3AN&)IQlutp^~x-rQ+5m*r~G)UBM^LTba|RsTFCLSy(f) z+on~erhWQ#H#X!H`w$D2>25W6WtpbP8)B}pSBEdC%YaGH71m;p8#lBv)?(MGwGhlr z(#hFGE>MpA57hbsGsRk|j5xT)&Q<&l8QMe`Qu{0cDb`|s&ZS*QIpz~TVM5K>U=TwM zC^(Bc!;bWcDiABIogJ9zwdvhN_S)b!wKa5Ni5qH-HMN*M4k(nAdLT6#pvHkR_n9@_ z;e#{Re84-SULF!HxXv~<=zB;cr)N1ac?7@OP>;B%58{jpuMVfCDprp?=EJszheOcteU2~<^_-Vb^Q^NH0$+8~>)%dE zl+TD_3p6Sn*5?brhk=H8+Y>aAAAqxC2vkn4Rtnec^54o>pwxSN)2{ZvNh4eT+URIg zD8tGtcPo#qkWZL85ob&?F_IIyyw1&{`1p>+S~uza4VuY^<@HBsXfJ!EQL%WvW|FaS zx!#NvhhgPMBi)pr^&lS6L=gWa96;*Cq`0Ym| zDylsW;d&Gl#A6vPNmrJgQ=!MY>?7gn#l13Xv#vbWE3W0)u$uln4xUUQ+J(w;^^Dgf zGczN#9EQ+m&@LCY59->Dfeo$ykb%E8kX6?tLa$^>;36?&!g_J$mvURTqeK>|l5Rv+!% z8M<3%d=z0uQvw^3xey^?iLz-6m;&Z-xI%#zNugXM5(2wEvXO&eu|t8jFHA+%4A4!t0&XC450@IHPK;vtOQ=%nQ=$#(RN6 zX$z)*0FJDyvi0={d0bX?$y&s%E@K<0+y+eRKRo+pf%nr9>@^^m=q;pTmZ@Wefx^%t z{)kjj9FAO-i<*Ju?aQh;b8$vT4w87kwQDg!y&*(`D*rYOwO-026<|*x zxJ!p>j&^34kx#R%6Bq=i5802U`#w;W6{4746xDO}a;FdH8UMUPaG>|U-Zg2p8IwUj zRTS3#mh=L(6X_F$jr#+LTLsBg*oLcp zY&mo$eFWG;jIn3T_8WT&BG9!RzPee1%`@F+|A%RXEXmwhBjIYcRs*pwor;RRZO8cf z&%Tt{pXSttu6^TU+>|)9GT-B(JbaVOR7bryGV7%&DK;ch(LGo&z$p?E0=Xeu^~UAv z=u0uA<~3L9H(ak&bG#@Ert8wt388wn9)68l?KLyh(lb7P-fXjf^qN)v?%nra`Ttvrqadp1vu^<|@T0_1Uw^?D z6_>ACTT_TkKWrMQ-#d4DOSw_60@@bQsZL*eLJ&6_me>m8O4rVw=HnIjc52e9vuhE6 zhTf*;aL0Rk*EKWIe{S2OmzPblMYDEDm|#c$Cy@z$Pg+%CEUF0j8f@M%8$_c!Ly9&l zB0prC8G$eFRCwsXIYq;dD3g@%2n7KYXU#6|tJmj~c8C@>BCdfI&M+@h?RfQgDR&gA znG<~`1Sw+?^LGlhvDCO<1KH)wqXO!1ks4;a@e0N2qYWsjbWcAT-|oC*Y{C3tN^@!S zK($VPEIMwfsXEg*HJ#{fmPS$R=3Tn*{zgA(hHUxx33mLC=vDu>(f@k{{h!(6a6JL| z-tQ0yz<+~~=2xO1fYvs`TwI28?z18|Sor*tC%)ON+izr|<@MHUoR{NmRdh(6su(zq2PM)>+_ zGer2;0tyGB%EglPA8e`&6!j(yxmb0RvK5+Zns(o6LgH19#yNFKc%`grpd=U|i~!pJ?gZ>vN5 zn5IG34t^W`u%!7rP3c!`ucg3o6CmE z#FzhdnO1FZK2PsJVZ~Kh+ zsWb`$k|sp>V#aUh^vCy?F#E7Uqyk(ElVH!y!yetwe3aYaF{SQ14<6%TNo!@JNIUU# zNWc6-bOy?7Z;|7&h{wuXE}V!JSINgViCzc~^q1J?2rCo>eni0}#2QE2e=i_*c)%sT zdsoDP17lVagciVto=VGHfOBbF6R2-Q+g>&ZA4SZ=7W&I_pX2wW{^p$UN6vq{DN51E z#M0=WwEzY3($)%}>JoiRUm<*v6U-?i&H>f#29n_jhd~5{%+lf?3OE_LL^$^7=f>Mj zG`s>ry7~j?ZMR{sL{m0I5mBf)Q5hfie*TzB=X|{E+Vlcw7om(*`v5~feUe{VE*Ow-fZ#QmC~WjWz)m`}J)*(`vkVYuA(4=Y-j=Yq!rJS(c~;0!<(Ih!lr 
zoXgXI(<*b`6jQtN(6Z@_Z?~lj2$i!jwXbzHOJEmK0~Y*1esux?ISRdZfL0aNcU{is zj7p2oCSr@bm#42$xM#CYin?NiJ|@H6_~`}bq|t)xnJIL6iv3?hi3MoMld_Yo-vPg0 zyb0|L)fP%QlT=b0i`{N37$8O^)(EW5^REzP>O-#lW(%Ja3=T{#1_>VnR86i-ebsiX7yqj8`@jz7^+Tc=8*4Mo|&feiy3*N~9P3&!%Uo@(>vGJ)!R7}V8IC=iWeKN6wr zYtzPm%r=%AGuIVf7nZ*_m9DHgxiV3X)Dj|cIv-n8SiI~YOepoe%+6L>iDs_{caU_V zBFdxJQ1Kc3Z6Vmd$4x}w6<0Ef&$MOLZj;Ya0JAf#aeZE3YuN}j_a%bc1xAobmGj7@ z2lcnxl!{h*1oX*Y?2r8acEyvDot~4Co&7)cI4e|F>=l*KKGuy}*pGoBn5Jt>@$y-z z$-|J%ge6wY=LjsRK~UY&G8<86^EZeJ*tpnE$WEHW8F{u?d^xb;QR}I8133)Jc-VIH zsyd$zaJTU8ovj4;j{KxnnKqp}FWtwU8||$;M1R1I&EQu<{Xll%YNySi-K zwsCszbK~9{C*r(cc4S8Wfy|7|SZj_kB~Z>(h)+0~lle{$-($p&G}yf;531}f;^tJi zQztY)nAeq1*Nsg>)~n-lrlPS$Q~A;w8n`5^R#pGq$m^Do>zt8l)^k}!z$l_R-kF~k zyOuK5SrcisjJO$VQ_`VDa|3{(<9?nz)=C4B$0S;7t|%Eh>iUX@6h+g{+ao7HAvRgX zC3fGenja459=g3MyXzWLktP$r1!c|*!*v1(U?=)gQLmeXGIym>I6p2aA);;Rrj8-oo^s-1T0LZVjViarr@ahw!tnrfs`gVh z++bY>yE3^L-Qy6w@!L-VzEpzh1gg_ZM6D!nTKE0ef=mbB)mS!wOQtsqjfA8zdpTY% zeVmL69eT$`7mFGEhoofFwE~c2%$&dHU(!Oyaq%?Scai1a>RR$AK=&*wF|N}ld8qNEsy2OQF5_>d67WV5JBilgwJ8@S^qwc4)@Cy`d{jO7 zWbPTmr>a}HRk;v^Qj#uaRoREo9o_TlIF2HG#=>($lqO`T^zo{i+S(O|t|9B!_%jAyMxRzrx&ymVK@qXIXe>7kPRrVAcfzTFT}kevt2PUh)y|?kKDp?b(O6> ze>hNY#W7&~IQHl#*xOnrUV#FR!IIMBoo}7n?H zGOVz6_5Nr)dUX$Lo0(uPQkGQa49&>JUDb4>Xt$jS)Z#QP|$&%gqT&lsG6kC>|Yn9p*C!^BE% zv&)Is_p^txtQd?fW=9pJb0mh@43~AIl>j~l88uN6%EJqJ$_hg`KBE509x0TV_P4#^ z#zem*1)zR7v#jjLej_>W*8{3bXWC(oyCWGf(C+B`Zrt=dhQSEcH;42eSGnx{Yqa1hCDy1=-Za+6u%K%bkPhvcL|n*gWCp5LfEH95Zcg`yd!EY z?W)fuzoYN%u4PFW17)zdR9?`mF74{e-tMU~^V0lw{M}gzSK}5353BK)+G?^BSC!a`5C8(UKOgkm9y?;YY5A`XSr3 zL}sLM+d}tX*C$?yAzNfi)GPk}+fb|nDFKC>!qa~<@}2}QzL?;@ej&mCr{vK8_;*mn z$<$QX^8fKYOliQls2pYg_`ZCYU1#%f5J7>q1qphFP6q9n{@EkiBMSPL4fq43NHHD^ z8-$gXeygB}s!g}57Fi8*k7}wF$)*MQsu8Ce@T9t7XUk{KXQ%nHSnaySw_)oqGWOl; zO~Jsd!8h;xoaNB_^7HlY7|c}H;|4Rp-BwquHs-Z*(OR5{EjC6|`>UD#8N_#M9Q|39 zoa=>fRjrH^Z?1?3zHGfpIGV;A^H*M&9_KJPntP%iFluHK-^k;-8QghvriS+7Q3v`m zk!{QUzQ^ym=f{v@)cMJqcaZs-V)C3v`WW}=vk^!=^cG!RUF{@pqn0N$?KU1Qp%922yOKCM`EY6%8Ky@QFc^!_u6HQ&D@ThAEsQ5CZp2*W4T(yhljKE; zAWaE5$Dv*6+Xsu?Y_L950Y{1yM}gHcootvWXhsiI#17Y}Pk+ky*o8MuXz1-Q_-GP<`S~`a4*85!U41oP5gou@^88W-W{(;{UE-;%cDy zgk4F_^yYQfW5+L(kTkb?-iMtj%;o$2TdHC|bbj;E!^~ytJuWIzu%C@ozLTdxW%z*l z+g~Yg)5IKZFwBJ?d(BsR*bYs;Y*m`M%Kr55P*RJcl*qID6RoqP_d7<0#hNM~SuJ$k zORR}V&3FqnWqh4u>OxToJA_JK0vLqDsKeiP)qeLEd2xIhV-xD)iG#BDWet%l3$7r^p>lYr}rX#~%3 zobJO(Gu=J$P#)F>-yY!sF9GCZ?3sbw7{&PO@A854Ce3d~Y+#z{W4r)pz9;%>$tr)3 z*u&EH34@qwj4@tVv!o%#FqU04;#^|dNCsW-#KSF0STB!&kS*Pb(yAnCY#Oj>G^1$~ zv$@t=kouIO8K15#nYxu5VXoHWScj1gTvQ2~>P|v0C9pyvCmL$J;4H!Iy^C@pQ(oQj ziHxr(Ekb$j!SeCUM+Jr3Ii0I6FO8q%$pq8FyG0icBq<;KQU?^Z6nu@TDp+B(R?(XP zBr3wGgw~h4p}9d48>QRVDD24AY0e8|Rn9i>57AwE86s2A?adji$*SyGTd`??ge3@7bO)z z{WUy`v$KimF{$guPLipqmLTfbRpcuA@+H-g356{fpHmoHVsa0pvX}C56fzy&CMz15 z_nKR(GYR6c+`uz2!v#zfoa;!jM75d2>j=d?dmiOmwabWK11|3+XgQl#(4&6`o=Aa? zZp3L)%A!&d0BX{!jGgr|cu0erH4uxTN;tYATH@Ht(Qtovzweb49888fT9Sy3c)X8S zXzI)FCpbbR`!RZSXdLl4^+v%ps$w;903aKbHeBKhH>r1zfzb;L#W)JEAy7?9j7S?M zt|f2mnoky&LO*3a3(?3KM+;*XE@(M_?*CD9X0t1F9LbSmBQldxx_H?g8H%f*Jp>h! z!ncL&sWBAQX>{d6cXFky!?J0KUb?~sRIJa`hXqhZ98m*_4s88&F>b^OAVS=Tb`D^g zStOyIHUYS_p<`3F6epz+30aC`Vd6yg#GWxNk+}ebKDM?t2=0|LQ>x=)y%b~^ZZZ9G z@ukHt2Pz4ej~9$)rHbX5p``(n1V+ByBhAyPMq5J_?p|2kbBgb7Yw`9Q%%))eA103!f>1Ry z)T!IF0e9Sv+!&Yj#mb54e(h5Mcza~u$kUP=$S$#_XVUy}#fJso*jTgohpU08;@pkh zFF>DLw-tQjiUBGl1j z$s_Sn2Y_NDB}?FP)_4_YKeprslb19lOGwWg;NYRjhhFHyGAOKQU6gS7Mgm$l{5pY! 
zCs^)})t#WpyxWsU3*qG)l{nPSh>SaQ%M4xBs$;w%pp0Y0wTiss-`F8Liy5=d96Dmr zC2EZ)2+A?WpHLz&2 z!;^GyCSK9;^b-y^r9A2~jzQb=Xy%BDfNitG!*sn4XH5L!)$E{1Hsrl7ohLvU^f(>q z+E<+=dH#gC>G(2KRs9yl(WzKUnusPV#@8z^?7_o00t{m`gD9DPQ1Yjl&3>dK2g7K0 z7c#^fGVbx;*&+LW8XqpXJ2cKg`_ax%c%LB=&Y4QvTs)y^(?=CW{%l=vV|7MYnP4Fp zCzJM`Kg|V@Scekr*5375Oy+ty_pGxKJ^Bu%Q!f~TL#P(I3{nW*B@0lU??H3n?&Ngz z>$8HrPD(@%++u$+M?CqR3{c)#^X=1XDN7;1g0PAhPYGP&!5%VZcSyq~?FW9{a|-;c za>(2lgEd*TFGbkSja0BU6TgdNWI;ue=)Eyxls+$F+99~N&>MSyZ(2*V2qGp z3K_NekPVWyVz`v#xa=jH5BSmw!-Z2aRzDFzlP-rG`?>39-b{$FOlDOIU;nC-+1Ib9 z|8bwc6mm;xSMG{&yth145d?dQ14CZRe9#1q^x(JBF?Q=~F25~IW{l18_k zdAFVUch#rkH~EyCA#ts!^(ogJw>PWke*@jX z(y1#XFDlrmQ!b>w$sQHP7ihwUt2I1jO>T9gVRu>yHjjA`Bgf5Ea+6ih;)T{}PtAmq zF|12Eu}Wp!71&g9Xn0M^@tm&z1phnj0)sbH$h1z`wLEz8gY61~TOer8RyV-tx~Q6F zoz6V4VXza}`^7@wFtG3(;tdCbiS`C8R3g}L(0Q-w1aDpjtp&)3QVJG;J}6IBxk?jCcvz+uhaEv> zy3?Q7T`O>qYtz&RR1G)|oEB|?7$lP(Kv`sJ(;Y!GkECGCGSa6ot;{C=CfTLyB;2p- z50YDhK9L@0bQBfh_3sKVNFVe0&)IfmtuEN`n~)Do_}PMH%njx%SzuEzl82|I-z{-^ zaXaEZBy+{c>Afw&XTTz#T&{1`q_q z3WP&_GNRv=Dop2!;|-62f0JAu3XG6^VqF&W9CQ9gr+9PrNasM7BN)^c{UMn+e)uHi z-(&d477U?&Ge6ub(?;f+`My?G2?z{^JgjIS{mza)Fke&hi>W@euhaT=Zb|7KXMbp2 z<@>aK{`EWbYWJo=;*DVqf8Dm`3H_?YRBCC&3-NO)BTd~8^}BN_Za`b>T-m) z-UfmDoyMR2lQsR<-ul0v#=#E)lvtlA#T-8f!!rV^Bew!nD6i%dsNcC0u;1;>sXv_3_mOTTx&6B>>+@1%8w>C^G^AZTrv43W!97jYO{rPvQBLsMQPl@?OO?}aAG0J~cLU#7n zJ&UTOvgBOvEax>NJQT#oSf>Il|j63l2PVbMrOav#o5Y8JiVUQuy>?)S|mHFtMSIjvaIVNaO zhxr%czVuj3gRD-@kT6U38d0{4wBl(R$_pyYFe_b+|7DeO}a z-MDpZ-V368G74%Zs!?^I)+*n9WTZ%fQ}z?PpD71rkrrjDHOD#F+Wlp?lp4Il%F4%m zWT__?WnyhTz96*@RY)q8-eaBn;CoPEJ#ay9C6lZu4h+OM3Z0Qv0uTRS*oa7C##68> zc-ZEx2Kt0k=$*l7nWXFCWM4{X^XTKy)yBu-5;|*mlj-ONy0VDJl^?Oo+d^EX{+mw) z%Ok&fr{QncL`0hmeb$zIw=T+!Zaf5AxzDrqHae-7Z=J1MG$vNzS(P@nokT7@uO5|8 z1*n(vkr%uD9pzWX?dQyu)wLZNYe6iWpS*b>3NAKTOu)9OYDZ9Sw89@gkoo+ z(3?ufnkGE@Yj4=V&>(0`H2&1zkr+l-C05KPafF4TVpdbv(2=#(a zywQg#1PTn4?BibYgXle8jPO0kd@qj>^vKuIlJbz-YPu3bJTd2|T- z=#NGvFWoZLOjy`FWr0-UYF-yH@`Po*qNl8l(l$zF^2;##k^WHjgr{SwlaP(7))1dg zm@Tiw=IECZ4*?DSZsJhjC;1&>ELb$!F-kVvqXe ze&~=fFO^?kE?VqEZa9TU;_7JTu-`X~tM?t`m-|3Y^zaIlLZej6y@Y2=8)MHSE#k_2JhoG?Q&_=g|6atRU>td<)=27BOO4w zdJNA)1Fioze`+!2H+mAM`X%*|t&o7ZC1%L>%(k|PGJ3=s!(2r2nnMjO}e1jQ?LxrM{D^ktc(xhYN$1vlA0LyD7telv)1QbmRX^NOmj9 z$`AaLEpDJ|Ap+gOEEYnrS+S81ff^Zw=|i0h9rQsLmR-rSYoEh35`aD7lb2=H2h7D;{(6YfpXmE024Wul*+fl zM*v1*#9hSuS$6X|3!>iFE3b33kL=9Zf?70dr?oi?W!|MTT=A0(<+UJqxE_Aqci)B% z+zUt7O;)&e&73`@o-?0KwqUN4Mpkw$-q^^`W*~}{mUx1{ao#w%U+1@V30kQ9M8*Q> zS6#(Q>GTj@HEVd+TuV73Z?gw+pTNZupRow2#H_j(iruC1UMQMknJ}da0jGp}hI9)c zjnwhn<1lT0Zn%ryXyB2-v6qd*S-p$epclkr8+nBpi%c41nTe-}NU(hlk1&2GodgO6F>%rXKL;glJnU=GjHigCr}nIdtk-M`S``$vnQkJ0lDL|Z2+@j$ zzsYhSagCk?wN<-ei|jTqYObt*Od?5i=fA*GqbT>3;^Be+B1^9~vz@2Aoo`(HzTfWA z`atpjLK4x3#1jIyk?plasYOX)CR$!`j|RI+3L-+E)Yf5R+o&ItVn5@X_1~oB~oH`g#V~aBjYPf%wgJ^T43Ou!08cd zX_*cS;g;rFvUZ@@kUr6*=kVjne#7oe{xEniAWL^p_>u0diiu=%WGB3;&}IT^v7K5j zHMsRDBQjocggB{pp{MhuVSS-XgzEgYHmL@8D>HN%+Phz1jn+2RAR zNKttQ5C;wDDvO<#=T^GJxsdt^Lr`^u1qOwktw!ortL7OWvoz5|rr%=>;)cW|A8bQv z7QO~%kmtjQUAwZhc&NanbuvFFnX>QUI^-K5n5rSUrGw0Mho7HOg< z==-@s^gnt9D#F7cNfT~Dnc$I}@D55wKqUiZznp_tU|dASw_S!Y53|@vVG)?c+mTSf zEFw%4{%Gag5ZRE7?v315rp!I1z9cTVG!$HOXIt;{BOz7{kq};yv}mJVl%I=y{Tiyh zT*O;CF_t``E%piYd5Vki8l#ywgLFYrbdSkTJ%)S}QE-gkhnn?C?=sA&g$Vfl-$9KE z*gtzu|Mr(;|1oj?KZbq&`;~M4|Eq5{!^% z@cYPLjY6D;`VC8_!I<<2K7pgdKLZgmFvAi~GZDTc1q`A(W?TfTd>P9vOT2Sgysx^S zkFwtG4+bTEl`@ixh&7-y6`4Hlwgu^7DUx2y2B7TM%8vlDkk>7t%uU8lG2$AL!AW7I zBw19)^l{;+Lp4#j=iw8 zk?2Ll5qCY!B-;24Y`)58sNplApK~%@#cw|^cNJA{ah7uv$FHZEH+k9oXhYA33R*?P 
zkYlGoKiD%gl+aUPU1CRPweTR#dsEzLuMhyQ-=a=wXEUfr_m1X8X`{P0lw5aYK|d2a+uq(J98FxpsSf3nkk)Or z-LHj$C7avqj6WcWM9C^+xpxCs6UOjCPa8`PfPrDmjX5+ECva^&zP&zk4t;CTV2+T1 zY3igni1kBaqp=z$xubhE|l=AKa%67PBIw(zuqg6Qn9`6{bQJ(v<{lz{w`nw$Lw*J+8Na^JVtVQ(Q78 zoymXu@3cWUghdm>zYn9qKMCUhoeCHJAO8LPZ!nL$y|~`^8VS_WkMdK^8#({MRT=At?PzKse27T>?U7 zS6p-@SdF0V`jKe-#`Q@7)W+=w(8n-`&7ZKD-V_2ljtRn~k z{qZ2#=dKd}NA!u#{^R_bsm^t)-s+#!hqW7xk(>5SG~n994fVU>C33v)KJ%5dx&+0=wfuI)2fvrBLqe<3KbIo}JFX8TOAlq@UA4 z(XK=Ucc`un2r&-?f@?I7T#OKcnS<*!c>YV1 zVfT9c3o!mkZse`2V5d>R*xNnDXwE;fRJTk$_1i;yuWcCQ2Pi&oaXx!hr3&ei4M*<>o==}Q03udge7 zjb%)bNefFOb}dj_r-Y<%&U76J?*#x0NFb7oCh=hC6TV7Ym#6L1>!P=IL(Kt+B4^<$76@0i4DJpaU@SFibrGRnipFHz&1pMzbOnnjh- z*I1R+RHAwO5^|p&*Gm+6Ko8cq6{1$wYuCugk)g1(3?%WdDW21=@ilo#p>dY-J!(++ zN4i{ye>7a0k|Ix+%hupwbT>9ng{IPzWEV?~rOVrFBX!sk z3a<4>Mr7C=O+G_KhsoOMEDAY?!vvQ{$!p*>UYSCc8cU$TNKJFRDS=t3)<*cQOtUaj zLN1*#+$6TdFO#|4<|1|Wnm8_F96=GA&Q{JZ1S?Q=N`KhYWho+@U?La>>ag}I`Uq&7;mVX)&~m1B~th?GaNIZZQc8hc~{JD23;4kGQWsG>DccVZaf?vE^#=P3hGoD$H|YeTBaK&g?-GhQ@%i> z#UD-i&z})+SOhpb`glR8~?o2y`{&RnW4v zc4;>wwvfht$-!jOvZhyE{Bn%c7Z37wWmQ^|Qf*~UYd_O}9G@Okoqs87YdSOYOyqDm zO5u4%?lMoZk24Vrta4WM9?Id~0V7CIu{%;xv7cf9Vk@nrlJe%;ht27UE(l#9cknb@ zE$-$+6kfyo$U$dLi&>%N{>6T0T!)ePX0uI()orZc+n4ky93+A2kKsSkdmJ;#>(Y6z z!mQTH)*P|f4%8`$hKFl^qqknD|4ovm$+IAl7sqw9z75sZz^}-aFutkMXMc{dq@i)n z$}7XW%6fLbk_p4^7-nmdl8g~YKo(QhpcJe;QF{qJyx+|`vmF;(HqkPTO8L8ZzJ3ml z5iU1&_GPW6NAnwOqh8CNOl1l|m98j72KsIwoZJeYV_q8QX~^pZ{eXK5z!&5mJ=218BUkfTvFPiPYDTPkvM8n3Wb04qX}`v z%9jf!h9fJjPofWf61%ukVjkK!c!prsEC^?nvUC1Ujs?9d=C>zHTbH?vau%1@4el1A zv#Y4B&=UnC1>Auq8htxZQDvg$YU}byI1QkBM*MnhlbfPgF3Zy`?(1m8+ z{rK-+jXb8g*mOqYa1dppqHrnUZ~~q7*q&{?uf=Kn9Gl3J3}|F^Fg7LJ4#n)zmg{ze z?Nq9)EGfmy+430+$;GshACl|FDlIDt__I9D-Plg7TNhJWW`PKV>lEhOIf?MSNd+xtS-`qoGbaU4YiVhU=w55<$*XcM z;N~~-yi{InrVAKaf&kIf2UBB=mrZCl6i_#%jSeB_Q@Tv07cqL5E?*Wm&ut3dL5hh< z#$uU~Tv%BNt6p0?si~<3r_i2lx7zCm!Es$N?=S?c=%)!nT`a{uX+8fIsx7pd%WY;1 zQCVt>zpZH#+Gz6d*U@%u)gx*;or-t$$P(*tZ}8F8?v6;E>m3*uCn|7v+M0i_m_4=9 zi!B{xT5;5&ATp2KzxGCjffq>xoATj=6wq9ipF@n;==P~c)kR7%q9h-PdzLI*TPa|r zD4@Pr(@WVOKA?St`CL-+SzQU>*rN(k95w1UF;tgn++}$UE!MbADt=7{l?EJ&2m%_C zOvr*GD9h1(Kz6h=#=U$XcC>G~y9}v>$fCPue|H)*Il!Z9NkPc8Cb6x&UXi#SWr$F2s zkmet1NZFIEDBUyWYRARKAw$5>6$_4^u+Kio(U}qSHkFteW$Orw$4OI;YDy8XFH+o= zE4({}z8EhQYg-sgz$A{5u0ru>r-_*RR;sasFZK$UgYHwcbw*p*2C+@??Om&Kbu|0K|uG zWub8@mK&_Q+G5|oL+b&tZEN4CwNHtSa0^8DCuXhEJc#M4+2ikXYWU>A&%4$cqNJwQaXae`2$H!gTzA;@wfUa#s|0lOOhP(^ znDQ~>x%RtO@)it1W9%({omg9&53e>?l8qA{5^cr}9<+f>4kYmwX*mt4k0#4C@^yYL zx=fJ`jufMmZ7Mo=Yqq99(&{nT82$ijA%LyXIbn?B`w;XnT)JGWrbhQIo{A z0sWFMru8o4pdAYbY;L{MA(oD^DsF2_HhWaHi;MHsRD2fYcgft8oG!W=x)@pA(V7pG z;~*X^|`DB(ttm0AbJIF7;xsrcL z(vp0bv^pSO2PPfhRAFkDAc;LD!GD#?(7bXh@&n58)&jZ$c&=ZtMT6UA7ru|5YiTEV zdt4s;PTe6j`e9z*#JatUQrmwOSJ-uXk(GaQcVWBoWz^_Lc04DH)DGgfeErMSn{(!( z1i5^ZmO_N*%s?fNJ+z*^<9Tp6ec|i&A`pny{K)-oQj6934GT`udD<0Z^$q(i^zsc* z*A1iooYS)#aht`5@P*MO^pTUurld4)1MbNK{|%+zZnKs*|NED?ow#4W8{$jTKKH`W zdq!K2zrAja-JDgEgLZ|a@?&?}yg{7ibifU39=QMK`?;C)66 zkoDBtNv`C{Hrok>_sTX3jl>pEN(V{;SLLaiOme)@FBs^YUpwNEN45|&YrkoV4*5Z$ zHi+UScLyqKLL0W?RO&$zA{V$WdeO9H{kI~9spkP%d4Mq zddGINGzqraVw*sC?!qH#YtYN(G)r*jCCBKYoZ__(7yA=mPVNUk-isVAG^187@CcR-jbrY)MHstUU${RrGl|b&Fi- z)|#$M*EcA5S5K?@hD#~8AH;sb;|!h9sq!O)a{vk(+?4=Y3oU}nAi|L8|Jmr{ zID?<1iVZYlwydPs-JA&@>{tJku?!pqjrG=l9uRsRuWx!si|$~xS_55~52i-9w7DYQ zT7IDpw8@?lLPw~u9v?ArZCK@>#Yv{smh8;5d4YHfpgm1?>z#g1*oySY5&o-u+UlEg zHM_JNIB6R>rBH&d)xuDAyw($%x8ZiGmt;V384Tn$ATnD>8Q;Ox-X~X7lUH~d0m~B~ zVk!I`Wjkw3@T!_(xsY~ECH9iB4H+&5^s)(m38jTYg!yPcq{{oKOk~b;$i817o;B)a zig6b0;Wb;Nh^S&;z$Bbe>Ky5SyY*PRCCOq2akv)1whjLly}E)fl$e{{i3iPYgdtDV 
[GIT binary patch data elided: base85-encoded delta for a deleted binary file; not reproducible in this extract.]
diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/configuration.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/configuration.rb
deleted file mode 100644
index 5571d39b0cc47..0000000000000
--- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/configuration.rb
+++ /dev/null
@@ -1,105 +0,0 @@
-require 'thread'
-require 'concurrent/delay'
-require 'concurrent/errors'
-require
'concurrent/concern/deprecation' -require 'concurrent/executor/immediate_executor' -require 'concurrent/executor/fixed_thread_pool' -require 'concurrent/executor/cached_thread_pool' -require 'concurrent/utility/processor_counter' - -module Concurrent - extend Concern::Deprecation - - autoload :Options, 'concurrent/options' - autoload :TimerSet, 'concurrent/executor/timer_set' - autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor' - - # @!visibility private - GLOBAL_FAST_EXECUTOR = Delay.new { Concurrent.new_fast_executor } - private_constant :GLOBAL_FAST_EXECUTOR - - # @!visibility private - GLOBAL_IO_EXECUTOR = Delay.new { Concurrent.new_io_executor } - private_constant :GLOBAL_IO_EXECUTOR - - # @!visibility private - GLOBAL_TIMER_SET = Delay.new { TimerSet.new } - private_constant :GLOBAL_TIMER_SET - - # @!visibility private - GLOBAL_IMMEDIATE_EXECUTOR = ImmediateExecutor.new - private_constant :GLOBAL_IMMEDIATE_EXECUTOR - - # Disables AtExit handlers including pool auto-termination handlers. - # When disabled it will be the application programmer's responsibility - # to ensure that the handlers are shutdown properly prior to application - # exit by calling `AtExit.run` method. - # - # @note this option should be needed only because of `at_exit` ordering - # issues which may arise when running some of the testing frameworks. - # E.g. Minitest's test-suite runs itself in `at_exit` callback which - # executes after the pools are already terminated. Then auto termination - # needs to be disabled and called manually after test-suite ends. - # @note This method should *never* be called - # from within a gem. It should *only* be used from within the main - # application and even then it should be used only when necessary. - # @deprecated Has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841. - # - def self.disable_at_exit_handlers! - deprecated "Method #disable_at_exit_handlers! has no effect since it is no longer needed, see https://github.com/ruby-concurrency/concurrent-ruby/pull/841." - end - - # Global thread pool optimized for short, fast *operations*. - # - # @return [ThreadPoolExecutor] the thread pool - def self.global_fast_executor - GLOBAL_FAST_EXECUTOR.value! - end - - # Global thread pool optimized for long, blocking (IO) *tasks*. - # - # @return [ThreadPoolExecutor] the thread pool - def self.global_io_executor - GLOBAL_IO_EXECUTOR.value! - end - - def self.global_immediate_executor - GLOBAL_IMMEDIATE_EXECUTOR - end - - # Global thread pool user for global *timers*. - # - # @return [Concurrent::TimerSet] the thread pool - def self.global_timer_set - GLOBAL_TIMER_SET.value! - end - - # General access point to global executors. 
- # @param [Symbol, Executor] executor_identifier symbols: - # - :fast - {Concurrent.global_fast_executor} - # - :io - {Concurrent.global_io_executor} - # - :immediate - {Concurrent.global_immediate_executor} - # @return [Executor] - def self.executor(executor_identifier) - Options.executor(executor_identifier) - end - - def self.new_fast_executor(opts = {}) - FixedThreadPool.new( - [2, Concurrent.processor_count].max, - auto_terminate: opts.fetch(:auto_terminate, true), - idletime: 60, # 1 minute - max_queue: 0, # unlimited - fallback_policy: :abort, # shouldn't matter -- 0 max queue - name: "fast" - ) - end - - def self.new_io_executor(opts = {}) - CachedThreadPool.new( - auto_terminate: opts.fetch(:auto_terminate, true), - fallback_policy: :abort, # shouldn't matter -- 0 max queue - name: "io" - ) - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/constants.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/constants.rb deleted file mode 100644 index 676c2afb9a692..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/constants.rb +++ /dev/null @@ -1,8 +0,0 @@ -module Concurrent - - # Various classes within allows for +nil+ values to be stored, - # so a special +NULL+ token is required to indicate the "nil-ness". - # @!visibility private - NULL = ::Object.new - -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/dataflow.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/dataflow.rb deleted file mode 100644 index d55f19d850c34..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/dataflow.rb +++ /dev/null @@ -1,81 +0,0 @@ -require 'concurrent/future' -require 'concurrent/atomic/atomic_fixnum' - -module Concurrent - - # @!visibility private - class DependencyCounter # :nodoc: - - def initialize(count, &block) - @counter = AtomicFixnum.new(count) - @block = block - end - - def update(time, value, reason) - if @counter.decrement == 0 - @block.call - end - end - end - - # Dataflow allows you to create a task that will be scheduled when all of its data dependencies are available. - # {include:file:docs-source/dataflow.md} - # - # @param [Future] inputs zero or more `Future` operations that this dataflow depends upon - # - # @yield The operation to perform once all the dependencies are met - # @yieldparam [Future] inputs each of the `Future` inputs to the dataflow - # @yieldreturn [Object] the result of the block operation - # - # @return [Object] the result of all the operations - # - # @raise [ArgumentError] if no block is given - # @raise [ArgumentError] if any of the inputs are not `IVar`s - def dataflow(*inputs, &block) - dataflow_with(Concurrent.global_io_executor, *inputs, &block) - end - module_function :dataflow - - def dataflow_with(executor, *inputs, &block) - call_dataflow(:value, executor, *inputs, &block) - end - module_function :dataflow_with - - def dataflow!(*inputs, &block) - dataflow_with!(Concurrent.global_io_executor, *inputs, &block) - end - module_function :dataflow! - - def dataflow_with!(executor, *inputs, &block) - call_dataflow(:value!, executor, *inputs, &block) - end - module_function :dataflow_with! 
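The dataflow helpers removed above are easiest to follow with a concrete call. A minimal usage sketch (not part of this patch; it assumes the concurrent-ruby gem is loaded and the values are illustrative):

```ruby
require 'concurrent'

# Two independent futures; dataflow will not schedule its block until both
# have resolved.
first  = Concurrent::Future.execute { 1 }
second = Concurrent::Future.execute { 2 }

# The block receives the resolved values of its dependencies, in order,
# and dataflow itself returns a Future.
sum = Concurrent.dataflow(first, second) { |a, b| a + b }
sum.value # => 3
```

`dataflow!` behaves the same way but reads its dependencies with `value!` so failures propagate, and `dataflow_with`/`dataflow_with!` take an explicit executor instead of the global IO executor.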
- - private - - def call_dataflow(method, executor, *inputs, &block) - raise ArgumentError.new('an executor must be provided') if executor.nil? - raise ArgumentError.new('no block given') unless block_given? - unless inputs.all? { |input| input.is_a? IVar } - raise ArgumentError.new("Not all dependencies are IVars.\nDependencies: #{ inputs.inspect }") - end - - result = Future.new(executor: executor) do - values = inputs.map { |input| input.send(method) } - block.call(*values) - end - - if inputs.empty? - result.execute - else - counter = DependencyCounter.new(inputs.size) { result.execute } - - inputs.each do |input| - input.add_observer counter - end - end - - result - end - module_function :call_dataflow -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/delay.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/delay.rb deleted file mode 100644 index 923773cbcaf0f..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/delay.rb +++ /dev/null @@ -1,199 +0,0 @@ -require 'thread' -require 'concurrent/concern/obligation' -require 'concurrent/executor/immediate_executor' -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # This file has circular require issues. It must be autoloaded here. - autoload :Options, 'concurrent/options' - - # Lazy evaluation of a block yielding an immutable result. Useful for - # expensive operations that may never be needed. It may be non-blocking, - # supports the `Concern::Obligation` interface, and accepts the injection of - # custom executor upon which to execute the block. Processing of - # block will be deferred until the first time `#value` is called. - # At that time the caller can choose to return immediately and let - # the block execute asynchronously, block indefinitely, or block - # with a timeout. - # - # When a `Delay` is created its state is set to `pending`. The value and - # reason are both `nil`. The first time the `#value` method is called the - # enclosed opration will be run and the calling thread will block. Other - # threads attempting to call `#value` will block as well. Once the operation - # is complete the *value* will be set to the result of the operation or the - # *reason* will be set to the raised exception, as appropriate. All threads - # blocked on `#value` will return. Subsequent calls to `#value` will immediately - # return the cached value. The operation will only be run once. This means that - # any side effects created by the operation will only happen once as well. - # - # `Delay` includes the `Concurrent::Concern::Dereferenceable` mixin to support thread - # safety of the reference returned by `#value`. - # - # @!macro copy_options - # - # @!macro delay_note_regarding_blocking - # @note The default behavior of `Delay` is to block indefinitely when - # calling either `value` or `wait`, executing the delayed operation on - # the current thread. This makes the `timeout` value completely - # irrelevant. To enable non-blocking behavior, use the `executor` - # constructor option. This will cause the delayed operation to be - # execute on the given executor, allowing the call to timeout. 
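The Delay behavior described above (pending until first use, evaluated once, result cached, blocking by default) reads like this in practice. A rough sketch, not part of this patch, assuming the concurrent-ruby gem is loaded:

```ruby
require 'concurrent'

delay = Concurrent::Delay.new { sleep(0.05); :ready } # nothing is evaluated yet
delay.pending? # => true

delay.value    # => :ready  (first call runs the block on the calling thread and blocks)
delay.value    # => :ready  (cached; the block is not run a second time)
```

Passing the `executor:` option moves evaluation onto that executor, which is what makes the `timeout` argument to `#value` meaningful, as the note above explains.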
- # - # @see Concurrent::Concern::Dereferenceable - class Delay < Synchronization::LockableObject - include Concern::Obligation - - # NOTE: Because the global thread pools are lazy-loaded with these objects - # there is a performance hit every time we post a new task to one of these - # thread pools. Subsequently it is critical that `Delay` perform as fast - # as possible post-completion. This class has been highly optimized using - # the benchmark script `examples/lazy_and_delay.rb`. Do NOT attempt to - # DRY-up this class or perform other refactoring with running the - # benchmarks and ensuring that performance is not negatively impacted. - - # Create a new `Delay` in the `:pending` state. - # - # @!macro executor_and_deref_options - # - # @yield the delayed operation to perform - # - # @raise [ArgumentError] if no block is given - def initialize(opts = {}, &block) - raise ArgumentError.new('no block given') unless block_given? - super(&nil) - synchronize { ns_initialize(opts, &block) } - end - - # Return the value this object represents after applying the options - # specified by the `#set_deref_options` method. If the delayed operation - # raised an exception this method will return nil. The exception object - # can be accessed via the `#reason` method. - # - # @param [Numeric] timeout the maximum number of seconds to wait - # @return [Object] the current value of the object - # - # @!macro delay_note_regarding_blocking - def value(timeout = nil) - if @executor # TODO (pitr 12-Sep-2015): broken unsafe read? - super - else - # this function has been optimized for performance and - # should not be modified without running new benchmarks - synchronize do - execute = @evaluation_started = true unless @evaluation_started - if execute - begin - set_state(true, @task.call, nil) - rescue => ex - set_state(false, nil, ex) - end - elsif incomplete? - raise IllegalOperationError, 'Recursive call to #value during evaluation of the Delay' - end - end - if @do_nothing_on_deref - @value - else - apply_deref_options(@value) - end - end - end - - # Return the value this object represents after applying the options - # specified by the `#set_deref_options` method. If the delayed operation - # raised an exception, this method will raise that exception (even when) - # the operation has already been executed). - # - # @param [Numeric] timeout the maximum number of seconds to wait - # @return [Object] the current value of the object - # @raise [Exception] when `#rejected?` raises `#reason` - # - # @!macro delay_note_regarding_blocking - def value!(timeout = nil) - if @executor - super - else - result = value - raise @reason if @reason - result - end - end - - # Return the value this object represents after applying the options - # specified by the `#set_deref_options` method. - # - # @param [Integer] timeout (nil) the maximum number of seconds to wait for - # the value to be computed. When `nil` the caller will block indefinitely. - # - # @return [Object] self - # - # @!macro delay_note_regarding_blocking - def wait(timeout = nil) - if @executor - execute_task_once - super(timeout) - else - value - end - self - end - - # Reconfigures the block returning the value if still `#incomplete?` - # - # @yield the delayed operation to perform - # @return [true, false] if success - def reconfigure(&block) - synchronize do - raise ArgumentError.new('no block given') unless block_given? 
- unless @evaluation_started - @task = block - true - else - false - end - end - end - - protected - - def ns_initialize(opts, &block) - init_obligation - set_deref_options(opts) - @executor = opts[:executor] - - @task = block - @state = :pending - @evaluation_started = false - end - - private - - # @!visibility private - def execute_task_once # :nodoc: - # this function has been optimized for performance and - # should not be modified without running new benchmarks - execute = task = nil - synchronize do - execute = @evaluation_started = true unless @evaluation_started - task = @task - end - - if execute - executor = Options.executor_from_options(executor: @executor) - executor.post do - begin - result = task.call - success = true - rescue => ex - reason = ex - end - synchronize do - set_state(success, result, reason) - event.set - end - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/errors.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/errors.rb deleted file mode 100644 index 74f1fc3ddab9d..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/errors.rb +++ /dev/null @@ -1,74 +0,0 @@ -module Concurrent - - Error = Class.new(StandardError) - - # Raised when errors occur during configuration. - ConfigurationError = Class.new(Error) - - # Raised when an asynchronous operation is cancelled before execution. - CancelledOperationError = Class.new(Error) - - # Raised when a lifecycle method (such as `stop`) is called in an improper - # sequence or when the object is in an inappropriate state. - LifecycleError = Class.new(Error) - - # Raised when an attempt is made to violate an immutability guarantee. - ImmutabilityError = Class.new(Error) - - # Raised when an operation is attempted which is not legal given the - # receiver's current state - IllegalOperationError = Class.new(Error) - - # Raised when an object's methods are called when it has not been - # properly initialized. - InitializationError = Class.new(Error) - - # Raised when an object with a start/stop lifecycle has been started an - # excessive number of times. Often used in conjunction with a restart - # policy or strategy. - MaxRestartFrequencyError = Class.new(Error) - - # Raised when an attempt is made to modify an immutable object - # (such as an `IVar`) after its final state has been set. - class MultipleAssignmentError < Error - attr_reader :inspection_data - - def initialize(message = nil, inspection_data = nil) - @inspection_data = inspection_data - super message - end - - def inspect - format '%s %s>', super[0..-2], @inspection_data.inspect - end - end - - # Raised by an `Executor` when it is unable to process a given task, - # possibly because of a reject policy or other internal error. - RejectedExecutionError = Class.new(Error) - - # Raised when any finite resource, such as a lock counter, exceeds its - # maximum limit/threshold. - ResourceLimitError = Class.new(Error) - - # Raised when an operation times out. - TimeoutError = Class.new(Error) - - # Aggregates multiple exceptions. 
- class MultipleErrors < Error - attr_reader :errors - - def initialize(errors, message = "#{errors.size} errors") - @errors = errors - super [*message, - *errors.map { |e| [format('%s (%s)', e.message, e.class), *e.backtrace] }.flatten(1) - ].join("\n") - end - end - - # @!macro internal_implementation_note - class ConcurrentUpdateError < ThreadError - # frozen pre-allocated backtrace to speed ConcurrentUpdateError - CONC_UP_ERR_BACKTRACE = ['backtrace elided; set verbose to enable'].freeze - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/exchanger.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/exchanger.rb deleted file mode 100644 index a5405d252283e..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/exchanger.rb +++ /dev/null @@ -1,353 +0,0 @@ -require 'concurrent/constants' -require 'concurrent/errors' -require 'concurrent/maybe' -require 'concurrent/atomic/atomic_reference' -require 'concurrent/atomic/count_down_latch' -require 'concurrent/utility/engine' -require 'concurrent/utility/monotonic_time' - -module Concurrent - - # @!macro exchanger - # - # A synchronization point at which threads can pair and swap elements within - # pairs. Each thread presents some object on entry to the exchange method, - # matches with a partner thread, and receives its partner's object on return. - # - # @!macro thread_safe_variable_comparison - # - # This implementation is very simple, using only a single slot for each - # exchanger (unlike more advanced implementations which use an "arena"). - # This approach will work perfectly fine when there are only a few threads - # accessing a single `Exchanger`. Beyond a handful of threads the performance - # will degrade rapidly due to contention on the single slot, but the algorithm - # will remain correct. - # - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Exchanger.html java.util.concurrent.Exchanger - # @example - # - # exchanger = Concurrent::Exchanger.new - # - # threads = [ - # Thread.new { puts "first: " << exchanger.exchange('foo', 1) }, #=> "first: bar" - # Thread.new { puts "second: " << exchanger.exchange('bar', 1) } #=> "second: foo" - # ] - # threads.each {|t| t.join(2) } - - # @!visibility private - class AbstractExchanger < Synchronization::Object - - # @!visibility private - CANCEL = ::Object.new - private_constant :CANCEL - - def initialize - super - end - - # @!macro exchanger_method_do_exchange - # - # Waits for another thread to arrive at this exchange point (unless the - # current thread is interrupted), and then transfers the given object to - # it, receiving its object in return. The timeout value indicates the - # approximate number of seconds the method should block while waiting - # for the exchange. When the timeout value is `nil` the method will - # block indefinitely. - # - # @param [Object] value the value to exchange with another thread - # @param [Numeric, nil] timeout in seconds, `nil` blocks indefinitely - # - # @!macro exchanger_method_exchange - # - # In some edge cases when a `timeout` is given a return value of `nil` may be - # ambiguous. Specifically, if `nil` is a valid value in the exchange it will - # be impossible to tell whether `nil` is the actual return value or if it - # signifies timeout. 
When `nil` is a valid value in the exchange consider - # using {#exchange!} or {#try_exchange} instead. - # - # @return [Object] the value exchanged by the other thread or `nil` on timeout - def exchange(value, timeout = nil) - (value = do_exchange(value, timeout)) == CANCEL ? nil : value - end - - # @!macro exchanger_method_do_exchange - # @!macro exchanger_method_exchange_bang - # - # On timeout a {Concurrent::TimeoutError} exception will be raised. - # - # @return [Object] the value exchanged by the other thread - # @raise [Concurrent::TimeoutError] on timeout - def exchange!(value, timeout = nil) - if (value = do_exchange(value, timeout)) == CANCEL - raise Concurrent::TimeoutError - else - value - end - end - - # @!macro exchanger_method_do_exchange - # @!macro exchanger_method_try_exchange - # - # The return value will be a {Concurrent::Maybe} set to `Just` on success or - # `Nothing` on timeout. - # - # @return [Concurrent::Maybe] on success a `Just` maybe will be returned with - # the item exchanged by the other thread as `#value`; on timeout a - # `Nothing` maybe will be returned with {Concurrent::TimeoutError} as `#reason` - # - # @example - # - # exchanger = Concurrent::Exchanger.new - # - # result = exchanger.exchange(:foo, 0.5) - # - # if result.just? - # puts result.value #=> :bar - # else - # puts 'timeout' - # end - def try_exchange(value, timeout = nil) - if (value = do_exchange(value, timeout)) == CANCEL - Concurrent::Maybe.nothing(Concurrent::TimeoutError) - else - Concurrent::Maybe.just(value) - end - end - - private - - # @!macro exchanger_method_do_exchange - # - # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout - def do_exchange(value, timeout) - raise NotImplementedError - end - end - - # @!macro internal_implementation_note - # @!visibility private - class RubyExchanger < AbstractExchanger - # A simplified version of java.util.concurrent.Exchanger written by - # Doug Lea, Bill Scherer, and Michael Scott with assistance from members - # of JCP JSR-166 Expert Group and released to the public domain. It does - # not include the arena or the multi-processor spin loops. - # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/6-b14/java/util/concurrent/Exchanger.java - - safe_initialization! - - class Node < Concurrent::Synchronization::Object - attr_atomic :value - safe_initialization! - - def initialize(item) - super() - @Item = item - @Latch = Concurrent::CountDownLatch.new - self.value = nil - end - - def latch - @Latch - end - - def item - @Item - end - end - private_constant :Node - - def initialize - super - end - - private - - attr_atomic(:slot) - - # @!macro exchanger_method_do_exchange - # - # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout - def do_exchange(value, timeout) - - # ALGORITHM - # - # From the original Java version: - # - # > The basic idea is to maintain a "slot", which is a reference to - # > a Node containing both an Item to offer and a "hole" waiting to - # > get filled in. If an incoming "occupying" thread sees that the - # > slot is null, it CAS'es (compareAndSets) a Node there and waits - # > for another to invoke exchange. That second "fulfilling" thread - # > sees that the slot is non-null, and so CASes it back to null, - # > also exchanging items by CASing the hole, plus waking up the - # > occupying thread if it is blocked. In each case CAS'es may - # > fail because a slot at first appears non-null but is null upon - # > CAS, or vice-versa. 
So threads may need to retry these - # > actions. - # - # This version: - # - # An exchange occurs between an "occupier" thread and a "fulfiller" thread. - # The "slot" is used to setup this interaction. The first thread in the - # exchange puts itself into the slot (occupies) and waits for a fulfiller. - # The second thread removes the occupier from the slot and attempts to - # perform the exchange. Removing the occupier also frees the slot for - # another occupier/fulfiller pair. - # - # Because the occupier and the fulfiller are operating independently and - # because there may be contention with other threads, any failed operation - # indicates contention. Both the occupier and the fulfiller operate within - # spin loops. Any failed actions along the happy path will cause the thread - # to repeat the loop and try again. - # - # When a timeout value is given the thread must be cognizant of time spent - # in the spin loop. The remaining time is checked every loop. When the time - # runs out the thread will exit. - # - # A "node" is the data structure used to perform the exchange. Only the - # occupier's node is necessary. It's the node used for the exchange. - # Each node has an "item," a "hole" (self), and a "latch." The item is the - # node's initial value. It never changes. It's what the fulfiller returns on - # success. The occupier's hole is where the fulfiller put its item. It's the - # item that the occupier returns on success. The latch is used for synchronization. - # Because a thread may act as either an occupier or fulfiller (or possibly - # both in periods of high contention) every thread creates a node when - # the exchange method is first called. - # - # The following steps occur within the spin loop. If any actions fail - # the thread will loop and try again, so long as there is time remaining. - # If time runs out the thread will return CANCEL. - # - # Check the slot for an occupier: - # - # * If the slot is empty try to occupy - # * If the slot is full try to fulfill - # - # Attempt to occupy: - # - # * Attempt to CAS myself into the slot - # * Go to sleep and wait to be woken by a fulfiller - # * If the sleep is successful then the fulfiller completed its happy path - # - Return the value from my hole (the value given by the fulfiller) - # * When the sleep fails (time ran out) attempt to cancel the operation - # - Attempt to CAS myself out of the hole - # - If successful there is no contention - # - Return CANCEL - # - On failure, I am competing with a fulfiller - # - Attempt to CAS my hole to CANCEL - # - On success - # - Let the fulfiller deal with my cancel - # - Return CANCEL - # - On failure the fulfiller has completed its happy path - # - Return th value from my hole (the fulfiller's value) - # - # Attempt to fulfill: - # - # * Attempt to CAS the occupier out of the slot - # - On failure loop again - # * Attempt to CAS my item into the occupier's hole - # - On failure the occupier is trying to cancel - # - Loop again - # - On success we are on the happy path - # - Wake the sleeping occupier - # - Return the occupier's item - - value = NULL if value.nil? # The sentinel allows nil to be a valid value - me = Node.new(value) # create my node in case I need to occupy - end_at = Concurrent.monotonic_time + timeout.to_f # The time to give up - - result = loop do - other = slot - if other && compare_and_set_slot(other, nil) - # try to fulfill - if other.compare_and_set_value(nil, value) - # happy path - other.latch.count_down - break other.item - end - elsif other.nil? 
&& compare_and_set_slot(nil, me) - # try to occupy - timeout = end_at - Concurrent.monotonic_time if timeout - if me.latch.wait(timeout) - # happy path - break me.value - else - # attempt to remove myself from the slot - if compare_and_set_slot(me, nil) - break CANCEL - elsif !me.compare_and_set_value(nil, CANCEL) - # I've failed to block the fulfiller - break me.value - end - end - end - break CANCEL if timeout && Concurrent.monotonic_time >= end_at - end - - result == NULL ? nil : result - end - end - - if Concurrent.on_jruby? - require 'concurrent/utility/native_extension_loader' - - # @!macro internal_implementation_note - # @!visibility private - class JavaExchanger < AbstractExchanger - - def initialize - @exchanger = java.util.concurrent.Exchanger.new - end - - private - - # @!macro exchanger_method_do_exchange - # - # @return [Object, CANCEL] the value exchanged by the other thread; {CANCEL} on timeout - def do_exchange(value, timeout) - result = nil - if timeout.nil? - Synchronization::JRuby.sleep_interruptibly do - result = @exchanger.exchange(value) - end - else - Synchronization::JRuby.sleep_interruptibly do - result = @exchanger.exchange(value, 1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) - end - end - result - rescue java.util.concurrent.TimeoutException - CANCEL - end - end - end - - # @!visibility private - # @!macro internal_implementation_note - ExchangerImplementation = case - when Concurrent.on_jruby? - JavaExchanger - else - RubyExchanger - end - private_constant :ExchangerImplementation - - # @!macro exchanger - class Exchanger < ExchangerImplementation - - # @!method initialize - # Creates exchanger instance - - # @!method exchange(value, timeout = nil) - # @!macro exchanger_method_do_exchange - # @!macro exchanger_method_exchange - - # @!method exchange!(value, timeout = nil) - # @!macro exchanger_method_do_exchange - # @!macro exchanger_method_exchange_bang - - # @!method try_exchange(value, timeout = nil) - # @!macro exchanger_method_do_exchange - # @!macro exchanger_method_try_exchange - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb deleted file mode 100644 index ac429531bf7bf..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb +++ /dev/null @@ -1,131 +0,0 @@ -require 'concurrent/errors' -require 'concurrent/concern/deprecation' -require 'concurrent/executor/executor_service' -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # @!macro abstract_executor_service_public_api - # @!visibility private - class AbstractExecutorService < Synchronization::LockableObject - include ExecutorService - include Concern::Deprecation - - # The set of possible fallback policies that may be set at thread pool creation. - FALLBACK_POLICIES = [:abort, :discard, :caller_runs].freeze - - # @!macro executor_service_attr_reader_fallback_policy - attr_reader :fallback_policy - - attr_reader :name - - # Create a new thread pool. - def initialize(opts = {}, &block) - super(&nil) - synchronize do - @auto_terminate = opts.fetch(:auto_terminate, true) - @name = opts.fetch(:name) if opts.key?(:name) - ns_initialize(opts, &block) - end - end - - def to_s - name ? 
"#{super[0..-2]} name: #{name}>" : super - end - - # @!macro executor_service_method_shutdown - def shutdown - raise NotImplementedError - end - - # @!macro executor_service_method_kill - def kill - raise NotImplementedError - end - - # @!macro executor_service_method_wait_for_termination - def wait_for_termination(timeout = nil) - raise NotImplementedError - end - - # @!macro executor_service_method_running_question - def running? - synchronize { ns_running? } - end - - # @!macro executor_service_method_shuttingdown_question - def shuttingdown? - synchronize { ns_shuttingdown? } - end - - # @!macro executor_service_method_shutdown_question - def shutdown? - synchronize { ns_shutdown? } - end - - # @!macro executor_service_method_auto_terminate_question - def auto_terminate? - synchronize { @auto_terminate } - end - - # @!macro executor_service_method_auto_terminate_setter - def auto_terminate=(value) - deprecated "Method #auto_terminate= has no effect. Set :auto_terminate option when executor is initialized." - end - - private - - # Returns an action which executes the `fallback_policy` once the queue - # size reaches `max_queue`. The reason for the indirection of an action - # is so that the work can be deferred outside of synchronization. - # - # @param [Array] args the arguments to the task which is being handled. - # - # @!visibility private - def fallback_action(*args) - case fallback_policy - when :abort - lambda { raise RejectedExecutionError } - when :discard - lambda { false } - when :caller_runs - lambda { - begin - yield(*args) - rescue => ex - # let it fail - log DEBUG, ex - end - true - } - else - lambda { fail "Unknown fallback policy #{fallback_policy}" } - end - end - - def ns_execute(*args, &task) - raise NotImplementedError - end - - # @!macro executor_service_method_ns_shutdown_execution - # - # Callback method called when an orderly shutdown has completed. - # The default behavior is to signal all waiting threads. - def ns_shutdown_execution - # do nothing - end - - # @!macro executor_service_method_ns_kill_execution - # - # Callback method called when the executor has been killed. - # The default behavior is to do nothing. - def ns_kill_execution - # do nothing - end - - def ns_auto_terminate? - @auto_terminate - end - - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb deleted file mode 100644 index de50ed1791c6a..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb +++ /dev/null @@ -1,62 +0,0 @@ -require 'concurrent/utility/engine' -require 'concurrent/executor/thread_pool_executor' - -module Concurrent - - # A thread pool that dynamically grows and shrinks to fit the current workload. - # New threads are created as needed, existing threads are reused, and threads - # that remain idle for too long are killed and removed from the pool. These - # pools are particularly suited to applications that perform a high volume of - # short-lived tasks. - # - # On creation a `CachedThreadPool` has zero running threads. New threads are - # created on the pool as new operations are `#post`. 
The size of the pool - # will grow until `#max_length` threads are in the pool or until the number - # of threads exceeds the number of running and pending operations. When a new - # operation is post to the pool the first available idle thread will be tasked - # with the new operation. - # - # Should a thread crash for any reason the thread will immediately be removed - # from the pool. Similarly, threads which remain idle for an extended period - # of time will be killed and reclaimed. Thus these thread pools are very - # efficient at reclaiming unused resources. - # - # The API and behavior of this class are based on Java's `CachedThreadPool` - # - # @!macro thread_pool_options - class CachedThreadPool < ThreadPoolExecutor - - # @!macro cached_thread_pool_method_initialize - # - # Create a new thread pool. - # - # @param [Hash] opts the options defining pool behavior. - # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy - # - # @raise [ArgumentError] if `fallback_policy` is not a known policy - # - # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool-- - def initialize(opts = {}) - defaults = { idletime: DEFAULT_THREAD_IDLETIMEOUT } - overrides = { min_threads: 0, - max_threads: DEFAULT_MAX_POOL_SIZE, - max_queue: DEFAULT_MAX_QUEUE_SIZE } - super(defaults.merge(opts).merge(overrides)) - end - - private - - # @!macro cached_thread_pool_method_initialize - # @!visibility private - def ns_initialize(opts) - super(opts) - if Concurrent.on_jruby? - @max_queue = 0 - @executor = java.util.concurrent.Executors.newCachedThreadPool( - DaemonThreadFactory.new(ns_auto_terminate?)) - @executor.setRejectedExecutionHandler(FALLBACK_POLICY_CLASSES[@fallback_policy].new) - @executor.setKeepAliveTime(opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT), java.util.concurrent.TimeUnit::SECONDS) - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/executor_service.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/executor_service.rb deleted file mode 100644 index 7e344919e0e6a..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/executor_service.rb +++ /dev/null @@ -1,185 +0,0 @@ -require 'concurrent/concern/logging' - -module Concurrent - - ################################################################### - - # @!macro executor_service_method_post - # - # Submit a task to the executor for asynchronous processing. - # - # @param [Array] args zero or more arguments to be passed to the task - # - # @yield the asynchronous task to perform - # - # @return [Boolean] `true` if the task is queued, `false` if the executor - # is not running - # - # @raise [ArgumentError] if no task is given - - # @!macro executor_service_method_left_shift - # - # Submit a task to the executor for asynchronous processing. - # - # @param [Proc] task the asynchronous task to perform - # - # @return [self] returns itself - - # @!macro executor_service_method_can_overflow_question - # - # Does the task queue have a maximum size? - # - # @return [Boolean] True if the task queue has a maximum size else false. - - # @!macro executor_service_method_serialized_question - # - # Does this executor guarantee serialization of its operations? 
- # - # @return [Boolean] True if the executor guarantees that all operations - # will be post in the order they are received and no two operations may - # occur simultaneously. Else false. - - ################################################################### - - # @!macro executor_service_public_api - # - # @!method post(*args, &task) - # @!macro executor_service_method_post - # - # @!method <<(task) - # @!macro executor_service_method_left_shift - # - # @!method can_overflow? - # @!macro executor_service_method_can_overflow_question - # - # @!method serialized? - # @!macro executor_service_method_serialized_question - - ################################################################### - - # @!macro executor_service_attr_reader_fallback_policy - # @return [Symbol] The fallback policy in effect. Either `:abort`, `:discard`, or `:caller_runs`. - - # @!macro executor_service_method_shutdown - # - # Begin an orderly shutdown. Tasks already in the queue will be executed, - # but no new tasks will be accepted. Has no additional effect if the - # thread pool is not running. - - # @!macro executor_service_method_kill - # - # Begin an immediate shutdown. In-progress tasks will be allowed to - # complete but enqueued tasks will be dismissed and no new tasks - # will be accepted. Has no additional effect if the thread pool is - # not running. - - # @!macro executor_service_method_wait_for_termination - # - # Block until executor shutdown is complete or until `timeout` seconds have - # passed. - # - # @note Does not initiate shutdown or termination. Either `shutdown` or `kill` - # must be called before this method (or on another thread). - # - # @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete - # - # @return [Boolean] `true` if shutdown complete or false on `timeout` - - # @!macro executor_service_method_running_question - # - # Is the executor running? - # - # @return [Boolean] `true` when running, `false` when shutting down or shutdown - - # @!macro executor_service_method_shuttingdown_question - # - # Is the executor shuttingdown? - # - # @return [Boolean] `true` when not running and not shutdown, else `false` - - # @!macro executor_service_method_shutdown_question - # - # Is the executor shutdown? - # - # @return [Boolean] `true` when shutdown, `false` when shutting down or running - - # @!macro executor_service_method_auto_terminate_question - # - # Is the executor auto-terminate when the application exits? - # - # @return [Boolean] `true` when auto-termination is enabled else `false`. - - # @!macro executor_service_method_auto_terminate_setter - # - # - # Set the auto-terminate behavior for this executor. - # @deprecated Has no effect - # @param [Boolean] value The new auto-terminate value to set for this executor. - # @return [Boolean] `true` when auto-termination is enabled else `false`. - - ################################################################### - - # @!macro abstract_executor_service_public_api - # - # @!macro executor_service_public_api - # - # @!attribute [r] fallback_policy - # @!macro executor_service_attr_reader_fallback_policy - # - # @!method shutdown - # @!macro executor_service_method_shutdown - # - # @!method kill - # @!macro executor_service_method_kill - # - # @!method wait_for_termination(timeout = nil) - # @!macro executor_service_method_wait_for_termination - # - # @!method running? - # @!macro executor_service_method_running_question - # - # @!method shuttingdown? 
- # @!macro executor_service_method_shuttingdown_question - # - # @!method shutdown? - # @!macro executor_service_method_shutdown_question - # - # @!method auto_terminate? - # @!macro executor_service_method_auto_terminate_question - # - # @!method auto_terminate=(value) - # @!macro executor_service_method_auto_terminate_setter - - ################################################################### - - # @!macro executor_service_public_api - # @!visibility private - module ExecutorService - include Concern::Logging - - # @!macro executor_service_method_post - def post(*args, &task) - raise NotImplementedError - end - - # @!macro executor_service_method_left_shift - def <<(task) - post(&task) - self - end - - # @!macro executor_service_method_can_overflow_question - # - # @note Always returns `false` - def can_overflow? - false - end - - # @!macro executor_service_method_serialized_question - # - # @note Always returns `false` - def serialized? - false - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb deleted file mode 100644 index 4de512a5ff1a1..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb +++ /dev/null @@ -1,220 +0,0 @@ -require 'concurrent/utility/engine' -require 'concurrent/executor/thread_pool_executor' - -module Concurrent - - # @!macro thread_pool_executor_constant_default_max_pool_size - # Default maximum number of threads that will be created in the pool. - - # @!macro thread_pool_executor_constant_default_min_pool_size - # Default minimum number of threads that will be retained in the pool. - - # @!macro thread_pool_executor_constant_default_max_queue_size - # Default maximum number of tasks that may be added to the task queue. - - # @!macro thread_pool_executor_constant_default_thread_timeout - # Default maximum number of seconds a thread in the pool may remain idle - # before being reclaimed. - - # @!macro thread_pool_executor_constant_default_synchronous - # Default value of the :synchronous option. - - # @!macro thread_pool_executor_attr_reader_max_length - # The maximum number of threads that may be created in the pool. - # @return [Integer] The maximum number of threads that may be created in the pool. - - # @!macro thread_pool_executor_attr_reader_min_length - # The minimum number of threads that may be retained in the pool. - # @return [Integer] The minimum number of threads that may be retained in the pool. - - # @!macro thread_pool_executor_attr_reader_largest_length - # The largest number of threads that have been created in the pool since construction. - # @return [Integer] The largest number of threads that have been created in the pool since construction. - - # @!macro thread_pool_executor_attr_reader_scheduled_task_count - # The number of tasks that have been scheduled for execution on the pool since construction. - # @return [Integer] The number of tasks that have been scheduled for execution on the pool since construction. - - # @!macro thread_pool_executor_attr_reader_completed_task_count - # The number of tasks that have been completed by the pool since construction. - # @return [Integer] The number of tasks that have been completed by the pool since construction. 
- - # @!macro thread_pool_executor_attr_reader_idletime - # The number of seconds that a thread may be idle before being reclaimed. - # @return [Integer] The number of seconds that a thread may be idle before being reclaimed. - - # @!macro thread_pool_executor_attr_reader_synchronous - # Whether or not a value of 0 for :max_queue option means the queue must perform direct hand-off or rather unbounded queue. - # @return [true, false] - - # @!macro thread_pool_executor_attr_reader_max_queue - # The maximum number of tasks that may be waiting in the work queue at any one time. - # When the queue size reaches `max_queue` subsequent tasks will be rejected in - # accordance with the configured `fallback_policy`. - # - # @return [Integer] The maximum number of tasks that may be waiting in the work queue at any one time. - # When the queue size reaches `max_queue` subsequent tasks will be rejected in - # accordance with the configured `fallback_policy`. - - # @!macro thread_pool_executor_attr_reader_length - # The number of threads currently in the pool. - # @return [Integer] The number of threads currently in the pool. - - # @!macro thread_pool_executor_attr_reader_queue_length - # The number of tasks in the queue awaiting execution. - # @return [Integer] The number of tasks in the queue awaiting execution. - - # @!macro thread_pool_executor_attr_reader_remaining_capacity - # Number of tasks that may be enqueued before reaching `max_queue` and rejecting - # new tasks. A value of -1 indicates that the queue may grow without bound. - # - # @return [Integer] Number of tasks that may be enqueued before reaching `max_queue` and rejecting - # new tasks. A value of -1 indicates that the queue may grow without bound. - - # @!macro thread_pool_executor_method_prune_pool - # Prune the thread pool of unneeded threads - # - # What is being pruned is controlled by the min_threads and idletime - # parameters passed at pool creation time - # - # This is a no-op on some pool implementation (e.g. the Java one). The Ruby - # pool will auto-prune each time a new job is posted. You will need to call - # this method explicitely in case your application post jobs in bursts (a - # lot of jobs and then nothing for long periods) - - # @!macro thread_pool_executor_public_api - # - # @!macro abstract_executor_service_public_api - # - # @!attribute [r] max_length - # @!macro thread_pool_executor_attr_reader_max_length - # - # @!attribute [r] min_length - # @!macro thread_pool_executor_attr_reader_min_length - # - # @!attribute [r] largest_length - # @!macro thread_pool_executor_attr_reader_largest_length - # - # @!attribute [r] scheduled_task_count - # @!macro thread_pool_executor_attr_reader_scheduled_task_count - # - # @!attribute [r] completed_task_count - # @!macro thread_pool_executor_attr_reader_completed_task_count - # - # @!attribute [r] idletime - # @!macro thread_pool_executor_attr_reader_idletime - # - # @!attribute [r] max_queue - # @!macro thread_pool_executor_attr_reader_max_queue - # - # @!attribute [r] length - # @!macro thread_pool_executor_attr_reader_length - # - # @!attribute [r] queue_length - # @!macro thread_pool_executor_attr_reader_queue_length - # - # @!attribute [r] remaining_capacity - # @!macro thread_pool_executor_attr_reader_remaining_capacity - # - # @!method can_overflow? 
- # @!macro executor_service_method_can_overflow_question - # - # @!method prune_pool - # @!macro thread_pool_executor_method_prune_pool - - - - - # @!macro thread_pool_options - # - # **Thread Pool Options** - # - # Thread pools support several configuration options: - # - # * `idletime`: The number of seconds that a thread may be idle before being reclaimed. - # * `name`: The name of the executor (optional). Printed in the executor's `#to_s` output and - # a `-worker-` name is given to its threads if supported by used Ruby - # implementation. `` is uniq for each thread. - # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at - # any one time. When the queue size reaches `max_queue` and no new threads can be created, - # subsequent tasks will be rejected in accordance with the configured `fallback_policy`. - # * `auto_terminate`: When true (default), the threads started will be marked as daemon. - # * `fallback_policy`: The policy defining how rejected tasks are handled. - # - # Three fallback policies are supported: - # - # * `:abort`: Raise a `RejectedExecutionError` exception and discard the task. - # * `:discard`: Discard the task and return false. - # * `:caller_runs`: Execute the task on the calling thread. - # - # **Shutting Down Thread Pools** - # - # Killing a thread pool while tasks are still being processed, either by calling - # the `#kill` method or at application exit, will have unpredictable results. There - # is no way for the thread pool to know what resources are being used by the - # in-progress tasks. When those tasks are killed the impact on those resources - # cannot be predicted. The *best* practice is to explicitly shutdown all thread - # pools using the provided methods: - # - # * Call `#shutdown` to initiate an orderly termination of all in-progress tasks - # * Call `#wait_for_termination` with an appropriate timeout interval an allow - # the orderly shutdown to complete - # * Call `#kill` *only when* the thread pool fails to shutdown in the allotted time - # - # On some runtime platforms (most notably the JVM) the application will not - # exit until all thread pools have been shutdown. To prevent applications from - # "hanging" on exit, all threads can be marked as daemon according to the - # `:auto_terminate` option. - # - # ```ruby - # pool1 = Concurrent::FixedThreadPool.new(5) # threads will be marked as daemon - # pool2 = Concurrent::FixedThreadPool.new(5, auto_terminate: false) # mark threads as non-daemon - # ``` - # - # @note Failure to properly shutdown a thread pool can lead to unpredictable results. - # Please read *Shutting Down Thread Pools* for more information. - # - # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html Java Tutorials: Thread Pools - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html Java Executors class - # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html Java ExecutorService interface - # @see https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.html#setDaemon-boolean- - - - - - - # @!macro fixed_thread_pool - # - # A thread pool that reuses a fixed number of threads operating off an unbounded queue. - # At any point, at most `num_threads` will be active processing tasks. When all threads are busy new - # tasks `#post` to the thread pool are enqueued until a thread becomes available. - # Should a thread crash for any reason the thread will immediately be removed - # from the pool and replaced. 
- # - # The API and behavior of this class are based on Java's `FixedThreadPool` - # - # @!macro thread_pool_options - class FixedThreadPool < ThreadPoolExecutor - - # @!macro fixed_thread_pool_method_initialize - # - # Create a new thread pool. - # - # @param [Integer] num_threads the number of threads to allocate - # @param [Hash] opts the options defining pool behavior. - # @option opts [Symbol] :fallback_policy (`:abort`) the fallback policy - # - # @raise [ArgumentError] if `num_threads` is less than or equal to zero - # @raise [ArgumentError] if `fallback_policy` is not a known policy - # - # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newFixedThreadPool-int- - def initialize(num_threads, opts = {}) - raise ArgumentError.new('number of threads must be greater than zero') if num_threads.to_i < 1 - defaults = { max_queue: DEFAULT_MAX_QUEUE_SIZE, - idletime: DEFAULT_THREAD_IDLETIMEOUT } - overrides = { min_threads: num_threads, - max_threads: num_threads } - super(defaults.merge(opts).merge(overrides)) - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb deleted file mode 100644 index 282df7a0593ec..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb +++ /dev/null @@ -1,66 +0,0 @@ -require 'concurrent/atomic/event' -require 'concurrent/executor/abstract_executor_service' -require 'concurrent/executor/serial_executor_service' - -module Concurrent - - # An executor service which runs all operations on the current thread, - # blocking as necessary. Operations are performed in the order they are - # received and no two operations can be performed simultaneously. - # - # This executor service exists mainly for testing an debugging. When used - # it immediately runs every `#post` operation on the current thread, blocking - # that thread until the operation is complete. This can be very beneficial - # during testing because it makes all operations deterministic. - # - # @note Intended for use primarily in testing and debugging. - class ImmediateExecutor < AbstractExecutorService - include SerialExecutorService - - # Creates a new executor - def initialize - @stopped = Concurrent::Event.new - end - - # @!macro executor_service_method_post - def post(*args, &task) - raise ArgumentError.new('no block given') unless block_given? - return false unless running? - task.call(*args) - true - end - - # @!macro executor_service_method_left_shift - def <<(task) - post(&task) - self - end - - # @!macro executor_service_method_running_question - def running? - ! shutdown? - end - - # @!macro executor_service_method_shuttingdown_question - def shuttingdown? - false - end - - # @!macro executor_service_method_shutdown_question - def shutdown? - @stopped.set? 
- end - - # @!macro executor_service_method_shutdown - def shutdown - @stopped.set - true - end - alias_method :kill, :shutdown - - # @!macro executor_service_method_wait_for_termination - def wait_for_termination(timeout = nil) - @stopped.wait(timeout) - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb deleted file mode 100644 index 4f9769fa3f3b5..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb +++ /dev/null @@ -1,44 +0,0 @@ -require 'concurrent/executor/immediate_executor' -require 'concurrent/executor/simple_executor_service' - -module Concurrent - # An executor service which runs all operations on a new thread, blocking - # until it completes. Operations are performed in the order they are received - # and no two operations can be performed simultaneously. - # - # This executor service exists mainly for testing an debugging. When used it - # immediately runs every `#post` operation on a new thread, blocking the - # current thread until the operation is complete. This is similar to how the - # ImmediateExecutor works, but the operation has the full stack of the new - # thread at its disposal. This can be helpful when the operations will spawn - # more operations on the same executor and so on - such a situation might - # overflow the single stack in case of an ImmediateExecutor, which is - # inconsistent with how it would behave for a threaded executor. - # - # @note Intended for use primarily in testing and debugging. - class IndirectImmediateExecutor < ImmediateExecutor - # Creates a new executor - def initialize - super - @internal_executor = SimpleExecutorService.new - end - - # @!macro executor_service_method_post - def post(*args, &task) - raise ArgumentError.new("no block given") unless block_given? - return false unless running? - - event = Concurrent::Event.new - @internal_executor.post do - begin - task.call(*args) - ensure - event.set - end - end - event.wait - - true - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb deleted file mode 100644 index 9a86385520523..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb +++ /dev/null @@ -1,103 +0,0 @@ -require 'concurrent/utility/engine' - -if Concurrent.on_jruby? - require 'concurrent/errors' - require 'concurrent/executor/abstract_executor_service' - - module Concurrent - - # @!macro abstract_executor_service_public_api - # @!visibility private - class JavaExecutorService < AbstractExecutorService - java_import 'java.lang.Runnable' - - FALLBACK_POLICY_CLASSES = { - abort: java.util.concurrent.ThreadPoolExecutor::AbortPolicy, - discard: java.util.concurrent.ThreadPoolExecutor::DiscardPolicy, - caller_runs: java.util.concurrent.ThreadPoolExecutor::CallerRunsPolicy - }.freeze - private_constant :FALLBACK_POLICY_CLASSES - - def post(*args, &task) - raise ArgumentError.new('no block given') unless block_given? 
- return fallback_action(*args, &task).call unless running? - @executor.submit Job.new(args, task) - true - rescue Java::JavaUtilConcurrent::RejectedExecutionException - raise RejectedExecutionError - end - - def wait_for_termination(timeout = nil) - if timeout.nil? - ok = @executor.awaitTermination(60, java.util.concurrent.TimeUnit::SECONDS) until ok - true - else - @executor.awaitTermination(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS) - end - end - - def shutdown - synchronize do - @executor.shutdown - nil - end - end - - def kill - synchronize do - @executor.shutdownNow - nil - end - end - - private - - def ns_running? - !(ns_shuttingdown? || ns_shutdown?) - end - - def ns_shuttingdown? - if @executor.respond_to? :isTerminating - @executor.isTerminating - else - false - end - end - - def ns_shutdown? - @executor.isShutdown || @executor.isTerminated - end - - class Job - include Runnable - def initialize(args, block) - @args = args - @block = block - end - - def run - @block.call(*@args) - end - end - private_constant :Job - end - - class DaemonThreadFactory - # hide include from YARD - send :include, java.util.concurrent.ThreadFactory - - def initialize(daemonize = true) - @daemonize = daemonize - end - - def newThread(runnable) - thread = java.util.concurrent.Executors.defaultThreadFactory().newThread(runnable) - thread.setDaemon(@daemonize) - return thread - end - end - - private_constant :DaemonThreadFactory - - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb deleted file mode 100644 index 7aa24f2d723bf..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb +++ /dev/null @@ -1,30 +0,0 @@ -if Concurrent.on_jruby? - - require 'concurrent/executor/java_executor_service' - require 'concurrent/executor/serial_executor_service' - - module Concurrent - - # @!macro single_thread_executor - # @!macro abstract_executor_service_public_api - # @!visibility private - class JavaSingleThreadExecutor < JavaExecutorService - include SerialExecutorService - - # @!macro single_thread_executor_method_initialize - def initialize(opts = {}) - super(opts) - end - - private - - def ns_initialize(opts) - @executor = java.util.concurrent.Executors.newSingleThreadExecutor( - DaemonThreadFactory.new(ns_auto_terminate?) - ) - @fallback_policy = opts.fetch(:fallback_policy, :discard) - raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.keys.include?(@fallback_policy) - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb deleted file mode 100644 index 1213a95fb0986..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb +++ /dev/null @@ -1,140 +0,0 @@ -if Concurrent.on_jruby? 
- - require 'concurrent/executor/java_executor_service' - - module Concurrent - - # @!macro thread_pool_executor - # @!macro thread_pool_options - # @!visibility private - class JavaThreadPoolExecutor < JavaExecutorService - - # @!macro thread_pool_executor_constant_default_max_pool_size - DEFAULT_MAX_POOL_SIZE = java.lang.Integer::MAX_VALUE # 2147483647 - - # @!macro thread_pool_executor_constant_default_min_pool_size - DEFAULT_MIN_POOL_SIZE = 0 - - # @!macro thread_pool_executor_constant_default_max_queue_size - DEFAULT_MAX_QUEUE_SIZE = 0 - - # @!macro thread_pool_executor_constant_default_thread_timeout - DEFAULT_THREAD_IDLETIMEOUT = 60 - - # @!macro thread_pool_executor_constant_default_synchronous - DEFAULT_SYNCHRONOUS = false - - # @!macro thread_pool_executor_attr_reader_max_length - attr_reader :max_length - - # @!macro thread_pool_executor_attr_reader_max_queue - attr_reader :max_queue - - # @!macro thread_pool_executor_attr_reader_synchronous - attr_reader :synchronous - - # @!macro thread_pool_executor_method_initialize - def initialize(opts = {}) - super(opts) - end - - # @!macro executor_service_method_can_overflow_question - def can_overflow? - @max_queue != 0 - end - - # @!macro thread_pool_executor_attr_reader_min_length - def min_length - @executor.getCorePoolSize - end - - # @!macro thread_pool_executor_attr_reader_max_length - def max_length - @executor.getMaximumPoolSize - end - - # @!macro thread_pool_executor_attr_reader_length - def length - @executor.getPoolSize - end - - # @!macro thread_pool_executor_attr_reader_largest_length - def largest_length - @executor.getLargestPoolSize - end - - # @!macro thread_pool_executor_attr_reader_scheduled_task_count - def scheduled_task_count - @executor.getTaskCount - end - - # @!macro thread_pool_executor_attr_reader_completed_task_count - def completed_task_count - @executor.getCompletedTaskCount - end - - # @!macro thread_pool_executor_attr_reader_idletime - def idletime - @executor.getKeepAliveTime(java.util.concurrent.TimeUnit::SECONDS) - end - - # @!macro thread_pool_executor_attr_reader_queue_length - def queue_length - @executor.getQueue.size - end - - # @!macro thread_pool_executor_attr_reader_remaining_capacity - def remaining_capacity - @max_queue == 0 ? -1 : @executor.getQueue.remainingCapacity - end - - # @!macro executor_service_method_running_question - def running? 
- super && !@executor.isTerminating - end - - # @!macro thread_pool_executor_method_prune_pool - def prune_pool - end - - private - - def ns_initialize(opts) - min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i - max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i - idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i - @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i - @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS) - @fallback_policy = opts.fetch(:fallback_policy, :abort) - - raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0 - raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if max_length < DEFAULT_MIN_POOL_SIZE - raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if max_length > DEFAULT_MAX_POOL_SIZE - raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if min_length < DEFAULT_MIN_POOL_SIZE - raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length - raise ArgumentError.new("#{fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICY_CLASSES.include?(@fallback_policy) - - if @max_queue == 0 - if @synchronous - queue = java.util.concurrent.SynchronousQueue.new - else - queue = java.util.concurrent.LinkedBlockingQueue.new - end - else - queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue) - end - - @executor = java.util.concurrent.ThreadPoolExecutor.new( - min_length, - max_length, - idletime, - java.util.concurrent.TimeUnit::SECONDS, - queue, - DaemonThreadFactory.new(ns_auto_terminate?), - FALLBACK_POLICY_CLASSES[@fallback_policy].new) - - end - end - - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb deleted file mode 100644 index 1f7301b947434..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb +++ /dev/null @@ -1,82 +0,0 @@ -require 'concurrent/executor/abstract_executor_service' -require 'concurrent/atomic/event' - -module Concurrent - - # @!macro abstract_executor_service_public_api - # @!visibility private - class RubyExecutorService < AbstractExecutorService - safe_initialization! - - def initialize(*args, &block) - super - @StopEvent = Event.new - @StoppedEvent = Event.new - end - - def post(*args, &task) - raise ArgumentError.new('no block given') unless block_given? - deferred_action = synchronize { - if running? - ns_execute(*args, &task) - else - fallback_action(*args, &task) - end - } - if deferred_action - deferred_action.call - else - true - end - end - - def shutdown - synchronize do - break unless running? - stop_event.set - ns_shutdown_execution - end - true - end - - def kill - synchronize do - break if shutdown? - stop_event.set - ns_kill_execution - stopped_event.set - end - true - end - - def wait_for_termination(timeout = nil) - stopped_event.wait(timeout) - end - - private - - def stop_event - @StopEvent - end - - def stopped_event - @StoppedEvent - end - - def ns_shutdown_execution - stopped_event.set - end - - def ns_running? - !stop_event.set? - end - - def ns_shuttingdown? - !(ns_running? || ns_shutdown?) 
- end - - def ns_shutdown? - stopped_event.set? - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb deleted file mode 100644 index 916337d4baad1..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb +++ /dev/null @@ -1,21 +0,0 @@ -require 'concurrent/executor/ruby_thread_pool_executor' - -module Concurrent - - # @!macro single_thread_executor - # @!macro abstract_executor_service_public_api - # @!visibility private - class RubySingleThreadExecutor < RubyThreadPoolExecutor - - # @!macro single_thread_executor_method_initialize - def initialize(opts = {}) - super( - min_threads: 1, - max_threads: 1, - max_queue: 0, - idletime: DEFAULT_THREAD_IDLETIMEOUT, - fallback_policy: opts.fetch(:fallback_policy, :discard), - ) - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb deleted file mode 100644 index 298dd7fed0078..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb +++ /dev/null @@ -1,366 +0,0 @@ -require 'thread' -require 'concurrent/atomic/event' -require 'concurrent/concern/logging' -require 'concurrent/executor/ruby_executor_service' -require 'concurrent/utility/monotonic_time' - -module Concurrent - - # @!macro thread_pool_executor - # @!macro thread_pool_options - # @!visibility private - class RubyThreadPoolExecutor < RubyExecutorService - - # @!macro thread_pool_executor_constant_default_max_pool_size - DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE - - # @!macro thread_pool_executor_constant_default_min_pool_size - DEFAULT_MIN_POOL_SIZE = 0 - - # @!macro thread_pool_executor_constant_default_max_queue_size - DEFAULT_MAX_QUEUE_SIZE = 0 - - # @!macro thread_pool_executor_constant_default_thread_timeout - DEFAULT_THREAD_IDLETIMEOUT = 60 - - # @!macro thread_pool_executor_constant_default_synchronous - DEFAULT_SYNCHRONOUS = false - - # @!macro thread_pool_executor_attr_reader_max_length - attr_reader :max_length - - # @!macro thread_pool_executor_attr_reader_min_length - attr_reader :min_length - - # @!macro thread_pool_executor_attr_reader_idletime - attr_reader :idletime - - # @!macro thread_pool_executor_attr_reader_max_queue - attr_reader :max_queue - - # @!macro thread_pool_executor_attr_reader_synchronous - attr_reader :synchronous - - # @!macro thread_pool_executor_method_initialize - def initialize(opts = {}) - super(opts) - end - - # @!macro thread_pool_executor_attr_reader_largest_length - def largest_length - synchronize { @largest_length } - end - - # @!macro thread_pool_executor_attr_reader_scheduled_task_count - def scheduled_task_count - synchronize { @scheduled_task_count } - end - - # @!macro thread_pool_executor_attr_reader_completed_task_count - def completed_task_count - synchronize { @completed_task_count } - end - - # @!macro executor_service_method_can_overflow_question - def can_overflow? - synchronize { ns_limited_queue? 
} - end - - # @!macro thread_pool_executor_attr_reader_length - def length - synchronize { @pool.length } - end - - # @!macro thread_pool_executor_attr_reader_queue_length - def queue_length - synchronize { @queue.length } - end - - # @!macro thread_pool_executor_attr_reader_remaining_capacity - def remaining_capacity - synchronize do - if ns_limited_queue? - @max_queue - @queue.length - else - -1 - end - end - end - - # @!visibility private - def remove_busy_worker(worker) - synchronize { ns_remove_busy_worker worker } - end - - # @!visibility private - def ready_worker(worker, last_message) - synchronize { ns_ready_worker worker, last_message } - end - - # @!visibility private - def worker_died(worker) - synchronize { ns_worker_died worker } - end - - # @!visibility private - def worker_task_completed - synchronize { @completed_task_count += 1 } - end - - # @!macro thread_pool_executor_method_prune_pool - def prune_pool - synchronize { ns_prune_pool } - end - - private - - # @!visibility private - def ns_initialize(opts) - @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i - @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i - @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i - @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i - @synchronous = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS) - @fallback_policy = opts.fetch(:fallback_policy, :abort) - - raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0 - raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy) - raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE - raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE - raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE - raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length - - @pool = [] # all workers - @ready = [] # used as a stash (most idle worker is at the start) - @queue = [] # used as queue - # @ready or @queue is empty at all times - @scheduled_task_count = 0 - @completed_task_count = 0 - @largest_length = 0 - @workers_counter = 0 - @ruby_pid = $$ # detects if Ruby has forked - - @gc_interval = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented - @next_gc_time = Concurrent.monotonic_time + @gc_interval - end - - # @!visibility private - def ns_limited_queue? - @max_queue != 0 - end - - # @!visibility private - def ns_execute(*args, &task) - ns_reset_if_forked - - if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task) - @scheduled_task_count += 1 - else - return fallback_action(*args, &task) - end - - ns_prune_pool if @next_gc_time < Concurrent.monotonic_time - nil - end - - # @!visibility private - def ns_shutdown_execution - ns_reset_if_forked - - if @pool.empty? - # nothing to do - stopped_event.set - end - - if @queue.empty? - # no more tasks will be accepted, just stop all workers - @pool.each(&:stop) - end - end - - # @!visibility private - def ns_kill_execution - # TODO log out unprocessed tasks in queue - # TODO try to shutdown first? 
- @pool.each(&:kill) - @pool.clear - @ready.clear - end - - # tries to assign task to a worker, tries to get one from @ready or to create new one - # @return [true, false] if task is assigned to a worker - # - # @!visibility private - def ns_assign_worker(*args, &task) - # keep growing if the pool is not at the minimum yet - worker, _ = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker - if worker - worker << [task, args] - true - else - false - end - rescue ThreadError - # Raised when the operating system refuses to create the new thread - return false - end - - # tries to enqueue task - # @return [true, false] if enqueued - # - # @!visibility private - def ns_enqueue(*args, &task) - return false if @synchronous - - if !ns_limited_queue? || @queue.size < @max_queue - @queue << [task, args] - true - else - false - end - end - - # @!visibility private - def ns_worker_died(worker) - ns_remove_busy_worker worker - replacement_worker = ns_add_busy_worker - ns_ready_worker replacement_worker, Concurrent.monotonic_time, false if replacement_worker - end - - # creates new worker which has to receive work to do after it's added - # @return [nil, Worker] nil of max capacity is reached - # - # @!visibility private - def ns_add_busy_worker - return if @pool.size >= @max_length - - @workers_counter += 1 - @pool << (worker = Worker.new(self, @workers_counter)) - @largest_length = @pool.length if @pool.length > @largest_length - worker - end - - # handle ready worker, giving it new job or assigning back to @ready - # - # @!visibility private - def ns_ready_worker(worker, last_message, success = true) - task_and_args = @queue.shift - if task_and_args - worker << task_and_args - else - # stop workers when !running?, do not return them to @ready - if running? - raise unless last_message - @ready.push([worker, last_message]) - else - worker.stop - end - end - end - - # removes a worker which is not in not tracked in @ready - # - # @!visibility private - def ns_remove_busy_worker(worker) - @pool.delete(worker) - stopped_event.set if @pool.empty? && !running? - true - end - - # try oldest worker if it is idle for enough time, it's returned back at the start - # - # @!visibility private - def ns_prune_pool - now = Concurrent.monotonic_time - stopped_workers = 0 - while !@ready.empty? 
&& (@pool.size - stopped_workers > @min_length) - worker, last_message = @ready.first - if now - last_message > self.idletime - stopped_workers += 1 - @ready.shift - worker << :stop - else break - end - end - - @next_gc_time = Concurrent.monotonic_time + @gc_interval - end - - def ns_reset_if_forked - if $$ != @ruby_pid - @queue.clear - @ready.clear - @pool.clear - @scheduled_task_count = 0 - @completed_task_count = 0 - @largest_length = 0 - @workers_counter = 0 - @ruby_pid = $$ - end - end - - # @!visibility private - class Worker - include Concern::Logging - - def initialize(pool, id) - # instance variables accessed only under pool's lock so no need to sync here again - @queue = Queue.new - @pool = pool - @thread = create_worker @queue, pool, pool.idletime - - if @thread.respond_to?(:name=) - @thread.name = [pool.name, 'worker', id].compact.join('-') - end - end - - def <<(message) - @queue << message - end - - def stop - @queue << :stop - end - - def kill - @thread.kill - end - - private - - def create_worker(queue, pool, idletime) - Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime| - catch(:stop) do - loop do - - case message = my_queue.pop - when :stop - my_pool.remove_busy_worker(self) - throw :stop - - else - task, args = message - run_task my_pool, task, args - my_pool.ready_worker(self, Concurrent.monotonic_time) - end - end - end - end - end - - def run_task(pool, task, args) - task.call(*args) - pool.worker_task_completed - rescue => ex - # let it fail - log DEBUG, ex - rescue Exception => ex - log ERROR, ex - pool.worker_died(self) - throw :stop - end - end - - private_constant :Worker - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb deleted file mode 100644 index f796b8571fa44..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb +++ /dev/null @@ -1,35 +0,0 @@ -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # A simple utility class that executes a callable and returns and array of three elements: - # success - indicating if the callable has been executed without errors - # value - filled by the callable result if it has been executed without errors, nil otherwise - # reason - the error risen by the callable if it has been executed with errors, nil otherwise - class SafeTaskExecutor < Synchronization::LockableObject - - def initialize(task, opts = {}) - @task = task - @exception_class = opts.fetch(:rescue_exception, false) ? 
Exception : StandardError - super() # ensures visibility - end - - # @return [Array] - def execute(*args) - success = true - value = reason = nil - - synchronize do - begin - value = @task.call(*args) - success = true - rescue @exception_class => ex - reason = ex - success = false - end - end - - [success, value, reason] - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb deleted file mode 100644 index f1c38ecfa9c97..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb +++ /dev/null @@ -1,34 +0,0 @@ -require 'concurrent/executor/executor_service' - -module Concurrent - - # Indicates that the including `ExecutorService` guarantees - # that all operations will occur in the order they are post and that no - # two operations may occur simultaneously. This module provides no - # functionality and provides no guarantees. That is the responsibility - # of the including class. This module exists solely to allow the including - # object to be interrogated for its serialization status. - # - # @example - # class Foo - # include Concurrent::SerialExecutor - # end - # - # foo = Foo.new - # - # foo.is_a? Concurrent::ExecutorService #=> true - # foo.is_a? Concurrent::SerialExecutor #=> true - # foo.serialized? #=> true - # - # @!visibility private - module SerialExecutorService - include ExecutorService - - # @!macro executor_service_method_serialized_question - # - # @note Always returns `true` - def serialized? - true - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb deleted file mode 100644 index 4db7c7f0c2135..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb +++ /dev/null @@ -1,107 +0,0 @@ -require 'concurrent/errors' -require 'concurrent/concern/logging' -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # Ensures passed jobs in a serialized order never running at the same time. - class SerializedExecution < Synchronization::LockableObject - include Concern::Logging - - def initialize() - super() - synchronize { ns_initialize } - end - - Job = Struct.new(:executor, :args, :block) do - def call - block.call(*args) - end - end - - # Submit a task to the executor for asynchronous processing. - # - # @param [Executor] executor to be used for this job - # - # @param [Array] args zero or more arguments to be passed to the task - # - # @yield the asynchronous task to perform - # - # @return [Boolean] `true` if the task is queued, `false` if the executor - # is not running - # - # @raise [ArgumentError] if no task is given - def post(executor, *args, &task) - posts [[executor, args, task]] - true - end - - # As {#post} but allows to submit multiple tasks at once, it's guaranteed that they will not - # be interleaved by other tasks. 
- # - # @param [Array, Proc)>] posts array of triplets where - # first is a {ExecutorService}, second is array of args for task, third is a task (Proc) - def posts(posts) - # if can_overflow? - # raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow' - # end - - return nil if posts.empty? - - jobs = posts.map { |executor, args, task| Job.new executor, args, task } - - job_to_post = synchronize do - if @being_executed - @stash.push(*jobs) - nil - else - @being_executed = true - @stash.push(*jobs[1..-1]) - jobs.first - end - end - - call_job job_to_post if job_to_post - true - end - - private - - def ns_initialize - @being_executed = false - @stash = [] - end - - def call_job(job) - did_it_run = begin - job.executor.post { work(job) } - true - rescue RejectedExecutionError => ex - false - end - - # TODO not the best idea to run it myself - unless did_it_run - begin - work job - rescue => ex - # let it fail - log DEBUG, ex - end - end - end - - # ensures next job is executed if any is stashed - def work(job) - job.call - ensure - synchronize do - job = @stash.shift || (@being_executed = false) - end - - # TODO maybe be able to tell caching pool to just enqueue this job, because the current one end at the end - # of this block - call_job job if job - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb deleted file mode 100644 index 8197781b52f4e..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb +++ /dev/null @@ -1,28 +0,0 @@ -require 'delegate' -require 'concurrent/executor/serial_executor_service' -require 'concurrent/executor/serialized_execution' - -module Concurrent - - # A wrapper/delegator for any `ExecutorService` that - # guarantees serialized execution of tasks. - # - # @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html) - # @see Concurrent::SerializedExecution - class SerializedExecutionDelegator < SimpleDelegator - include SerialExecutorService - - def initialize(executor) - @executor = executor - @serializer = SerializedExecution.new - super(executor) - end - - # @!macro executor_service_method_post - def post(*args, &task) - raise ArgumentError.new('no block given') unless block_given? - return false unless running? 
- @serializer.post(@executor, *args, &task) - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb deleted file mode 100644 index 0bc62afd38d20..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb +++ /dev/null @@ -1,103 +0,0 @@ -require 'concurrent/atomic/atomic_boolean' -require 'concurrent/atomic/atomic_fixnum' -require 'concurrent/atomic/event' -require 'concurrent/executor/executor_service' -require 'concurrent/executor/ruby_executor_service' - -module Concurrent - - # An executor service in which every operation spawns a new, - # independently operating thread. - # - # This is perhaps the most inefficient executor service in this - # library. It exists mainly for testing an debugging. Thread creation - # and management is expensive in Ruby and this executor performs no - # resource pooling. This can be very beneficial during testing and - # debugging because it decouples the using code from the underlying - # executor implementation. In production this executor will likely - # lead to suboptimal performance. - # - # @note Intended for use primarily in testing and debugging. - class SimpleExecutorService < RubyExecutorService - - # @!macro executor_service_method_post - def self.post(*args) - raise ArgumentError.new('no block given') unless block_given? - Thread.new(*args) do - Thread.current.abort_on_exception = false - yield(*args) - end - true - end - - # @!macro executor_service_method_left_shift - def self.<<(task) - post(&task) - self - end - - # @!macro executor_service_method_post - def post(*args, &task) - raise ArgumentError.new('no block given') unless block_given? - return false unless running? - @count.increment - Thread.new(*args) do - Thread.current.abort_on_exception = false - begin - yield(*args) - ensure - @count.decrement - @stopped.set if @running.false? && @count.value == 0 - end - end - end - - # @!macro executor_service_method_left_shift - def <<(task) - post(&task) - self - end - - # @!macro executor_service_method_running_question - def running? - @running.true? - end - - # @!macro executor_service_method_shuttingdown_question - def shuttingdown? - @running.false? && ! @stopped.set? - end - - # @!macro executor_service_method_shutdown_question - def shutdown? - @stopped.set? 
- end - - # @!macro executor_service_method_shutdown - def shutdown - @running.make_false - @stopped.set if @count.value == 0 - true - end - - # @!macro executor_service_method_kill - def kill - @running.make_false - @stopped.set - true - end - - # @!macro executor_service_method_wait_for_termination - def wait_for_termination(timeout = nil) - @stopped.wait(timeout) - end - - private - - def ns_initialize(*args) - @running = Concurrent::AtomicBoolean.new(true) - @stopped = Concurrent::Event.new - @count = Concurrent::AtomicFixnum.new(0) - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb deleted file mode 100644 index f1474ea9ff46f..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb +++ /dev/null @@ -1,57 +0,0 @@ -require 'concurrent/utility/engine' -require 'concurrent/executor/ruby_single_thread_executor' - -module Concurrent - - if Concurrent.on_jruby? - require 'concurrent/executor/java_single_thread_executor' - end - - SingleThreadExecutorImplementation = case - when Concurrent.on_jruby? - JavaSingleThreadExecutor - else - RubySingleThreadExecutor - end - private_constant :SingleThreadExecutorImplementation - - # @!macro single_thread_executor - # - # A thread pool with a single thread an unlimited queue. Should the thread - # die for any reason it will be removed and replaced, thus ensuring that - # the executor will always remain viable and available to process jobs. - # - # A common pattern for background processing is to create a single thread - # on which an infinite loop is run. The thread's loop blocks on an input - # source (perhaps blocking I/O or a queue) and processes each input as it - # is received. This pattern has several issues. The thread itself is highly - # susceptible to errors during processing. Also, the thread itself must be - # constantly monitored and restarted should it die. `SingleThreadExecutor` - # encapsulates all these bahaviors. The task processor is highly resilient - # to errors from within tasks. Also, should the thread die it will - # automatically be restarted. - # - # The API and behavior of this class are based on Java's `SingleThreadExecutor`. - # - # @!macro abstract_executor_service_public_api - class SingleThreadExecutor < SingleThreadExecutorImplementation - - # @!macro single_thread_executor_method_initialize - # - # Create a new thread pool. 
- # - # @option opts [Symbol] :fallback_policy (:discard) the policy for handling new - # tasks that are received when the queue size has reached - # `max_queue` or the executor has shut down - # - # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified - # in `FALLBACK_POLICIES` - # - # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html - # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html - - # @!method initialize(opts = {}) - # @!macro single_thread_executor_method_initialize - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb deleted file mode 100644 index 253d46a9d1d0d..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb +++ /dev/null @@ -1,88 +0,0 @@ -require 'concurrent/utility/engine' -require 'concurrent/executor/ruby_thread_pool_executor' - -module Concurrent - - if Concurrent.on_jruby? - require 'concurrent/executor/java_thread_pool_executor' - end - - ThreadPoolExecutorImplementation = case - when Concurrent.on_jruby? - JavaThreadPoolExecutor - else - RubyThreadPoolExecutor - end - private_constant :ThreadPoolExecutorImplementation - - # @!macro thread_pool_executor - # - # An abstraction composed of one or more threads and a task queue. Tasks - # (blocks or `proc` objects) are submitted to the pool and added to the queue. - # The threads in the pool remove the tasks and execute them in the order - # they were received. - # - # A `ThreadPoolExecutor` will automatically adjust the pool size according - # to the bounds set by `min-threads` and `max-threads`. When a new task is - # submitted and fewer than `min-threads` threads are running, a new thread - # is created to handle the request, even if other worker threads are idle. - # If there are more than `min-threads` but less than `max-threads` threads - # running, a new thread will be created only if the queue is full. - # - # Threads that are idle for too long will be garbage collected, down to the - # configured minimum options. Should a thread crash it, too, will be garbage collected. - # - # `ThreadPoolExecutor` is based on the Java class of the same name. From - # the official Java documentation; - # - # > Thread pools address two different problems: they usually provide - # > improved performance when executing large numbers of asynchronous tasks, - # > due to reduced per-task invocation overhead, and they provide a means - # > of bounding and managing the resources, including threads, consumed - # > when executing a collection of tasks. Each ThreadPoolExecutor also - # > maintains some basic statistics, such as the number of completed tasks. - # > - # > To be useful across a wide range of contexts, this class provides many - # > adjustable parameters and extensibility hooks. 
However, programmers are - # > urged to use the more convenient Executors factory methods - # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation), - # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single - # > background thread), that preconfigure settings for the most common usage - # > scenarios. - # - # @!macro thread_pool_options - # - # @!macro thread_pool_executor_public_api - class ThreadPoolExecutor < ThreadPoolExecutorImplementation - - # @!macro thread_pool_executor_method_initialize - # - # Create a new thread pool. - # - # @param [Hash] opts the options which configure the thread pool. - # - # @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum - # number of threads to be created - # @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) When a new task is submitted - # and fewer than `min_threads` are running, a new thread is created - # @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum - # number of seconds a thread may be idle before being reclaimed - # @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum - # number of tasks allowed in the work queue at any one time; a value of - # zero means the queue may grow without bound - # @option opts [Symbol] :fallback_policy (:abort) the policy for handling new - # tasks that are received when the queue size has reached - # `max_queue` or the executor has shut down - # @option opts [Boolean] :synchronous (DEFAULT_SYNCHRONOUS) whether or not a value of 0 - # for :max_queue means the queue must perform direct hand-off rather than unbounded. - # @raise [ArgumentError] if `:max_threads` is less than one - # @raise [ArgumentError] if `:min_threads` is less than zero - # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified - # in `FALLBACK_POLICIES` - # - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html - - # @!method initialize(opts = {}) - # @!macro thread_pool_executor_method_initialize - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/timer_set.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/timer_set.rb deleted file mode 100644 index 0dfaf1288c190..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executor/timer_set.rb +++ /dev/null @@ -1,172 +0,0 @@ -require 'concurrent/scheduled_task' -require 'concurrent/atomic/event' -require 'concurrent/collection/non_concurrent_priority_queue' -require 'concurrent/executor/executor_service' -require 'concurrent/executor/single_thread_executor' - -require 'concurrent/options' - -module Concurrent - - # Executes a collection of tasks, each after a given delay. A master task - # monitors the set and schedules each task for execution at the appropriate - # time. Tasks are run on the global thread pool or on the supplied executor. - # Each task is represented as a `ScheduledTask`. - # - # @see Concurrent::ScheduledTask - # - # @!macro monotonic_clock_warning - class TimerSet < RubyExecutorService - - # Create a new set of timed tasks. - # - # @!macro executor_options - # - # @param [Hash] opts the options used to specify the executor on which to perform actions - # @option opts [Executor] :executor when set use the given `Executor` instance. 
- # Three special values are also supported: `:task` returns the global task pool, - # `:operation` returns the global operation pool, and `:immediate` returns a new - # `ImmediateExecutor` object. - def initialize(opts = {}) - super(opts) - end - - # Post a task to be execute run after a given delay (in seconds). If the - # delay is less than 1/100th of a second the task will be immediately post - # to the executor. - # - # @param [Float] delay the number of seconds to wait for before executing the task. - # @param [Array] args the arguments passed to the task on execution. - # - # @yield the task to be performed. - # - # @return [Concurrent::ScheduledTask, false] IVar representing the task if the post - # is successful; false after shutdown. - # - # @raise [ArgumentError] if the intended execution time is not in the future. - # @raise [ArgumentError] if no block is given. - def post(delay, *args, &task) - raise ArgumentError.new('no block given') unless block_given? - return false unless running? - opts = { executor: @task_executor, - args: args, - timer_set: self } - task = ScheduledTask.execute(delay, opts, &task) # may raise exception - task.unscheduled? ? false : task - end - - # Begin an immediate shutdown. In-progress tasks will be allowed to - # complete but enqueued tasks will be dismissed and no new tasks - # will be accepted. Has no additional effect if the thread pool is - # not running. - def kill - shutdown - end - - private :<< - - private - - # Initialize the object. - # - # @param [Hash] opts the options to create the object with. - # @!visibility private - def ns_initialize(opts) - @queue = Collection::NonConcurrentPriorityQueue.new(order: :min) - @task_executor = Options.executor_from_options(opts) || Concurrent.global_io_executor - @timer_executor = SingleThreadExecutor.new - @condition = Event.new - @ruby_pid = $$ # detects if Ruby has forked - end - - # Post the task to the internal queue. - # - # @note This is intended as a callback method from ScheduledTask - # only. It is not intended to be used directly. Post a task - # by using the `SchedulesTask#execute` method. - # - # @!visibility private - def post_task(task) - synchronize { ns_post_task(task) } - end - - # @!visibility private - def ns_post_task(task) - return false unless ns_running? - ns_reset_if_forked - if (task.initial_delay) <= 0.01 - task.executor.post { task.process_task } - else - @queue.push(task) - # only post the process method when the queue is empty - @timer_executor.post(&method(:process_tasks)) if @queue.length == 1 - @condition.set - end - true - end - - # Remove the given task from the queue. - # - # @note This is intended as a callback method from `ScheduledTask` - # only. It is not intended to be used directly. Cancel a task - # by using the `ScheduledTask#cancel` method. - # - # @!visibility private - def remove_task(task) - synchronize { @queue.delete(task) } - end - - # `ExecutorService` callback called during shutdown. - # - # @!visibility private - def ns_shutdown_execution - ns_reset_if_forked - @queue.clear - @timer_executor.kill - stopped_event.set - end - - def ns_reset_if_forked - if $$ != @ruby_pid - @queue.clear - @condition.reset - @ruby_pid = $$ - end - end - - # Run a loop and execute tasks in the scheduled order and at the approximate - # scheduled time. If no tasks remain the thread will exit gracefully so that - # garbage collection can occur. If there are no ready tasks it will sleep - # for up to 60 seconds waiting for the next scheduled task. 
- # - # @!visibility private - def process_tasks - loop do - task = synchronize { @condition.reset; @queue.peek } - break unless task - - now = Concurrent.monotonic_time - diff = task.schedule_time - now - - if diff <= 0 - # We need to remove the task from the queue before passing - # it to the executor, to avoid race conditions where we pass - # the peek'ed task to the executor and then pop a different - # one that's been added in the meantime. - # - # Note that there's no race condition between the peek and - # this pop - this pop could retrieve a different task from - # the peek, but that task would be due to fire now anyway - # (because @queue is a priority queue, and this thread is - # the only reader, so whatever timer is at the head of the - # queue now must have the same pop time, or a closer one, as - # when we peeked). - task = synchronize { @queue.pop } - task.executor.post { task.process_task } - else - @condition.wait([diff, 60].min) - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executors.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executors.rb deleted file mode 100644 index eb1972ce697a9..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/executors.rb +++ /dev/null @@ -1,20 +0,0 @@ -require 'concurrent/executor/abstract_executor_service' -require 'concurrent/executor/cached_thread_pool' -require 'concurrent/executor/executor_service' -require 'concurrent/executor/fixed_thread_pool' -require 'concurrent/executor/immediate_executor' -require 'concurrent/executor/indirect_immediate_executor' -require 'concurrent/executor/java_executor_service' -require 'concurrent/executor/java_single_thread_executor' -require 'concurrent/executor/java_thread_pool_executor' -require 'concurrent/executor/ruby_executor_service' -require 'concurrent/executor/ruby_single_thread_executor' -require 'concurrent/executor/ruby_thread_pool_executor' -require 'concurrent/executor/cached_thread_pool' -require 'concurrent/executor/safe_task_executor' -require 'concurrent/executor/serial_executor_service' -require 'concurrent/executor/serialized_execution' -require 'concurrent/executor/serialized_execution_delegator' -require 'concurrent/executor/single_thread_executor' -require 'concurrent/executor/thread_pool_executor' -require 'concurrent/executor/timer_set' diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/future.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/future.rb deleted file mode 100644 index 1af182ecb2805..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/future.rb +++ /dev/null @@ -1,141 +0,0 @@ -require 'thread' -require 'concurrent/constants' -require 'concurrent/errors' -require 'concurrent/ivar' -require 'concurrent/executor/safe_task_executor' - -require 'concurrent/options' - -# TODO (pitr-ch 14-Mar-2017): deprecate, Future, Promise, etc. 
- - -module Concurrent - - # {include:file:docs-source/future.md} - # - # @!macro copy_options - # - # @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module - # @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function - # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Future.html java.util.concurrent.Future - class Future < IVar - - # Create a new `Future` in the `:unscheduled` state. - # - # @yield the asynchronous operation to perform - # - # @!macro executor_and_deref_options - # - # @option opts [object, Array] :args zero or more arguments to be passed the task - # block on execution - # - # @raise [ArgumentError] if no block is given - def initialize(opts = {}, &block) - raise ArgumentError.new('no block given') unless block_given? - super(NULL, opts.merge(__task_from_block__: block), &nil) - end - - # Execute an `:unscheduled` `Future`. Immediately sets the state to `:pending` and - # passes the block to a new thread/thread pool for eventual execution. - # Does nothing if the `Future` is in any state other than `:unscheduled`. - # - # @return [Future] a reference to `self` - # - # @example Instance and execute in separate steps - # future = Concurrent::Future.new{ sleep(1); 42 } - # future.state #=> :unscheduled - # future.execute - # future.state #=> :pending - # - # @example Instance and execute in one line - # future = Concurrent::Future.new{ sleep(1); 42 }.execute - # future.state #=> :pending - def execute - if compare_and_set_state(:pending, :unscheduled) - @executor.post{ safe_execute(@task, @args) } - self - end - end - - # Create a new `Future` object with the given block, execute it, and return the - # `:pending` object. - # - # @yield the asynchronous operation to perform - # - # @!macro executor_and_deref_options - # - # @option opts [object, Array] :args zero or more arguments to be passed the task - # block on execution - # - # @raise [ArgumentError] if no block is given - # - # @return [Future] the newly created `Future` in the `:pending` state - # - # @example - # future = Concurrent::Future.execute{ sleep(1); 42 } - # future.state #=> :pending - def self.execute(opts = {}, &block) - Future.new(opts, &block).execute - end - - # @!macro ivar_set_method - def set(value = NULL, &block) - check_for_block_or_value!(block_given?, value) - synchronize do - if @state != :unscheduled - raise MultipleAssignmentError - else - @task = block || Proc.new { value } - end - end - execute - end - - # Attempt to cancel the operation if it has not already processed. - # The operation can only be cancelled while still `pending`. It cannot - # be cancelled once it has begun processing or has completed. - # - # @return [Boolean] was the operation successfully cancelled. - def cancel - if compare_and_set_state(:cancelled, :pending) - complete(false, nil, CancelledOperationError.new) - true - else - false - end - end - - # Has the operation been successfully cancelled? - # - # @return [Boolean] - def cancelled? - state == :cancelled - end - - # Wait the given number of seconds for the operation to complete. - # On timeout attempt to cancel the operation. - # - # @param [Numeric] timeout the maximum time in seconds to wait. - # @return [Boolean] true if the operation completed before the timeout - # else false - def wait_or_cancel(timeout) - wait(timeout) - if complete? 
- true - else - cancel - false - end - end - - protected - - def ns_initialize(value, opts) - super - @state = :unscheduled - @task = opts[:__task_from_block__] - @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor - @args = get_arguments_from(opts) - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/hash.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/hash.rb deleted file mode 100644 index 7902fe9d29609..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/hash.rb +++ /dev/null @@ -1,50 +0,0 @@ -require 'concurrent/utility/engine' -require 'concurrent/thread_safe/util' - -module Concurrent - - # @!macro concurrent_hash - # - # A thread-safe subclass of Hash. This version locks against the object - # itself for every method call, ensuring only one thread can be reading - # or writing at a time. This includes iteration methods like `#each`, - # which takes the lock repeatedly when reading an item. - # - # @see http://ruby-doc.org/core/Hash.html Ruby standard library `Hash` - - # @!macro internal_implementation_note - HashImplementation = case - when Concurrent.on_cruby? - # Hash is thread-safe in practice because CRuby runs - # threads one at a time and does not do context - # switching during the execution of C functions. - ::Hash - - when Concurrent.on_jruby? - require 'jruby/synchronized' - - class JRubyHash < ::Hash - include JRuby::Synchronized - end - JRubyHash - - when Concurrent.on_truffleruby? - require 'concurrent/thread_safe/util/data_structures' - - class TruffleRubyHash < ::Hash - end - - ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubyHash - TruffleRubyHash - - else - warn 'Possibly unsupported Ruby implementation' - ::Hash - end - private_constant :HashImplementation - - # @!macro concurrent_hash - class Hash < HashImplementation - end - -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/immutable_struct.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/immutable_struct.rb deleted file mode 100644 index 48462e8375b87..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/immutable_struct.rb +++ /dev/null @@ -1,101 +0,0 @@ -require 'concurrent/synchronization/abstract_struct' -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # A thread-safe, immutable variation of Ruby's standard `Struct`. - # - # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` - module ImmutableStruct - include Synchronization::AbstractStruct - - def self.included(base) - base.safe_initialization! - end - - # @!macro struct_values - def values - ns_values - end - - alias_method :to_a, :values - - # @!macro struct_values_at - def values_at(*indexes) - ns_values_at(indexes) - end - - # @!macro struct_inspect - def inspect - ns_inspect - end - - alias_method :to_s, :inspect - - # @!macro struct_merge - def merge(other, &block) - ns_merge(other, &block) - end - - # @!macro struct_to_h - def to_h - ns_to_h - end - - # @!macro struct_get - def [](member) - ns_get(member) - end - - # @!macro struct_equality - def ==(other) - ns_equality(other) - end - - # @!macro struct_each - def each(&block) - return enum_for(:each) unless block_given? 
- ns_each(&block) - end - - # @!macro struct_each_pair - def each_pair(&block) - return enum_for(:each_pair) unless block_given? - ns_each_pair(&block) - end - - # @!macro struct_select - def select(&block) - return enum_for(:select) unless block_given? - ns_select(&block) - end - - private - - # @!visibility private - def initialize_copy(original) - super(original) - ns_initialize_copy - end - - # @!macro struct_new - def self.new(*args, &block) - clazz_name = nil - if args.length == 0 - raise ArgumentError.new('wrong number of arguments (0 for 1+)') - elsif args.length > 0 && args.first.is_a?(String) - clazz_name = args.shift - end - FACTORY.define_struct(clazz_name, args, &block) - end - - FACTORY = Class.new(Synchronization::LockableObject) do - def define_struct(name, members, &block) - synchronize do - Synchronization::AbstractStruct.define_struct_class(ImmutableStruct, Synchronization::Object, name, members, &block) - end - end - end.new - private_constant :FACTORY - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/ivar.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/ivar.rb deleted file mode 100644 index 4165038f8945b..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/ivar.rb +++ /dev/null @@ -1,208 +0,0 @@ -require 'concurrent/constants' -require 'concurrent/errors' -require 'concurrent/collection/copy_on_write_observer_set' -require 'concurrent/concern/obligation' -require 'concurrent/concern/observable' -require 'concurrent/executor/safe_task_executor' -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # An `IVar` is like a future that you can assign. As a future is a value that - # is being computed that you can wait on, an `IVar` is a value that is waiting - # to be assigned, that you can wait on. `IVars` are single assignment and - # deterministic. - # - # Then, express futures as an asynchronous computation that assigns an `IVar`. - # The `IVar` becomes the primitive on which [futures](Future) and - # [dataflow](Dataflow) are built. - # - # An `IVar` is a single-element container that is normally created empty, and - # can only be set once. The I in `IVar` stands for immutable. Reading an - # `IVar` normally blocks until it is set. It is safe to set and read an `IVar` - # from different threads. - # - # If you want to have some parallel task set the value in an `IVar`, you want - # a `Future`. If you want to create a graph of parallel tasks all executed - # when the values they depend on are ready you want `dataflow`. `IVar` is - # generally a low-level primitive. - # - # ## Examples - # - # Create, set and get an `IVar` - # - # ```ruby - # ivar = Concurrent::IVar.new - # ivar.set 14 - # ivar.value #=> 14 - # ivar.set 2 # would now be an error - # ``` - # - # ## See Also - # - # 1. For the theory: Arvind, R. Nikhil, and K. Pingali. - # [I-Structures: Data structures for parallel computing](http://dl.acm.org/citation.cfm?id=69562). - # In Proceedings of Workshop on Graph Reduction, 1986. - # 2. For recent application: - # [DataDrivenFuture in Habanero Java from Rice](http://www.cs.rice.edu/~vs3/hjlib/doc/edu/rice/hj/api/HjDataDrivenFuture.html). - class IVar < Synchronization::LockableObject - include Concern::Obligation - include Concern::Observable - - # Create a new `IVar` in the `:pending` state with the (optional) initial value. 
- # - # @param [Object] value the initial value - # @param [Hash] opts the options to create a message with - # @option opts [String] :dup_on_deref (false) call `#dup` before returning - # the data - # @option opts [String] :freeze_on_deref (false) call `#freeze` before - # returning the data - # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing - # the internal value and returning the value returned from the proc - def initialize(value = NULL, opts = {}, &block) - if value != NULL && block_given? - raise ArgumentError.new('provide only a value or a block') - end - super(&nil) - synchronize { ns_initialize(value, opts, &block) } - end - - # Add an observer on this object that will receive notification on update. - # - # Upon completion the `IVar` will notify all observers in a thread-safe way. - # The `func` method of the observer will be called with three arguments: the - # `Time` at which the `Future` completed the asynchronous operation, the - # final `value` (or `nil` on rejection), and the final `reason` (or `nil` on - # fulfillment). - # - # @param [Object] observer the object that will be notified of changes - # @param [Symbol] func symbol naming the method to call when this - # `Observable` has changes` - def add_observer(observer = nil, func = :update, &block) - raise ArgumentError.new('cannot provide both an observer and a block') if observer && block - direct_notification = false - - if block - observer = block - func = :call - end - - synchronize do - if event.set? - direct_notification = true - else - observers.add_observer(observer, func) - end - end - - observer.send(func, Time.now, self.value, reason) if direct_notification - observer - end - - # @!macro ivar_set_method - # Set the `IVar` to a value and wake or notify all threads waiting on it. - # - # @!macro ivar_set_parameters_and_exceptions - # @param [Object] value the value to store in the `IVar` - # @yield A block operation to use for setting the value - # @raise [ArgumentError] if both a value and a block are given - # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already - # been set or otherwise completed - # - # @return [IVar] self - def set(value = NULL) - check_for_block_or_value!(block_given?, value) - raise MultipleAssignmentError unless compare_and_set_state(:processing, :pending) - - begin - value = yield if block_given? - complete_without_notification(true, value, nil) - rescue => ex - complete_without_notification(false, nil, ex) - end - - notify_observers(self.value, reason) - self - end - - # @!macro ivar_fail_method - # Set the `IVar` to failed due to some error and wake or notify all threads waiting on it. - # - # @param [Object] reason for the failure - # @raise [Concurrent::MultipleAssignmentError] if the `IVar` has already - # been set or otherwise completed - # @return [IVar] self - def fail(reason = StandardError.new) - complete(false, nil, reason) - end - - # Attempt to set the `IVar` with the given value or block. Return a - # boolean indicating the success or failure of the set operation. - # - # @!macro ivar_set_parameters_and_exceptions - # - # @return [Boolean] true if the value was set else false - def try_set(value = NULL, &block) - set(value, &block) - true - rescue MultipleAssignmentError - false - end - - protected - - # @!visibility private - def ns_initialize(value, opts) - value = yield if block_given? 
- init_obligation - self.observers = Collection::CopyOnWriteObserverSet.new - set_deref_options(opts) - - @state = :pending - if value != NULL - ns_complete_without_notification(true, value, nil) - end - end - - # @!visibility private - def safe_execute(task, args = []) - if compare_and_set_state(:processing, :pending) - success, val, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) - complete(success, val, reason) - yield(success, val, reason) if block_given? - end - end - - # @!visibility private - def complete(success, value, reason) - complete_without_notification(success, value, reason) - notify_observers(self.value, reason) - self - end - - # @!visibility private - def complete_without_notification(success, value, reason) - synchronize { ns_complete_without_notification(success, value, reason) } - self - end - - # @!visibility private - def notify_observers(value, reason) - observers.notify_and_delete_observers{ [Time.now, value, reason] } - end - - # @!visibility private - def ns_complete_without_notification(success, value, reason) - raise MultipleAssignmentError if [:fulfilled, :rejected].include? @state - set_state(success, value, reason) - event.set - end - - # @!visibility private - def check_for_block_or_value!(block_given, value) # :nodoc: - if (block_given && value != NULL) || (! block_given && value == NULL) - raise ArgumentError.new('must set with either a value or a block') - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/map.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/map.rb deleted file mode 100644 index 1b22241954969..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/map.rb +++ /dev/null @@ -1,350 +0,0 @@ -require 'thread' -require 'concurrent/constants' -require 'concurrent/utility/engine' - -module Concurrent - # @!visibility private - module Collection - - # @!visibility private - MapImplementation = case - when Concurrent.on_jruby? - require 'concurrent/utility/native_extension_loader' - # noinspection RubyResolve - JRubyMapBackend - when Concurrent.on_cruby? - require 'concurrent/collection/map/mri_map_backend' - MriMapBackend - when Concurrent.on_truffleruby? - if defined?(::TruffleRuby::ConcurrentMap) - require 'concurrent/collection/map/truffleruby_map_backend' - TruffleRubyMapBackend - else - require 'concurrent/collection/map/atomic_reference_map_backend' - AtomicReferenceMapBackend - end - else - warn 'Concurrent::Map: unsupported Ruby engine, using a fully synchronized Concurrent::Map implementation' - require 'concurrent/collection/map/synchronized_map_backend' - SynchronizedMapBackend - end - end - - # `Concurrent::Map` is a hash-like object and should have much better performance - # characteristics, especially under high concurrency, than `Concurrent::Hash`. - # However, `Concurrent::Map `is not strictly semantically equivalent to a ruby `Hash` - # -- for instance, it does not necessarily retain ordering by insertion time as `Hash` - # does. For most uses it should do fine though, and we recommend you consider - # `Concurrent::Map` instead of `Concurrent::Hash` for your concurrency-safe hash needs. - class Map < Collection::MapImplementation - - # @!macro map.atomic_method - # This method is atomic. - - # @!macro map.atomic_method_with_block - # This method is atomic. 
- # @note Atomic methods taking a block do not allow the `self` instance - # to be used within the block. Doing so will cause a deadlock. - - # @!method []=(key, value) - # Set a value with key - # @param [Object] key - # @param [Object] value - # @return [Object] the new value - - # @!method compute_if_absent(key) - # Compute and store new value for key if the key is absent. - # @param [Object] key - # @yield new value - # @yieldreturn [Object] new value - # @return [Object] new value or current value - # @!macro map.atomic_method_with_block - - # @!method compute_if_present(key) - # Compute and store new value for key if the key is present. - # @param [Object] key - # @yield new value - # @yieldparam old_value [Object] - # @yieldreturn [Object, nil] new value, when nil the key is removed - # @return [Object, nil] new value or nil - # @!macro map.atomic_method_with_block - - # @!method compute(key) - # Compute and store new value for key. - # @param [Object] key - # @yield compute new value from old one - # @yieldparam old_value [Object, nil] old_value, or nil when key is absent - # @yieldreturn [Object, nil] new value, when nil the key is removed - # @return [Object, nil] new value or nil - # @!macro map.atomic_method_with_block - - # @!method merge_pair(key, value) - # If the key is absent, the value is stored, otherwise new value is - # computed with a block. - # @param [Object] key - # @param [Object] value - # @yield compute new value from old one - # @yieldparam old_value [Object] old value - # @yieldreturn [Object, nil] new value, when nil the key is removed - # @return [Object, nil] new value or nil - # @!macro map.atomic_method_with_block - - # @!method replace_pair(key, old_value, new_value) - # Replaces old_value with new_value if key exists and current value - # matches old_value - # @param [Object] key - # @param [Object] old_value - # @param [Object] new_value - # @return [true, false] true if replaced - # @!macro map.atomic_method - - # @!method replace_if_exists(key, new_value) - # Replaces current value with new_value if key exists - # @param [Object] key - # @param [Object] new_value - # @return [Object, nil] old value or nil - # @!macro map.atomic_method - - # @!method get_and_set(key, value) - # Get the current value under key and set new value. - # @param [Object] key - # @param [Object] value - # @return [Object, nil] old value or nil when the key was absent - # @!macro map.atomic_method - - # @!method delete(key) - # Delete key and its value. - # @param [Object] key - # @return [Object, nil] old value or nil when the key was absent - # @!macro map.atomic_method - - # @!method delete_pair(key, value) - # Delete pair and its value if current value equals the provided value. - # @param [Object] key - # @param [Object] value - # @return [true, false] true if deleted - # @!macro map.atomic_method - - # NonConcurrentMapBackend handles default_proc natively - unless defined?(Collection::NonConcurrentMapBackend) and self < Collection::NonConcurrentMapBackend - - # @param [Hash, nil] options options to set the :initial_capacity or :load_factor. Ignored on some Rubies. 
- # @param [Proc] default_proc Optional block to compute the default value if the key is not set, like `Hash#default_proc` - def initialize(options = nil, &default_proc) - if options.kind_of?(::Hash) - validate_options_hash!(options) - else - options = nil - end - - super(options) - @default_proc = default_proc - end - - # Get a value with key - # @param [Object] key - # @return [Object] the value - def [](key) - if value = super # non-falsy value is an existing mapping, return it right away - value - # re-check is done with get_or_default(key, NULL) instead of a simple !key?(key) in order to avoid a race condition, whereby by the time the current thread gets to the key?(key) call - # a key => value mapping might have already been created by a different thread (key?(key) would then return true, this elsif branch wouldn't be taken and an incorrent +nil+ value - # would be returned) - # note: nil == value check is not technically necessary - elsif @default_proc && nil == value && NULL == (value = get_or_default(key, NULL)) - @default_proc.call(self, key) - else - value - end - end - end - - alias_method :get, :[] - alias_method :put, :[]= - - # Get a value with key, or default_value when key is absent, - # or fail when no default_value is given. - # @param [Object] key - # @param [Object] default_value - # @yield default value for a key - # @yieldparam key [Object] - # @yieldreturn [Object] default value - # @return [Object] the value or default value - # @raise [KeyError] when key is missing and no default_value is provided - # @!macro map_method_not_atomic - # @note The "fetch-then-act" methods of `Map` are not atomic. `Map` is intended - # to be use as a concurrency primitive with strong happens-before - # guarantees. It is not intended to be used as a high-level abstraction - # supporting complex operations. All read and write operations are - # thread safe, but no guarantees are made regarding race conditions - # between the fetch operation and yielding to the block. Additionally, - # this method does not support recursion. This is due to internal - # constraints that are very unlikely to change in the near future. - def fetch(key, default_value = NULL) - if NULL != (value = get_or_default(key, NULL)) - value - elsif block_given? - yield key - elsif NULL != default_value - default_value - else - raise_fetch_no_key - end - end - - # Fetch value with key, or store default value when key is absent, - # or fail when no default_value is given. This is a two step operation, - # therefore not atomic. The store can overwrite other concurrently - # stored value. - # @param [Object] key - # @param [Object] default_value - # @yield default value for a key - # @yieldparam key [Object] - # @yieldreturn [Object] default value - # @return [Object] the value or default value - def fetch_or_store(key, default_value = NULL) - fetch(key) do - put(key, block_given? ? yield(key) : (NULL == default_value ? raise_fetch_no_key : default_value)) - end - end - - # Insert value into map with key if key is absent in one atomic step. - # @param [Object] key - # @param [Object] value - # @return [Object, nil] the previous value when key was present or nil when there was no key - def put_if_absent(key, value) - computed = false - result = compute_if_absent(key) do - computed = true - value - end - computed ? nil : result - end unless method_defined?(:put_if_absent) - - # Is the value stored in the map. Iterates over all values. 
- # @param [Object] value - # @return [true, false] - def value?(value) - each_value do |v| - return true if value.equal?(v) - end - false - end - - # All keys - # @return [::Array] keys - def keys - arr = [] - each_pair { |k, v| arr << k } - arr - end unless method_defined?(:keys) - - # All values - # @return [::Array] values - def values - arr = [] - each_pair { |k, v| arr << v } - arr - end unless method_defined?(:values) - - # Iterates over each key. - # @yield for each key in the map - # @yieldparam key [Object] - # @return [self] - # @!macro map.atomic_method_with_block - def each_key - each_pair { |k, v| yield k } - end unless method_defined?(:each_key) - - # Iterates over each value. - # @yield for each value in the map - # @yieldparam value [Object] - # @return [self] - # @!macro map.atomic_method_with_block - def each_value - each_pair { |k, v| yield v } - end unless method_defined?(:each_value) - - # Iterates over each key value pair. - # @yield for each key value pair in the map - # @yieldparam key [Object] - # @yieldparam value [Object] - # @return [self] - # @!macro map.atomic_method_with_block - def each_pair - return enum_for :each_pair unless block_given? - super - end - - alias_method :each, :each_pair unless method_defined?(:each) - - # Find key of a value. - # @param [Object] value - # @return [Object, nil] key or nil when not found - def key(value) - each_pair { |k, v| return k if v == value } - nil - end unless method_defined?(:key) - - # Is map empty? - # @return [true, false] - def empty? - each_pair { |k, v| return false } - true - end unless method_defined?(:empty?) - - # The size of map. - # @return [Integer] size - def size - count = 0 - each_pair { |k, v| count += 1 } - count - end unless method_defined?(:size) - - # @!visibility private - def marshal_dump - raise TypeError, "can't dump hash with default proc" if @default_proc - h = {} - each_pair { |k, v| h[k] = v } - h - end - - # @!visibility private - def marshal_load(hash) - initialize - populate_from(hash) - end - - undef :freeze - - # @!visibility private - def inspect - format '%s entries=%d default_proc=%s>', to_s[0..-2], size.to_s, @default_proc.inspect - end - - private - - def raise_fetch_no_key - raise KeyError, 'key not found' - end - - def initialize_copy(other) - super - populate_from(other) - end - - def populate_from(hash) - hash.each_pair { |k, v| self[k] = v } - self - end - - def validate_options_hash!(options) - if (initial_capacity = options[:initial_capacity]) && (!initial_capacity.kind_of?(Integer) || initial_capacity < 0) - raise ArgumentError, ":initial_capacity must be a positive Integer" - end - if (load_factor = options[:load_factor]) && (!load_factor.kind_of?(Numeric) || load_factor <= 0 || load_factor > 1) - raise ArgumentError, ":load_factor must be a number between 0 and 1" - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/maybe.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/maybe.rb deleted file mode 100644 index 317c82b86fa2a..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/maybe.rb +++ /dev/null @@ -1,229 +0,0 @@ -require 'concurrent/synchronization/object' - -module Concurrent - - # A `Maybe` encapsulates an optional value. A `Maybe` either contains a value - # of (represented as `Just`), or it is empty (represented as `Nothing`). 
Using - # `Maybe` is a good way to deal with errors or exceptional cases without - # resorting to drastic measures such as exceptions. - # - # `Maybe` is a replacement for the use of `nil` with better type checking. - # - # For compatibility with {Concurrent::Concern::Obligation} the predicate and - # accessor methods are aliased as `fulfilled?`, `rejected?`, `value`, and - # `reason`. - # - # ## Motivation - # - # A common pattern in languages with pattern matching, such as Erlang and - # Haskell, is to return *either* a value *or* an error from a function - # Consider this Erlang code: - # - # ```erlang - # case file:consult("data.dat") of - # {ok, Terms} -> do_something_useful(Terms); - # {error, Reason} -> lager:error(Reason) - # end. - # ``` - # - # In this example the standard library function `file:consult` returns a - # [tuple](http://erlang.org/doc/reference_manual/data_types.html#id69044) - # with two elements: an [atom](http://erlang.org/doc/reference_manual/data_types.html#id64134) - # (similar to a ruby symbol) and a variable containing ancillary data. On - # success it returns the atom `ok` and the data from the file. On failure it - # returns `error` and a string with an explanation of the problem. With this - # pattern there is no ambiguity regarding success or failure. If the file is - # empty the return value cannot be misinterpreted as an error. And when an - # error occurs the return value provides useful information. - # - # In Ruby we tend to return `nil` when an error occurs or else we raise an - # exception. Both of these idioms are problematic. Returning `nil` is - # ambiguous because `nil` may also be a valid value. It also lacks - # information pertaining to the nature of the error. Raising an exception - # is both expensive and usurps the normal flow of control. All of these - # problems can be solved with the use of a `Maybe`. - # - # A `Maybe` is unambiguous with regard to whether or not it contains a value. - # When `Just` it contains a value, when `Nothing` it does not. When `Just` - # the value it contains may be `nil`, which is perfectly valid. When - # `Nothing` the reason for the lack of a value is contained as well. The - # previous Erlang example can be duplicated in Ruby in a principled way by - # having functions return `Maybe` objects: - # - # ```ruby - # result = MyFileUtils.consult("data.dat") # returns a Maybe - # if result.just? - # do_something_useful(result.value) # or result.just - # else - # logger.error(result.reason) # or result.nothing - # end - # ``` - # - # @example Returning a Maybe from a Function - # module MyFileUtils - # def self.consult(path) - # file = File.open(path, 'r') - # Concurrent::Maybe.just(file.read) - # rescue => ex - # return Concurrent::Maybe.nothing(ex) - # ensure - # file.close if file - # end - # end - # - # maybe = MyFileUtils.consult('bogus.file') - # maybe.just? #=> false - # maybe.nothing? #=> true - # maybe.reason #=> # - # - # maybe = MyFileUtils.consult('README.md') - # maybe.just? #=> true - # maybe.nothing? #=> false - # maybe.value #=> "# Concurrent Ruby\n[![Gem Version..." - # - # @example Using Maybe with a Block - # result = Concurrent::Maybe.from do - # Client.find(10) # Client is an ActiveRecord model - # end - # - # # -- if the record was found - # result.just? #=> true - # result.value #=> # - # - # # -- if the record was not found - # result.just? #=> false - # result.reason #=> ActiveRecord::RecordNotFound - # - # @example Using Maybe with the Null Object Pattern - # # In a Rails controller... 
- # result = ClientService.new(10).find # returns a Maybe - # render json: result.or(NullClient.new) - # - # @see https://hackage.haskell.org/package/base-4.2.0.1/docs/Data-Maybe.html Haskell Data.Maybe - # @see https://github.com/purescript/purescript-maybe/blob/master/docs/Data.Maybe.md PureScript Data.Maybe - class Maybe < Synchronization::Object - include Comparable - safe_initialization! - - # Indicates that the given attribute has not been set. - # When `Just` the {#nothing} getter will return `NONE`. - # When `Nothing` the {#just} getter will return `NONE`. - NONE = ::Object.new.freeze - - # The value of a `Maybe` when `Just`. Will be `NONE` when `Nothing`. - attr_reader :just - - # The reason for the `Maybe` when `Nothing`. Will be `NONE` when `Just`. - attr_reader :nothing - - private_class_method :new - - # Create a new `Maybe` using the given block. - # - # Runs the given block passing all function arguments to the block as block - # arguments. If the block runs to completion without raising an exception - # a new `Just` is created with the value set to the return value of the - # block. If the block raises an exception a new `Nothing` is created with - # the reason being set to the raised exception. - # - # @param [Array] args Zero or more arguments to pass to the block. - # @yield The block from which to create a new `Maybe`. - # @yieldparam [Array] args Zero or more block arguments passed as - # arguments to the function. - # - # @return [Maybe] The newly created object. - # - # @raise [ArgumentError] when no block given. - def self.from(*args) - raise ArgumentError.new('no block given') unless block_given? - begin - value = yield(*args) - return new(value, NONE) - rescue => ex - return new(NONE, ex) - end - end - - # Create a new `Just` with the given value. - # - # @param [Object] value The value to set for the new `Maybe` object. - # - # @return [Maybe] The newly created object. - def self.just(value) - return new(value, NONE) - end - - # Create a new `Nothing` with the given (optional) reason. - # - # @param [Exception] error The reason to set for the new `Maybe` object. - # When given a string a new `StandardError` will be created with the - # argument as the message. When no argument is given a new - # `StandardError` with an empty message will be created. - # - # @return [Maybe] The newly created object. - def self.nothing(error = '') - if error.is_a?(Exception) - nothing = error - else - nothing = StandardError.new(error.to_s) - end - return new(NONE, nothing) - end - - # Is this `Maybe` a `Just` (successfully fulfilled with a value)? - # - # @return [Boolean] True if `Just` or false if `Nothing`. - def just? - ! nothing? - end - alias :fulfilled? :just? - - # Is this `Maybe` a `nothing` (rejected with an exception upon fulfillment)? - # - # @return [Boolean] True if `Nothing` or false if `Just`. - def nothing? - @nothing != NONE - end - alias :rejected? :nothing? - - alias :value :just - - alias :reason :nothing - - # Comparison operator. - # - # @return [Integer] 0 if self and other are both `Nothing`; - # -1 if self is `Nothing` and other is `Just`; - # 1 if self is `Just` and other is nothing; - # `self.just <=> other.just` if both self and other are `Just`. - def <=>(other) - if nothing? - other.nothing? ? 0 : -1 - else - other.nothing? ? 1 : just <=> other.just - end - end - - # Return either the value of self or the given default value. - # - # @return [Object] The value of self when `Just`; else the given default. - def or(other) - just? ? 
just : other - end - - private - - # Create a new `Maybe` with the given attributes. - # - # @param [Object] just The value when `Just` else `NONE`. - # @param [Exception, Object] nothing The exception when `Nothing` else `NONE`. - # - # @return [Maybe] The new `Maybe`. - # - # @!visibility private - def initialize(just, nothing) - @just = just - @nothing = nothing - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mutable_struct.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mutable_struct.rb deleted file mode 100644 index 5d0e9b9af59e0..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mutable_struct.rb +++ /dev/null @@ -1,239 +0,0 @@ -require 'concurrent/synchronization/abstract_struct' -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # An thread-safe variation of Ruby's standard `Struct`. Values can be set at - # construction or safely changed at any time during the object's lifecycle. - # - # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` - module MutableStruct - include Synchronization::AbstractStruct - - # @!macro struct_new - # - # Factory for creating new struct classes. - # - # ``` - # new([class_name] [, member_name]+>) -> StructClass click to toggle source - # new([class_name] [, member_name]+>) {|StructClass| block } -> StructClass - # new(value, ...) -> obj - # StructClass[value, ...] -> obj - # ``` - # - # The first two forms are used to create a new struct subclass `class_name` - # that can contain a value for each member_name . This subclass can be - # used to create instances of the structure like any other Class . - # - # If the `class_name` is omitted an anonymous struct class will be created. - # Otherwise, the name of this struct will appear as a constant in the struct class, - # so it must be unique for all structs under this base class and must start with a - # capital letter. Assigning a struct class to a constant also gives the class - # the name of the constant. - # - # If a block is given it will be evaluated in the context of `StructClass`, passing - # the created class as a parameter. This is the recommended way to customize a struct. - # Subclassing an anonymous struct creates an extra anonymous class that will never be used. - # - # The last two forms create a new instance of a struct subclass. The number of value - # parameters must be less than or equal to the number of attributes defined for the - # struct. Unset parameters default to nil. Passing more parameters than number of attributes - # will raise an `ArgumentError`. - # - # @see http://ruby-doc.org/core/Struct.html#method-c-new Ruby standard library `Struct#new` - - # @!macro struct_values - # - # Returns the values for this struct as an Array. - # - # @return [Array] the values for this struct - # - def values - synchronize { ns_values } - end - alias_method :to_a, :values - - # @!macro struct_values_at - # - # Returns the struct member values for each selector as an Array. - # - # A selector may be either an Integer offset or a Range of offsets (as in `Array#values_at`). - # - # @param [Fixnum, Range] indexes the index(es) from which to obatin the values (in order) - def values_at(*indexes) - synchronize { ns_values_at(indexes) } - end - - # @!macro struct_inspect - # - # Describe the contents of this struct in a string. 
- # - # @return [String] the contents of this struct in a string - def inspect - synchronize { ns_inspect } - end - alias_method :to_s, :inspect - - # @!macro struct_merge - # - # Returns a new struct containing the contents of `other` and the contents - # of `self`. If no block is specified, the value for entries with duplicate - # keys will be that of `other`. Otherwise the value for each duplicate key - # is determined by calling the block with the key, its value in `self` and - # its value in `other`. - # - # @param [Hash] other the hash from which to set the new values - # @yield an options block for resolving duplicate keys - # @yieldparam [String, Symbol] member the name of the member which is duplicated - # @yieldparam [Object] selfvalue the value of the member in `self` - # @yieldparam [Object] othervalue the value of the member in `other` - # - # @return [Synchronization::AbstractStruct] a new struct with the new values - # - # @raise [ArgumentError] of given a member that is not defined in the struct - def merge(other, &block) - synchronize { ns_merge(other, &block) } - end - - # @!macro struct_to_h - # - # Returns a hash containing the names and values for the struct’s members. - # - # @return [Hash] the names and values for the struct’s members - def to_h - synchronize { ns_to_h } - end - - # @!macro struct_get - # - # Attribute Reference - # - # @param [Symbol, String, Integer] member the string or symbol name of the member - # for which to obtain the value or the member's index - # - # @return [Object] the value of the given struct member or the member at the given index. - # - # @raise [NameError] if the member does not exist - # @raise [IndexError] if the index is out of range. - def [](member) - synchronize { ns_get(member) } - end - - # @!macro struct_equality - # - # Equality - # - # @return [Boolean] true if other has the same struct subclass and has - # equal member values (according to `Object#==`) - def ==(other) - synchronize { ns_equality(other) } - end - - # @!macro struct_each - # - # Yields the value of each struct member in order. If no block is given - # an enumerator is returned. - # - # @yield the operation to be performed on each struct member - # @yieldparam [Object] value each struct value (in order) - def each(&block) - return enum_for(:each) unless block_given? - synchronize { ns_each(&block) } - end - - # @!macro struct_each_pair - # - # Yields the name and value of each struct member in order. If no block is - # given an enumerator is returned. - # - # @yield the operation to be performed on each struct member/value pair - # @yieldparam [Object] member each struct member (in order) - # @yieldparam [Object] value each struct value (in order) - def each_pair(&block) - return enum_for(:each_pair) unless block_given? - synchronize { ns_each_pair(&block) } - end - - # @!macro struct_select - # - # Yields each member value from the struct to the block and returns an Array - # containing the member values from the struct for which the given block - # returns a true value (equivalent to `Enumerable#select`). - # - # @yield the operation to be performed on each struct member - # @yieldparam [Object] value each struct value (in order) - # - # @return [Array] an array containing each value for which the block returns true - def select(&block) - return enum_for(:select) unless block_given? 
- synchronize { ns_select(&block) } - end - - # @!macro struct_set - # - # Attribute Assignment - # - # Sets the value of the given struct member or the member at the given index. - # - # @param [Symbol, String, Integer] member the string or symbol name of the member - # for which to obtain the value or the member's index - # - # @return [Object] the value of the given struct member or the member at the given index. - # - # @raise [NameError] if the name does not exist - # @raise [IndexError] if the index is out of range. - def []=(member, value) - if member.is_a? Integer - length = synchronize { @values.length } - if member >= length - raise IndexError.new("offset #{member} too large for struct(size:#{length})") - end - synchronize { @values[member] = value } - else - send("#{member}=", value) - end - rescue NoMethodError - raise NameError.new("no member '#{member}' in struct") - end - - private - - # @!visibility private - def initialize_copy(original) - synchronize do - super(original) - ns_initialize_copy - end - end - - # @!macro struct_new - def self.new(*args, &block) - clazz_name = nil - if args.length == 0 - raise ArgumentError.new('wrong number of arguments (0 for 1+)') - elsif args.length > 0 && args.first.is_a?(String) - clazz_name = args.shift - end - FACTORY.define_struct(clazz_name, args, &block) - end - - FACTORY = Class.new(Synchronization::LockableObject) do - def define_struct(name, members, &block) - synchronize do - clazz = Synchronization::AbstractStruct.define_struct_class(MutableStruct, Synchronization::LockableObject, name, members, &block) - members.each_with_index do |member, index| - clazz.send :remove_method, member - clazz.send(:define_method, member) do - synchronize { @values[index] } - end - clazz.send(:define_method, "#{member}=") do |value| - synchronize { @values[index] = value } - end - end - clazz - end - end - end.new - private_constant :FACTORY - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mvar.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mvar.rb deleted file mode 100644 index dfc41950cf85f..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/mvar.rb +++ /dev/null @@ -1,242 +0,0 @@ -require 'concurrent/concern/dereferenceable' -require 'concurrent/synchronization/object' - -module Concurrent - - # An `MVar` is a synchronized single element container. They are empty or - # contain one item. Taking a value from an empty `MVar` blocks, as does - # putting a value into a full one. You can either think of them as blocking - # queue of length one, or a special kind of mutable variable. - # - # On top of the fundamental `#put` and `#take` operations, we also provide a - # `#mutate` that is atomic with respect to operations on the same instance. - # These operations all support timeouts. - # - # We also support non-blocking operations `#try_put!` and `#try_take!`, a - # `#set!` that ignores existing values, a `#value` that returns the value - # without removing it or returns `MVar::EMPTY`, and a `#modify!` that yields - # `MVar::EMPTY` if the `MVar` is empty and can be used to set `MVar::EMPTY`. - # You shouldn't use these operations in the first instance. - # - # `MVar` is a [Dereferenceable](Dereferenceable). - # - # `MVar` is related to M-structures in Id, `MVar` in Haskell and `SyncVar` in Scala. 
- # - # Note that unlike the original Haskell paper, our `#take` is blocking. This is how - # Haskell and Scala do it today. - # - # @!macro copy_options - # - # ## See Also - # - # 1. P. Barth, R. Nikhil, and Arvind. [M-Structures: Extending a parallel, non- strict, functional language with state](http://dl.acm.org/citation.cfm?id=652538). In Proceedings of the 5th - # ACM Conference on Functional Programming Languages and Computer Architecture (FPCA), 1991. - # - # 2. S. Peyton Jones, A. Gordon, and S. Finne. [Concurrent Haskell](http://dl.acm.org/citation.cfm?id=237794). - # In Proceedings of the 23rd Symposium on Principles of Programming Languages - # (PoPL), 1996. - class MVar < Synchronization::Object - include Concern::Dereferenceable - safe_initialization! - - # Unique value that represents that an `MVar` was empty - EMPTY = ::Object.new - - # Unique value that represents that an `MVar` timed out before it was able - # to produce a value. - TIMEOUT = ::Object.new - - # Create a new `MVar`, either empty or with an initial value. - # - # @param [Hash] opts the options controlling how the future will be processed - # - # @!macro deref_options - def initialize(value = EMPTY, opts = {}) - @value = value - @mutex = Mutex.new - @empty_condition = ConditionVariable.new - @full_condition = ConditionVariable.new - set_deref_options(opts) - end - - # Remove the value from an `MVar`, leaving it empty, and blocking if there - # isn't a value. A timeout can be set to limit the time spent blocked, in - # which case it returns `TIMEOUT` if the time is exceeded. - # @return [Object] the value that was taken, or `TIMEOUT` - def take(timeout = nil) - @mutex.synchronize do - wait_for_full(timeout) - - # If we timed out we'll still be empty - if unlocked_full? - value = @value - @value = EMPTY - @empty_condition.signal - apply_deref_options(value) - else - TIMEOUT - end - end - end - - # acquires lock on the from an `MVAR`, yields the value to provided block, - # and release lock. A timeout can be set to limit the time spent blocked, - # in which case it returns `TIMEOUT` if the time is exceeded. - # @return [Object] the value returned by the block, or `TIMEOUT` - def borrow(timeout = nil) - @mutex.synchronize do - wait_for_full(timeout) - - # if we timeoud out we'll still be empty - if unlocked_full? - yield @value - else - TIMEOUT - end - end - end - - # Put a value into an `MVar`, blocking if there is already a value until - # it is empty. A timeout can be set to limit the time spent blocked, in - # which case it returns `TIMEOUT` if the time is exceeded. - # @return [Object] the value that was put, or `TIMEOUT` - def put(value, timeout = nil) - @mutex.synchronize do - wait_for_empty(timeout) - - # If we timed out we won't be empty - if unlocked_empty? - @value = value - @full_condition.signal - apply_deref_options(value) - else - TIMEOUT - end - end - end - - # Atomically `take`, yield the value to a block for transformation, and then - # `put` the transformed value. Returns the transformed value. A timeout can - # be set to limit the time spent blocked, in which case it returns `TIMEOUT` - # if the time is exceeded. - # @return [Object] the transformed value, or `TIMEOUT` - def modify(timeout = nil) - raise ArgumentError.new('no block given') unless block_given? - - @mutex.synchronize do - wait_for_full(timeout) - - # If we timed out we'll still be empty - if unlocked_full? 
- value = @value - @value = yield value - @full_condition.signal - apply_deref_options(value) - else - TIMEOUT - end - end - end - - # Non-blocking version of `take`, that returns `EMPTY` instead of blocking. - def try_take! - @mutex.synchronize do - if unlocked_full? - value = @value - @value = EMPTY - @empty_condition.signal - apply_deref_options(value) - else - EMPTY - end - end - end - - # Non-blocking version of `put`, that returns whether or not it was successful. - def try_put!(value) - @mutex.synchronize do - if unlocked_empty? - @value = value - @full_condition.signal - true - else - false - end - end - end - - # Non-blocking version of `put` that will overwrite an existing value. - def set!(value) - @mutex.synchronize do - old_value = @value - @value = value - @full_condition.signal - apply_deref_options(old_value) - end - end - - # Non-blocking version of `modify` that will yield with `EMPTY` if there is no value yet. - def modify! - raise ArgumentError.new('no block given') unless block_given? - - @mutex.synchronize do - value = @value - @value = yield value - if unlocked_empty? - @empty_condition.signal - else - @full_condition.signal - end - apply_deref_options(value) - end - end - - # Returns if the `MVar` is currently empty. - def empty? - @mutex.synchronize { @value == EMPTY } - end - - # Returns if the `MVar` currently contains a value. - def full? - !empty? - end - - protected - - def synchronize(&block) - @mutex.synchronize(&block) - end - - private - - def unlocked_empty? - @value == EMPTY - end - - def unlocked_full? - ! unlocked_empty? - end - - def wait_for_full(timeout) - wait_while(@full_condition, timeout) { unlocked_empty? } - end - - def wait_for_empty(timeout) - wait_while(@empty_condition, timeout) { unlocked_full? } - end - - def wait_while(condition, timeout) - if timeout.nil? - while yield - condition.wait(@mutex) - end - else - stop = Concurrent.monotonic_time + timeout - while yield && timeout > 0.0 - condition.wait(@mutex, timeout) - timeout = stop - Concurrent.monotonic_time - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/options.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/options.rb deleted file mode 100644 index bdd22a9df15d4..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/options.rb +++ /dev/null @@ -1,42 +0,0 @@ -require 'concurrent/configuration' - -module Concurrent - - # @!visibility private - module Options - - # Get the requested `Executor` based on the values set in the options hash. - # - # @param [Hash] opts the options defining the requested executor - # @option opts [Executor] :executor when set use the given `Executor` instance. - # Three special values are also supported: `:fast` returns the global fast executor, - # `:io` returns the global io executor, and `:immediate` returns a new - # `ImmediateExecutor` object. 
- # - # @return [Executor, nil] the requested thread pool, or nil when no option specified - # - # @!visibility private - def self.executor_from_options(opts = {}) # :nodoc: - if identifier = opts.fetch(:executor, nil) - executor(identifier) - else - nil - end - end - - def self.executor(executor_identifier) - case executor_identifier - when :fast - Concurrent.global_fast_executor - when :io - Concurrent.global_io_executor - when :immediate - Concurrent.global_immediate_executor - when Concurrent::ExecutorService - executor_identifier - else - raise ArgumentError, "executor not recognized by '#{executor_identifier}'" - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promise.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promise.rb deleted file mode 100644 index ccc47dd628c8d..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promise.rb +++ /dev/null @@ -1,580 +0,0 @@ -require 'thread' -require 'concurrent/constants' -require 'concurrent/errors' -require 'concurrent/ivar' -require 'concurrent/executor/safe_task_executor' - -require 'concurrent/options' - -module Concurrent - - PromiseExecutionError = Class.new(StandardError) - - # Promises are inspired by the JavaScript [Promises/A](http://wiki.commonjs.org/wiki/Promises/A) - # and [Promises/A+](http://promises-aplus.github.io/promises-spec/) specifications. - # - # > A promise represents the eventual value returned from the single - # > completion of an operation. - # - # Promises are similar to futures and share many of the same behaviours. - # Promises are far more robust, however. Promises can be chained in a tree - # structure where each promise may have zero or more children. Promises are - # chained using the `then` method. The result of a call to `then` is always - # another promise. Promises are resolved asynchronously (with respect to the - # main thread) but in a strict order: parents are guaranteed to be resolved - # before their children, children before their younger siblings. The `then` - # method takes two parameters: an optional block to be executed upon parent - # resolution and an optional callable to be executed upon parent failure. The - # result of each promise is passed to each of its children upon resolution. - # When a promise is rejected all its children will be summarily rejected and - # will receive the reason. - # - # Promises have several possible states: *:unscheduled*, *:pending*, - # *:processing*, *:rejected*, or *:fulfilled*. These are also aggregated as - # `#incomplete?` and `#complete?`. When a Promise is created it is set to - # *:unscheduled*. Once the `#execute` method is called the state becomes - # *:pending*. Once a job is pulled from the thread pool's queue and is given - # to a thread for processing (often immediately upon `#post`) the state - # becomes *:processing*. The future will remain in this state until processing - # is complete. A future that is in the *:unscheduled*, *:pending*, or - # *:processing* is considered `#incomplete?`. A `#complete?` Promise is either - # *:rejected*, indicating that an exception was thrown during processing, or - # *:fulfilled*, indicating success. If a Promise is *:fulfilled* its `#value` - # will be updated to reflect the result of the operation. If *:rejected* the - # `reason` will be updated with a reference to the thrown exception. 
The - # predicate methods `#unscheduled?`, `#pending?`, `#rejected?`, and - # `#fulfilled?` can be called at any time to obtain the state of the Promise, - # as can the `#state` method, which returns a symbol. - # - # Retrieving the value of a promise is done through the `value` (alias: - # `deref`) method. Obtaining the value of a promise is a potentially blocking - # operation. When a promise is *rejected* a call to `value` will return `nil` - # immediately. When a promise is *fulfilled* a call to `value` will - # immediately return the current value. When a promise is *pending* a call to - # `value` will block until the promise is either *rejected* or *fulfilled*. A - # *timeout* value can be passed to `value` to limit how long the call will - # block. If `nil` the call will block indefinitely. If `0` the call will not - # block. Any other integer or float value will indicate the maximum number of - # seconds to block. - # - # Promises run on the global thread pool. - # - # @!macro copy_options - # - # ### Examples - # - # Start by requiring promises - # - # ```ruby - # require 'concurrent/promise' - # ``` - # - # Then create one - # - # ```ruby - # p = Concurrent::Promise.execute do - # # do something - # 42 - # end - # ``` - # - # Promises can be chained using the `then` method. The `then` method accepts a - # block and an executor, to be executed on fulfillment, and a callable argument to be executed - # on rejection. The result of the each promise is passed as the block argument - # to chained promises. - # - # ```ruby - # p = Concurrent::Promise.new{10}.then{|x| x * 2}.then{|result| result - 10 }.execute - # ``` - # - # And so on, and so on, and so on... - # - # ```ruby - # p = Concurrent::Promise.fulfill(20). - # then{|result| result - 10 }. - # then{|result| result * 3 }. - # then(executor: different_executor){|result| result % 5 }.execute - # ``` - # - # The initial state of a newly created Promise depends on the state of its parent: - # - if parent is *unscheduled* the child will be *unscheduled* - # - if parent is *pending* the child will be *pending* - # - if parent is *fulfilled* the child will be *pending* - # - if parent is *rejected* the child will be *pending* (but will ultimately be *rejected*) - # - # Promises are executed asynchronously from the main thread. By the time a - # child Promise finishes intialization it may be in a different state than its - # parent (by the time a child is created its parent may have completed - # execution and changed state). Despite being asynchronous, however, the order - # of execution of Promise objects in a chain (or tree) is strictly defined. - # - # There are multiple ways to create and execute a new `Promise`. Both ways - # provide identical behavior: - # - # ```ruby - # # create, operate, then execute - # p1 = Concurrent::Promise.new{ "Hello World!" } - # p1.state #=> :unscheduled - # p1.execute - # - # # create and immediately execute - # p2 = Concurrent::Promise.new{ "Hello World!" }.execute - # - # # execute during creation - # p3 = Concurrent::Promise.execute{ "Hello World!" } - # ``` - # - # Once the `execute` method is called a `Promise` becomes `pending`: - # - # ```ruby - # p = Concurrent::Promise.execute{ "Hello, world!" } - # p.state #=> :pending - # p.pending? #=> true - # ``` - # - # Wait a little bit, and the promise will resolve and provide a value: - # - # ```ruby - # p = Concurrent::Promise.execute{ "Hello, world!" } - # sleep(0.1) - # - # p.state #=> :fulfilled - # p.fulfilled? 
#=> true - # p.value #=> "Hello, world!" - # ``` - # - # If an exception occurs, the promise will be rejected and will provide - # a reason for the rejection: - # - # ```ruby - # p = Concurrent::Promise.execute{ raise StandardError.new("Here comes the Boom!") } - # sleep(0.1) - # - # p.state #=> :rejected - # p.rejected? #=> true - # p.reason #=> "#" - # ``` - # - # #### Rejection - # - # When a promise is rejected all its children will be rejected and will - # receive the rejection `reason` as the rejection callable parameter: - # - # ```ruby - # p = Concurrent::Promise.execute { Thread.pass; raise StandardError } - # - # c1 = p.then(-> reason { 42 }) - # c2 = p.then(-> reason { raise 'Boom!' }) - # - # c1.wait.state #=> :fulfilled - # c1.value #=> 45 - # c2.wait.state #=> :rejected - # c2.reason #=> # - # ``` - # - # Once a promise is rejected it will continue to accept children that will - # receive immediately rejection (they will be executed asynchronously). - # - # #### Aliases - # - # The `then` method is the most generic alias: it accepts a block to be - # executed upon parent fulfillment and a callable to be executed upon parent - # rejection. At least one of them should be passed. The default block is `{ - # |result| result }` that fulfills the child with the parent value. The - # default callable is `{ |reason| raise reason }` that rejects the child with - # the parent reason. - # - # - `on_success { |result| ... }` is the same as `then {|result| ... }` - # - `rescue { |reason| ... }` is the same as `then(Proc.new { |reason| ... } )` - # - `rescue` is aliased by `catch` and `on_error` - class Promise < IVar - - # Initialize a new Promise with the provided options. - # - # @!macro executor_and_deref_options - # - # @!macro promise_init_options - # - # @option opts [Promise] :parent the parent `Promise` when building a chain/tree - # @option opts [Proc] :on_fulfill fulfillment handler - # @option opts [Proc] :on_reject rejection handler - # @option opts [object, Array] :args zero or more arguments to be passed - # the task block on execution - # - # @yield The block operation to be performed asynchronously. - # - # @raise [ArgumentError] if no block is given - # - # @see http://wiki.commonjs.org/wiki/Promises/A - # @see http://promises-aplus.github.io/promises-spec/ - def initialize(opts = {}, &block) - opts.delete_if { |k, v| v.nil? } - super(NULL, opts.merge(__promise_body_from_block__: block), &nil) - end - - # Create a new `Promise` and fulfill it immediately. - # - # @!macro executor_and_deref_options - # - # @!macro promise_init_options - # - # @raise [ArgumentError] if no block is given - # - # @return [Promise] the newly created `Promise` - def self.fulfill(value, opts = {}) - Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, true, value, nil) } - end - - # Create a new `Promise` and reject it immediately. - # - # @!macro executor_and_deref_options - # - # @!macro promise_init_options - # - # @raise [ArgumentError] if no block is given - # - # @return [Promise] the newly created `Promise` - def self.reject(reason, opts = {}) - Promise.new(opts).tap { |p| p.send(:synchronized_set_state!, false, nil, reason) } - end - - # Execute an `:unscheduled` `Promise`. Immediately sets the state to `:pending` and - # passes the block to a new thread/thread pool for eventual execution. - # Does nothing if the `Promise` is in any state other than `:unscheduled`. - # - # @return [Promise] a reference to `self` - def execute - if root? 
- if compare_and_set_state(:pending, :unscheduled) - set_pending - realize(@promise_body) - end - else - compare_and_set_state(:pending, :unscheduled) - @parent.execute - end - self - end - - # @!macro ivar_set_method - # - # @raise [Concurrent::PromiseExecutionError] if not the root promise - def set(value = NULL, &block) - raise PromiseExecutionError.new('supported only on root promise') unless root? - check_for_block_or_value!(block_given?, value) - synchronize do - if @state != :unscheduled - raise MultipleAssignmentError - else - @promise_body = block || Proc.new { |result| value } - end - end - execute - end - - # @!macro ivar_fail_method - # - # @raise [Concurrent::PromiseExecutionError] if not the root promise - def fail(reason = StandardError.new) - set { raise reason } - end - - # Create a new `Promise` object with the given block, execute it, and return the - # `:pending` object. - # - # @!macro executor_and_deref_options - # - # @!macro promise_init_options - # - # @return [Promise] the newly created `Promise` in the `:pending` state - # - # @raise [ArgumentError] if no block is given - # - # @example - # promise = Concurrent::Promise.execute{ sleep(1); 42 } - # promise.state #=> :pending - def self.execute(opts = {}, &block) - new(opts, &block).execute - end - - # Chain a new promise off the current promise. - # - # @return [Promise] the new promise - # @yield The block operation to be performed asynchronously. - # @overload then(rescuer, executor, &block) - # @param [Proc] rescuer An optional rescue block to be executed if the - # promise is rejected. - # @param [ThreadPool] executor An optional thread pool executor to be used - # in the new Promise - # @overload then(rescuer, executor: executor, &block) - # @param [Proc] rescuer An optional rescue block to be executed if the - # promise is rejected. - # @param [ThreadPool] executor An optional thread pool executor to be used - # in the new Promise - def then(*args, &block) - if args.last.is_a?(::Hash) - executor = args.pop[:executor] - rescuer = args.first - else - rescuer, executor = args - end - - executor ||= @executor - - raise ArgumentError.new('rescuers and block are both missing') if rescuer.nil? && !block_given? - block = Proc.new { |result| result } unless block_given? - child = Promise.new( - parent: self, - executor: executor, - on_fulfill: block, - on_reject: rescuer - ) - - synchronize do - child.state = :pending if @state == :pending - child.on_fulfill(apply_deref_options(@value)) if @state == :fulfilled - child.on_reject(@reason) if @state == :rejected - @children << child - end - - child - end - - # Chain onto this promise an action to be undertaken on success - # (fulfillment). - # - # @yield The block to execute - # - # @return [Promise] self - def on_success(&block) - raise ArgumentError.new('no block given') unless block_given? - self.then(&block) - end - - # Chain onto this promise an action to be undertaken on failure - # (rejection). - # - # @yield The block to execute - # - # @return [Promise] self - def rescue(&block) - self.then(block) - end - - alias_method :catch, :rescue - alias_method :on_error, :rescue - - # Yield the successful result to the block that returns a promise. If that - # promise is also successful the result is the result of the yielded promise. - # If either part fails the whole also fails. - # - # @example - # Promise.execute { 1 }.flat_map { |v| Promise.execute { v + 2 } }.value! 
#=> 3 - # - # @return [Promise] - def flat_map(&block) - child = Promise.new( - parent: self, - executor: ImmediateExecutor.new, - ) - - on_error { |e| child.on_reject(e) } - on_success do |result1| - begin - inner = block.call(result1) - inner.execute - inner.on_success { |result2| child.on_fulfill(result2) } - inner.on_error { |e| child.on_reject(e) } - rescue => e - child.on_reject(e) - end - end - - child - end - - # Builds a promise that produces the result of promises in an Array - # and fails if any of them fails. - # - # @overload zip(*promises) - # @param [Array] promises - # - # @overload zip(*promises, opts) - # @param [Array] promises - # @param [Hash] opts the configuration options - # @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance. - # @option opts [Boolean] :execute (true) execute promise before returning - # - # @return [Promise] - def self.zip(*promises) - opts = promises.last.is_a?(::Hash) ? promises.pop.dup : {} - opts[:executor] ||= ImmediateExecutor.new - zero = if !opts.key?(:execute) || opts.delete(:execute) - fulfill([], opts) - else - Promise.new(opts) { [] } - end - - promises.reduce(zero) do |p1, p2| - p1.flat_map do |results| - p2.then do |next_result| - results << next_result - end - end - end - end - - # Builds a promise that produces the result of self and others in an Array - # and fails if any of them fails. - # - # @overload zip(*promises) - # @param [Array] others - # - # @overload zip(*promises, opts) - # @param [Array] others - # @param [Hash] opts the configuration options - # @option opts [Executor] :executor (ImmediateExecutor.new) when set use the given `Executor` instance. - # @option opts [Boolean] :execute (true) execute promise before returning - # - # @return [Promise] - def zip(*others) - self.class.zip(self, *others) - end - - # Aggregates a collection of promises and executes the `then` condition - # if all aggregated promises succeed. Executes the `rescue` handler with - # a `Concurrent::PromiseExecutionError` if any of the aggregated promises - # fail. Upon execution will execute any of the aggregate promises that - # were not already executed. - # - # @!macro promise_self_aggregate - # - # The returned promise will not yet have been executed. Additional `#then` - # and `#rescue` handlers may still be provided. Once the returned promise - # is execute the aggregate promises will be also be executed (if they have - # not been executed already). The results of the aggregate promises will - # be checked upon completion. The necessary `#then` and `#rescue` blocks - # on the aggregating promise will then be executed as appropriate. If the - # `#rescue` handlers are executed the raises exception will be - # `Concurrent::PromiseExecutionError`. - # - # @param [Array] promises Zero or more promises to aggregate - # @return [Promise] an unscheduled (not executed) promise that aggregates - # the promises given as arguments - def self.all?(*promises) - aggregate(:all?, *promises) - end - - # Aggregates a collection of promises and executes the `then` condition - # if any aggregated promises succeed. Executes the `rescue` handler with - # a `Concurrent::PromiseExecutionError` if any of the aggregated promises - # fail. Upon execution will execute any of the aggregate promises that - # were not already executed. 
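For reviewers: a rough sketch of the `Promise.zip` / `Promise.all?` aggregation helpers documented in the hunk above. This is only an illustration of the API being removed and assumes concurrent-ruby (~> 1.2) is still on the load path; it is not part of the patch.

```ruby
require 'concurrent'

a = Concurrent::Promise.execute { 1 }
b = Concurrent::Promise.execute { 2 }

# zip collects the values of all promises into an array, in argument order
Concurrent::Promise.zip(a, b).value        # => [1, 2]

# all? builds an unscheduled aggregate promise; execute it, then inspect the outcome
ok = Concurrent::Promise.all?(a, b).execute
ok.wait.fulfilled?                         # => true
```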
- # - # @!macro promise_self_aggregate - def self.any?(*promises) - aggregate(:any?, *promises) - end - - protected - - def ns_initialize(value, opts) - super - - @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor - @args = get_arguments_from(opts) - - @parent = opts.fetch(:parent) { nil } - @on_fulfill = opts.fetch(:on_fulfill) { Proc.new { |result| result } } - @on_reject = opts.fetch(:on_reject) { Proc.new { |reason| raise reason } } - - @promise_body = opts[:__promise_body_from_block__] || Proc.new { |result| result } - @state = :unscheduled - @children = [] - end - - # Aggregate a collection of zero or more promises under a composite promise, - # execute the aggregated promises and collect them into a standard Ruby array, - # call the given Ruby `Ennnumerable` predicate (such as `any?`, `all?`, `none?`, - # or `one?`) on the collection checking for the success or failure of each, - # then executing the composite's `#then` handlers if the predicate returns - # `true` or executing the composite's `#rescue` handlers if the predicate - # returns false. - # - # @!macro promise_self_aggregate - def self.aggregate(method, *promises) - composite = Promise.new do - completed = promises.collect do |promise| - promise.execute if promise.unscheduled? - promise.wait - promise - end - unless completed.empty? || completed.send(method){|promise| promise.fulfilled? } - raise PromiseExecutionError - end - end - composite - end - - # @!visibility private - def set_pending - synchronize do - @state = :pending - @children.each { |c| c.set_pending } - end - end - - # @!visibility private - def root? # :nodoc: - @parent.nil? - end - - # @!visibility private - def on_fulfill(result) - realize Proc.new { @on_fulfill.call(result) } - nil - end - - # @!visibility private - def on_reject(reason) - realize Proc.new { @on_reject.call(reason) } - nil - end - - # @!visibility private - def notify_child(child) - if_state(:fulfilled) { child.on_fulfill(apply_deref_options(@value)) } - if_state(:rejected) { child.on_reject(@reason) } - end - - # @!visibility private - def complete(success, value, reason) - children_to_notify = synchronize do - set_state!(success, value, reason) - @children.dup - end - - children_to_notify.each { |child| notify_child(child) } - observers.notify_and_delete_observers{ [Time.now, self.value, reason] } - end - - # @!visibility private - def realize(task) - @executor.post do - success, value, reason = SafeTaskExecutor.new(task, rescue_exception: true).execute(*@args) - complete(success, value, reason) - end - end - - # @!visibility private - def set_state!(success, value, reason) - set_state(success, value, reason) - event.set - end - - # @!visibility private - def synchronized_set_state!(success, value, reason) - synchronize { set_state!(success, value, reason) } - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promises.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promises.rb deleted file mode 100644 index 3cd17055ca250..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/promises.rb +++ /dev/null @@ -1,2168 +0,0 @@ -require 'concurrent/synchronization/object' -require 'concurrent/atomic/atomic_boolean' -require 'concurrent/atomic/atomic_fixnum' -require 'concurrent/collection/lock_free_stack' -require 'concurrent/configuration' -require 
'concurrent/errors' -require 'concurrent/re_include' - -module Concurrent - - # {include:file:docs-source/promises-main.md} - module Promises - - # @!macro promises.param.default_executor - # @param [Executor, :io, :fast] default_executor Instance of an executor or a name of the - # global executor. Default executor propagates to chained futures unless overridden with - # executor parameter or changed with {AbstractEventFuture#with_default_executor}. - # - # @!macro promises.param.executor - # @param [Executor, :io, :fast] executor Instance of an executor or a name of the - # global executor. The task is executed on it, default executor remains unchanged. - # - # @!macro promises.param.args - # @param [Object] args arguments which are passed to the task when it's executed. - # (It might be prepended with other arguments, see the @yeild section). - # - # @!macro promises.shortcut.on - # Shortcut of {#$0_on} with default `:io` executor supplied. - # @see #$0_on - # - # @!macro promises.shortcut.using - # Shortcut of {#$0_using} with default `:io` executor supplied. - # @see #$0_using - # - # @!macro promise.param.task-future - # @yieldreturn will become result of the returned Future. - # Its returned value becomes {Future#value} fulfilling it, - # raised exception becomes {Future#reason} rejecting it. - # - # @!macro promise.param.callback - # @yieldreturn is forgotten. - - # Container of all {Future}, {Event} factory methods. They are never constructed directly with - # new. - module FactoryMethods - extend ReInclude - extend self - - module Configuration - # @return [Executor, :io, :fast] the executor which is used when none is supplied - # to a factory method. The method can be overridden in the receivers of - # `include FactoryMethod` - def default_executor - :io - end - end - - include Configuration - - # @!macro promises.shortcut.on - # @return [ResolvableEvent] - def resolvable_event - resolvable_event_on default_executor - end - - # Created resolvable event, user is responsible for resolving the event once by - # {Promises::ResolvableEvent#resolve}. - # - # @!macro promises.param.default_executor - # @return [ResolvableEvent] - def resolvable_event_on(default_executor = self.default_executor) - ResolvableEventPromise.new(default_executor).future - end - - # @!macro promises.shortcut.on - # @return [ResolvableFuture] - def resolvable_future - resolvable_future_on default_executor - end - - # Creates resolvable future, user is responsible for resolving the future once by - # {Promises::ResolvableFuture#resolve}, {Promises::ResolvableFuture#fulfill}, - # or {Promises::ResolvableFuture#reject} - # - # @!macro promises.param.default_executor - # @return [ResolvableFuture] - def resolvable_future_on(default_executor = self.default_executor) - ResolvableFuturePromise.new(default_executor).future - end - - # @!macro promises.shortcut.on - # @return [Future] - def future(*args, &task) - future_on(default_executor, *args, &task) - end - - # Constructs new Future which will be resolved after block is evaluated on default executor. - # Evaluation begins immediately. - # - # @!macro promises.param.default_executor - # @!macro promises.param.args - # @yield [*args] to the task. - # @!macro promise.param.task-future - # @return [Future] - def future_on(default_executor, *args, &task) - ImmediateEventPromise.new(default_executor).future.then(*args, &task) - end - - # Creates resolved future with will be either fulfilled with the given value or rejection with - # the given reason. 
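Likewise, a minimal illustration of the `Promises::FactoryMethods` entry points documented above (`future`, `resolvable_future`); again only a sketch of the removed API, assuming concurrent-ruby is available:

```ruby
require 'concurrent/promises'

f = Concurrent::Promises.future(2, 3) { |a, b| a + b }   # task runs on the :io executor
f.value!                                                 # => 5

rf = Concurrent::Promises.resolvable_future              # resolved manually by the user
Thread.new { rf.fulfill :done }
rf.value                                                 # => :done
```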
- # - # @param [true, false] fulfilled - # @param [Object] value - # @param [Object] reason - # @!macro promises.param.default_executor - # @return [Future] - def resolved_future(fulfilled, value, reason, default_executor = self.default_executor) - ImmediateFuturePromise.new(default_executor, fulfilled, value, reason).future - end - - # Creates resolved future with will be fulfilled with the given value. - # - # @!macro promises.param.default_executor - # @param [Object] value - # @return [Future] - def fulfilled_future(value, default_executor = self.default_executor) - resolved_future true, value, nil, default_executor - end - - # Creates resolved future with will be rejected with the given reason. - # - # @!macro promises.param.default_executor - # @param [Object] reason - # @return [Future] - def rejected_future(reason, default_executor = self.default_executor) - resolved_future false, nil, reason, default_executor - end - - # Creates resolved event. - # - # @!macro promises.param.default_executor - # @return [Event] - def resolved_event(default_executor = self.default_executor) - ImmediateEventPromise.new(default_executor).event - end - - # General constructor. Behaves differently based on the argument's type. It's provided for convenience - # but it's better to be explicit. - # - # @see rejected_future, resolved_event, fulfilled_future - # @!macro promises.param.default_executor - # @return [Event, Future] - # - # @overload make_future(nil, default_executor = self.default_executor) - # @param [nil] nil - # @return [Event] resolved event. - # - # @overload make_future(a_future, default_executor = self.default_executor) - # @param [Future] a_future - # @return [Future] a future which will be resolved when a_future is. - # - # @overload make_future(an_event, default_executor = self.default_executor) - # @param [Event] an_event - # @return [Event] an event which will be resolved when an_event is. - # - # @overload make_future(exception, default_executor = self.default_executor) - # @param [Exception] exception - # @return [Future] a rejected future with the exception as its reason. - # - # @overload make_future(value, default_executor = self.default_executor) - # @param [Object] value when none of the above overloads fits - # @return [Future] a fulfilled future with the value. - def make_future(argument = nil, default_executor = self.default_executor) - case argument - when AbstractEventFuture - # returning wrapper would change nothing - argument - when Exception - rejected_future argument, default_executor - when nil - resolved_event default_executor - else - fulfilled_future argument, default_executor - end - end - - # @!macro promises.shortcut.on - # @return [Future, Event] - def delay(*args, &task) - delay_on default_executor, *args, &task - end - - # Creates new event or future which is resolved only after it is touched, - # see {Concurrent::AbstractEventFuture#touch}. - # - # @!macro promises.param.default_executor - # @overload delay_on(default_executor, *args, &task) - # If task is provided it returns a {Future} representing the result of the task. - # @!macro promises.param.args - # @yield [*args] to the task. - # @!macro promise.param.task-future - # @return [Future] - # @overload delay_on(default_executor) - # If no task is provided, it returns an {Event} - # @return [Event] - def delay_on(default_executor, *args, &task) - event = DelayPromise.new(default_executor).event - task ? 
event.chain(*args, &task) : event - end - - # @!macro promises.shortcut.on - # @return [Future, Event] - def schedule(intended_time, *args, &task) - schedule_on default_executor, intended_time, *args, &task - end - - # Creates new event or future which is resolved in intended_time. - # - # @!macro promises.param.default_executor - # @!macro promises.param.intended_time - # @param [Numeric, Time] intended_time `Numeric` means to run in `intended_time` seconds. - # `Time` means to run on `intended_time`. - # @overload schedule_on(default_executor, intended_time, *args, &task) - # If task is provided it returns a {Future} representing the result of the task. - # @!macro promises.param.args - # @yield [*args] to the task. - # @!macro promise.param.task-future - # @return [Future] - # @overload schedule_on(default_executor, intended_time) - # If no task is provided, it returns an {Event} - # @return [Event] - def schedule_on(default_executor, intended_time, *args, &task) - event = ScheduledPromise.new(default_executor, intended_time).event - task ? event.chain(*args, &task) : event - end - - # @!macro promises.shortcut.on - # @return [Future] - def zip_futures(*futures_and_or_events) - zip_futures_on default_executor, *futures_and_or_events - end - - # Creates new future which is resolved after all futures_and_or_events are resolved. - # Its value is array of zipped future values. Its reason is array of reasons for rejection. - # If there is an error it rejects. - # @!macro promises.event-conversion - # If event is supplied, which does not have value and can be only resolved, it's - # represented as `:fulfilled` with value `nil`. - # - # @!macro promises.param.default_executor - # @param [AbstractEventFuture] futures_and_or_events - # @return [Future] - def zip_futures_on(default_executor, *futures_and_or_events) - ZipFuturesPromise.new_blocked_by(futures_and_or_events, default_executor).future - end - - alias_method :zip, :zip_futures - - # @!macro promises.shortcut.on - # @return [Event] - def zip_events(*futures_and_or_events) - zip_events_on default_executor, *futures_and_or_events - end - - # Creates new event which is resolved after all futures_and_or_events are resolved. - # (Future is resolved when fulfilled or rejected.) - # - # @!macro promises.param.default_executor - # @param [AbstractEventFuture] futures_and_or_events - # @return [Event] - def zip_events_on(default_executor, *futures_and_or_events) - ZipEventsPromise.new_blocked_by(futures_and_or_events, default_executor).event - end - - # @!macro promises.shortcut.on - # @return [Future] - def any_resolved_future(*futures_and_or_events) - any_resolved_future_on default_executor, *futures_and_or_events - end - - alias_method :any, :any_resolved_future - - # Creates new future which is resolved after first futures_and_or_events is resolved. - # Its result equals result of the first resolved future. - # @!macro promises.any-touch - # If resolved it does not propagate {Concurrent::AbstractEventFuture#touch}, leaving delayed - # futures un-executed if they are not required any more. 
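The `delay` and `schedule` factories described above defer evaluation; a small, hedged sketch of their behaviour (assuming concurrent-ruby is loaded):

```ruby
require 'concurrent/promises'

lazy = Concurrent::Promises.delay { 6 * 7 }   # not evaluated until touched
lazy.value                                    # #value touches it; => 42

later = Concurrent::Promises.schedule(0.1) { :done }
later.value                                   # waits ~0.1 s; => :done
```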
- # @!macro promises.event-conversion - # - # @!macro promises.param.default_executor - # @param [AbstractEventFuture] futures_and_or_events - # @return [Future] - def any_resolved_future_on(default_executor, *futures_and_or_events) - AnyResolvedFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future - end - - # @!macro promises.shortcut.on - # @return [Future] - def any_fulfilled_future(*futures_and_or_events) - any_fulfilled_future_on default_executor, *futures_and_or_events - end - - # Creates new future which is resolved after first of futures_and_or_events is fulfilled. - # Its result equals result of the first resolved future or if all futures_and_or_events reject, - # it has reason of the last resolved future. - # @!macro promises.any-touch - # @!macro promises.event-conversion - # - # @!macro promises.param.default_executor - # @param [AbstractEventFuture] futures_and_or_events - # @return [Future] - def any_fulfilled_future_on(default_executor, *futures_and_or_events) - AnyFulfilledFuturePromise.new_blocked_by(futures_and_or_events, default_executor).future - end - - # @!macro promises.shortcut.on - # @return [Event] - def any_event(*futures_and_or_events) - any_event_on default_executor, *futures_and_or_events - end - - # Creates new event which becomes resolved after first of the futures_and_or_events resolves. - # @!macro promises.any-touch - # - # @!macro promises.param.default_executor - # @param [AbstractEventFuture] futures_and_or_events - # @return [Event] - def any_event_on(default_executor, *futures_and_or_events) - AnyResolvedEventPromise.new_blocked_by(futures_and_or_events, default_executor).event - end - - # TODO consider adding first(count, *futures) - # TODO consider adding zip_by(slice, *futures) processing futures in slices - # TODO or rather a generic aggregator taking a function - end - - module InternalStates - # @!visibility private - class State - def resolved? - raise NotImplementedError - end - - def to_sym - raise NotImplementedError - end - end - - # @!visibility private - class Pending < State - def resolved? - false - end - - def to_sym - :pending - end - end - - # @!visibility private - class Reserved < Pending - end - - # @!visibility private - class ResolvedWithResult < State - def resolved? - true - end - - def to_sym - :resolved - end - - def result - [fulfilled?, value, reason] - end - - def fulfilled? - raise NotImplementedError - end - - def value - raise NotImplementedError - end - - def reason - raise NotImplementedError - end - - def apply - raise NotImplementedError - end - end - - # @!visibility private - class Fulfilled < ResolvedWithResult - - def initialize(value) - @Value = value - end - - def fulfilled? - true - end - - def apply(args, block) - block.call value, *args - end - - def value - @Value - end - - def reason - nil - end - - def to_sym - :fulfilled - end - end - - # @!visibility private - class FulfilledArray < Fulfilled - def apply(args, block) - block.call(*value, *args) - end - end - - # @!visibility private - class Rejected < ResolvedWithResult - def initialize(reason) - @Reason = reason - end - - def fulfilled? - false - end - - def value - nil - end - - def reason - @Reason - end - - def to_sym - :rejected - end - - def apply(args, block) - block.call reason, *args - end - end - - # @!visibility private - class PartiallyRejected < ResolvedWithResult - def initialize(value, reason) - super() - @Value = value - @Reason = reason - end - - def fulfilled? 
- false - end - - def to_sym - :rejected - end - - def value - @Value - end - - def reason - @Reason - end - - def apply(args, block) - block.call(*reason, *args) - end - end - - # @!visibility private - PENDING = Pending.new - # @!visibility private - RESERVED = Reserved.new - # @!visibility private - RESOLVED = Fulfilled.new(nil) - - def RESOLVED.to_sym - :resolved - end - end - - private_constant :InternalStates - - # @!macro promises.shortcut.event-future - # @see Event#$0 - # @see Future#$0 - - # @!macro promises.param.timeout - # @param [Numeric] timeout the maximum time in second to wait. - - # @!macro promises.warn.blocks - # @note This function potentially blocks current thread until the Future is resolved. - # Be careful it can deadlock. Try to chain instead. - - # Common ancestor of {Event} and {Future} classes, many shared methods are defined here. - class AbstractEventFuture < Synchronization::Object - safe_initialization! - attr_atomic(:internal_state) - private :internal_state=, :swap_internal_state, :compare_and_set_internal_state, :update_internal_state - # @!method internal_state - # @!visibility private - - include InternalStates - - def initialize(promise, default_executor) - super() - @Lock = Mutex.new - @Condition = ConditionVariable.new - @Promise = promise - @DefaultExecutor = default_executor - @Callbacks = LockFreeStack.new - @Waiters = AtomicFixnum.new 0 - self.internal_state = PENDING - end - - private :initialize - - # Returns its state. - # @return [Symbol] - # - # @overload an_event.state - # @return [:pending, :resolved] - # @overload a_future.state - # Both :fulfilled, :rejected implies :resolved. - # @return [:pending, :fulfilled, :rejected] - def state - internal_state.to_sym - end - - # Is it in pending state? - # @return [Boolean] - def pending? - !internal_state.resolved? - end - - # Is it in resolved state? - # @return [Boolean] - def resolved? - internal_state.resolved? - end - - # Propagates touch. Requests all the delayed futures, which it depends on, to be - # executed. This method is called by any other method requiring resolved state, like {#wait}. - # @return [self] - def touch - @Promise.touch - self - end - - # @!macro promises.touches - # Calls {Concurrent::AbstractEventFuture#touch}. - - # @!macro promises.method.wait - # Wait (block the Thread) until receiver is {#resolved?}. - # @!macro promises.touches - # - # @!macro promises.warn.blocks - # @!macro promises.param.timeout - # @return [self, true, false] self implies timeout was not used, true implies timeout was used - # and it was resolved, false implies it was not resolved within timeout. - def wait(timeout = nil) - result = wait_until_resolved(timeout) - timeout ? result : self - end - - # Returns default executor. - # @return [Executor] default executor - # @see #with_default_executor - # @see FactoryMethods#future_on - # @see FactoryMethods#resolvable_future - # @see FactoryMethods#any_fulfilled_future_on - # @see similar - def default_executor - @DefaultExecutor - end - - # @!macro promises.shortcut.on - # @return [Future] - def chain(*args, &task) - chain_on @DefaultExecutor, *args, &task - end - - # Chains the task to be executed asynchronously on executor after it is resolved. - # - # @!macro promises.param.executor - # @!macro promises.param.args - # @return [Future] - # @!macro promise.param.task-future - # - # @overload an_event.chain_on(executor, *args, &task) - # @yield [*args] to the task. 
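`chain` (the `:io`-executor shortcut for `chain_on` documented here) runs whether the parent fulfills or rejects and receives the full result triplet; an illustrative sketch:

```ruby
require 'concurrent/promises'

f = Concurrent::Promises.future { 1 }
g = f.chain { |fulfilled, value, reason| fulfilled ? value + 1 : reason }
g.value   # => 2
```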
- # @overload a_future.chain_on(executor, *args, &task) - # @yield [fulfilled, value, reason, *args] to the task. - # @yieldparam [true, false] fulfilled - # @yieldparam [Object] value - # @yieldparam [Object] reason - def chain_on(executor, *args, &task) - ChainPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future - end - - # @return [String] Short string representation. - def to_s - format '%s %s>', super[0..-2], state - end - - alias_method :inspect, :to_s - - # Resolves the resolvable when receiver is resolved. - # - # @param [Resolvable] resolvable - # @return [self] - def chain_resolvable(resolvable) - on_resolution! { resolvable.resolve_with internal_state } - end - - alias_method :tangle, :chain_resolvable - - # @!macro promises.shortcut.using - # @return [self] - def on_resolution(*args, &callback) - on_resolution_using @DefaultExecutor, *args, &callback - end - - # Stores the callback to be executed synchronously on resolving thread after it is - # resolved. - # - # @!macro promises.param.args - # @!macro promise.param.callback - # @return [self] - # - # @overload an_event.on_resolution!(*args, &callback) - # @yield [*args] to the callback. - # @overload a_future.on_resolution!(*args, &callback) - # @yield [fulfilled, value, reason, *args] to the callback. - # @yieldparam [true, false] fulfilled - # @yieldparam [Object] value - # @yieldparam [Object] reason - def on_resolution!(*args, &callback) - add_callback :callback_on_resolution, args, callback - end - - # Stores the callback to be executed asynchronously on executor after it is resolved. - # - # @!macro promises.param.executor - # @!macro promises.param.args - # @!macro promise.param.callback - # @return [self] - # - # @overload an_event.on_resolution_using(executor, *args, &callback) - # @yield [*args] to the callback. - # @overload a_future.on_resolution_using(executor, *args, &callback) - # @yield [fulfilled, value, reason, *args] to the callback. - # @yieldparam [true, false] fulfilled - # @yieldparam [Object] value - # @yieldparam [Object] reason - def on_resolution_using(executor, *args, &callback) - add_callback :async_callback_on_resolution, executor, args, callback - end - - # @!macro promises.method.with_default_executor - # Crates new object with same class with the executor set as its new default executor. - # Any futures depending on it will use the new default executor. - # @!macro promises.shortcut.event-future - # @abstract - # @return [AbstractEventFuture] - def with_default_executor(executor) - raise NotImplementedError - end - - # @!visibility private - def resolve_with(state, raise_on_reassign = true, reserved = false) - if compare_and_set_internal_state(reserved ? RESERVED : PENDING, state) - # go to synchronized block only if there were waiting threads - @Lock.synchronize { @Condition.broadcast } unless @Waiters.value == 0 - call_callbacks state - else - return rejected_resolution(raise_on_reassign, state) - end - self - end - - # For inspection. - # @!visibility private - # @return [Array] - def blocks - @Callbacks.each_with_object([]) do |(method, args), promises| - promises.push(args[0]) if method == :callback_notify_blocked - end - end - - # For inspection. - # @!visibility private - def callbacks - @Callbacks.each.to_a - end - - # For inspection. - # @!visibility private - def promise - @Promise - end - - # For inspection. - # @!visibility private - def touched? - promise.touched? - end - - # For inspection. 
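The `on_resolution` family registers callbacks instead of returning a new future; a sketch of the removed behaviour (the `Queue` is only there to make the asynchronous callback observable):

```ruby
require 'concurrent/promises'

done = Queue.new
Concurrent::Promises.future { 21 * 2 }
  .on_resolution { |fulfilled, value, reason| done << [fulfilled, value] }

done.pop   # => [true, 42]
```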
- # @!visibility private - def waiting_threads - @Waiters.each.to_a - end - - # @!visibility private - def add_callback_notify_blocked(promise, index) - add_callback :callback_notify_blocked, promise, index - end - - # @!visibility private - def add_callback_clear_delayed_node(node) - add_callback(:callback_clear_delayed_node, node) - end - - # @!visibility private - def with_hidden_resolvable - # TODO (pitr-ch 10-Dec-2018): documentation, better name if in edge - self - end - - private - - def add_callback(method, *args) - state = internal_state - if state.resolved? - call_callback method, state, args - else - @Callbacks.push [method, args] - state = internal_state - # take back if it was resolved in the meanwhile - call_callbacks state if state.resolved? - end - self - end - - def callback_clear_delayed_node(state, node) - node.value = nil - end - - # @return [Boolean] - def wait_until_resolved(timeout) - return true if resolved? - - touch - - @Lock.synchronize do - @Waiters.increment - begin - unless resolved? - @Condition.wait @Lock, timeout - end - ensure - # JRuby may raise ConcurrencyError - @Waiters.decrement - end - end - resolved? - end - - def call_callback(method, state, args) - self.send method, state, *args - end - - def call_callbacks(state) - method, args = @Callbacks.pop - while method - call_callback method, state, args - method, args = @Callbacks.pop - end - end - - def with_async(executor, *args, &block) - Concurrent.executor(executor).post(*args, &block) - end - - def async_callback_on_resolution(state, executor, args, callback) - with_async(executor, state, args, callback) do |st, ar, cb| - callback_on_resolution st, ar, cb - end - end - - def callback_notify_blocked(state, promise, index) - promise.on_blocker_resolution self, index - end - end - - # Represents an event which will happen in future (will be resolved). The event is either - # pending or resolved. It should be always resolved. Use {Future} to communicate rejections and - # cancellation. - class Event < AbstractEventFuture - - alias_method :then, :chain - - - # @!macro promises.method.zip - # Creates a new event or a future which will be resolved when receiver and other are. - # Returns an event if receiver and other are events, otherwise returns a future. - # If just one of the parties is Future then the result - # of the returned future is equal to the result of the supplied future. If both are futures - # then the result is as described in {FactoryMethods#zip_futures_on}. - # - # @return [Future, Event] - def zip(other) - if other.is_a?(Future) - ZipFutureEventPromise.new_blocked_by2(other, self, @DefaultExecutor).future - else - ZipEventEventPromise.new_blocked_by2(self, other, @DefaultExecutor).event - end - end - - alias_method :&, :zip - - # Creates a new event which will be resolved when the first of receiver, `event_or_future` - # resolves. - # - # @return [Event] - def any(event_or_future) - AnyResolvedEventPromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).event - end - - alias_method :|, :any - - # Creates new event dependent on receiver which will not evaluate until touched, see {#touch}. - # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. - # - # @return [Event] - def delay - event = DelayPromise.new(@DefaultExecutor).event - ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event - end - - # @!macro promise.method.schedule - # Creates new event dependent on receiver scheduled to execute on/in intended_time. 
- # In time is interpreted from the moment the receiver is resolved, therefore it inserts - # delay into the chain. - # - # @!macro promises.param.intended_time - # @return [Event] - def schedule(intended_time) - chain do - event = ScheduledPromise.new(@DefaultExecutor, intended_time).event - ZipEventEventPromise.new_blocked_by2(self, event, @DefaultExecutor).event - end.flat_event - end - - # Converts event to a future. The future is fulfilled when the event is resolved, the future may never fail. - # - # @return [Future] - def to_future - future = Promises.resolvable_future - ensure - chain_resolvable(future) - end - - # Returns self, since this is event - # @return [Event] - def to_event - self - end - - # @!macro promises.method.with_default_executor - # @return [Event] - def with_default_executor(executor) - EventWrapperPromise.new_blocked_by1(self, executor).event - end - - private - - def rejected_resolution(raise_on_reassign, state) - raise Concurrent::MultipleAssignmentError.new('Event can be resolved only once') if raise_on_reassign - return false - end - - def callback_on_resolution(state, args, callback) - callback.call(*args) - end - end - - # Represents a value which will become available in future. May reject with a reason instead, - # e.g. when the tasks raises an exception. - class Future < AbstractEventFuture - - # Is it in fulfilled state? - # @return [Boolean] - def fulfilled? - state = internal_state - state.resolved? && state.fulfilled? - end - - # Is it in rejected state? - # @return [Boolean] - def rejected? - state = internal_state - state.resolved? && !state.fulfilled? - end - - # @!macro promises.warn.nil - # @note Make sure returned `nil` is not confused with timeout, no value when rejected, - # no reason when fulfilled, etc. - # Use more exact methods if needed, like {#wait}, {#value!}, {#result}, etc. - - # @!macro promises.method.value - # Return value of the future. - # @!macro promises.touches - # - # @!macro promises.warn.blocks - # @!macro promises.warn.nil - # @!macro promises.param.timeout - # @!macro promises.param.timeout_value - # @param [Object] timeout_value a value returned by the method when it times out - # @return [Object, nil, timeout_value] the value of the Future when fulfilled, - # timeout_value on timeout, - # nil on rejection. - def value(timeout = nil, timeout_value = nil) - if wait_until_resolved timeout - internal_state.value - else - timeout_value - end - end - - # Returns reason of future's rejection. - # @!macro promises.touches - # - # @!macro promises.warn.blocks - # @!macro promises.warn.nil - # @!macro promises.param.timeout - # @!macro promises.param.timeout_value - # @return [Object, timeout_value] the reason, or timeout_value on timeout, or nil on fulfillment. - def reason(timeout = nil, timeout_value = nil) - if wait_until_resolved timeout - internal_state.reason - else - timeout_value - end - end - - # Returns triplet fulfilled?, value, reason. - # @!macro promises.touches - # - # @!macro promises.warn.blocks - # @!macro promises.param.timeout - # @return [Array(Boolean, Object, Object), nil] triplet of fulfilled?, value, reason, or nil - # on timeout. - def result(timeout = nil) - internal_state.result if wait_until_resolved timeout - end - - # @!macro promises.method.wait - # @raise [Exception] {#reason} on rejection - def wait!(timeout = nil) - result = wait_until_resolved!(timeout) - timeout ? 
result : self - end - - # @!macro promises.method.value - # @return [Object, nil, timeout_value] the value of the Future when fulfilled, - # or nil on rejection, - # or timeout_value on timeout. - # @raise [Exception] {#reason} on rejection - def value!(timeout = nil, timeout_value = nil) - if wait_until_resolved! timeout - internal_state.value - else - timeout_value - end - end - - # Allows rejected Future to be risen with `raise` method. - # If the reason is not an exception `Runtime.new(reason)` is returned. - # - # @example - # raise Promises.rejected_future(StandardError.new("boom")) - # raise Promises.rejected_future("or just boom") - # @raise [Concurrent::Error] when raising not rejected future - # @return [Exception] - def exception(*args) - raise Concurrent::Error, 'it is not rejected' unless rejected? - raise ArgumentError unless args.size <= 1 - reason = Array(internal_state.reason).flatten.compact - if reason.size > 1 - ex = Concurrent::MultipleErrors.new reason - ex.set_backtrace(caller) - ex - else - ex = if reason[0].respond_to? :exception - reason[0].exception(*args) - else - RuntimeError.new(reason[0]).exception(*args) - end - ex.set_backtrace Array(ex.backtrace) + caller - ex - end - end - - # @!macro promises.shortcut.on - # @return [Future] - def then(*args, &task) - then_on @DefaultExecutor, *args, &task - end - - # Chains the task to be executed asynchronously on executor after it fulfills. Does not run - # the task if it rejects. It will resolve though, triggering any dependent futures. - # - # @!macro promises.param.executor - # @!macro promises.param.args - # @!macro promise.param.task-future - # @return [Future] - # @yield [value, *args] to the task. - def then_on(executor, *args, &task) - ThenPromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future - end - - # @!macro promises.shortcut.on - # @return [Future] - def rescue(*args, &task) - rescue_on @DefaultExecutor, *args, &task - end - - # Chains the task to be executed asynchronously on executor after it rejects. Does not run - # the task if it fulfills. It will resolve though, triggering any dependent futures. - # - # @!macro promises.param.executor - # @!macro promises.param.args - # @!macro promise.param.task-future - # @return [Future] - # @yield [reason, *args] to the task. - def rescue_on(executor, *args, &task) - RescuePromise.new_blocked_by1(self, @DefaultExecutor, executor, args, &task).future - end - - # @!macro promises.method.zip - # @return [Future] - def zip(other) - if other.is_a?(Future) - ZipFuturesPromise.new_blocked_by2(self, other, @DefaultExecutor).future - else - ZipFutureEventPromise.new_blocked_by2(self, other, @DefaultExecutor).future - end - end - - alias_method :&, :zip - - # Creates a new event which will be resolved when the first of receiver, `event_or_future` - # resolves. Returning future will have value nil if event_or_future is event and resolves - # first. - # - # @return [Future] - def any(event_or_future) - AnyResolvedFuturePromise.new_blocked_by2(self, event_or_future, @DefaultExecutor).future - end - - alias_method :|, :any - - # Creates new future dependent on receiver which will not evaluate until touched, see {#touch}. - # In other words, it inserts delay into the chain of Futures making rest of it lazy evaluated. 
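`then` and `rescue` split the fulfilment and rejection paths as described above; a minimal sketch:

```ruby
require 'concurrent/promises'

f = Concurrent::Promises.future { Integer('oops') }   # raises ArgumentError
      .then { |i| i * 2 }                             # skipped: parent rejected
      .rescue { |error| error.class }                 # handles the rejection

f.value!   # => ArgumentError
```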
- # - # @return [Future] - def delay - event = DelayPromise.new(@DefaultExecutor).event - ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future - end - - # @!macro promise.method.schedule - # @return [Future] - def schedule(intended_time) - chain do - event = ScheduledPromise.new(@DefaultExecutor, intended_time).event - ZipFutureEventPromise.new_blocked_by2(self, event, @DefaultExecutor).future - end.flat - end - - # @!macro promises.method.with_default_executor - # @return [Future] - def with_default_executor(executor) - FutureWrapperPromise.new_blocked_by1(self, executor).future - end - - # Creates new future which will have result of the future returned by receiver. If receiver - # rejects it will have its rejection. - # - # @param [Integer] level how many levels of futures should flatten - # @return [Future] - def flat_future(level = 1) - FlatFuturePromise.new_blocked_by1(self, level, @DefaultExecutor).future - end - - alias_method :flat, :flat_future - - # Creates new event which will be resolved when the returned event by receiver is. - # Be careful if the receiver rejects it will just resolve since Event does not hold reason. - # - # @return [Event] - def flat_event - FlatEventPromise.new_blocked_by1(self, @DefaultExecutor).event - end - - # @!macro promises.shortcut.using - # @return [self] - def on_fulfillment(*args, &callback) - on_fulfillment_using @DefaultExecutor, *args, &callback - end - - # Stores the callback to be executed synchronously on resolving thread after it is - # fulfilled. Does nothing on rejection. - # - # @!macro promises.param.args - # @!macro promise.param.callback - # @return [self] - # @yield [value, *args] to the callback. - def on_fulfillment!(*args, &callback) - add_callback :callback_on_fulfillment, args, callback - end - - # Stores the callback to be executed asynchronously on executor after it is - # fulfilled. Does nothing on rejection. - # - # @!macro promises.param.executor - # @!macro promises.param.args - # @!macro promise.param.callback - # @return [self] - # @yield [value, *args] to the callback. - def on_fulfillment_using(executor, *args, &callback) - add_callback :async_callback_on_fulfillment, executor, args, callback - end - - # @!macro promises.shortcut.using - # @return [self] - def on_rejection(*args, &callback) - on_rejection_using @DefaultExecutor, *args, &callback - end - - # Stores the callback to be executed synchronously on resolving thread after it is - # rejected. Does nothing on fulfillment. - # - # @!macro promises.param.args - # @!macro promise.param.callback - # @return [self] - # @yield [reason, *args] to the callback. - def on_rejection!(*args, &callback) - add_callback :callback_on_rejection, args, callback - end - - # Stores the callback to be executed asynchronously on executor after it is - # rejected. Does nothing on fulfillment. - # - # @!macro promises.param.executor - # @!macro promises.param.args - # @!macro promise.param.callback - # @return [self] - # @yield [reason, *args] to the callback. - def on_rejection_using(executor, *args, &callback) - add_callback :async_callback_on_rejection, executor, args, callback - end - - # Allows to use futures as green threads. The receiver has to evaluate to a future which - # represents what should be done next. It basically flattens indefinitely until non Future - # values is returned which becomes result of the returned future. Any encountered exception - # will become reason of the returned future. 
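`flat_future` / `flat` (defined just above) unwrap a future that returns another future; a quick sketch:

```ruby
require 'concurrent/promises'

nested = Concurrent::Promises.future { Concurrent::Promises.future { :inner } }
nested.flat.value!   # => :inner
```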
- # - # @return [Future] - # @param [#call(value)] run_test - # an object which when called returns either Future to keep running with - # or nil, then the run completes with the value. - # The run_test can be used to extract the Future from deeper structure, - # or to distinguish Future which is a resulting value from a future - # which is suppose to continue running. - # @example - # body = lambda do |v| - # v += 1 - # v < 5 ? Promises.future(v, &body) : v - # end - # Promises.future(0, &body).run.value! # => 5 - def run(run_test = method(:run_test)) - RunFuturePromise.new_blocked_by1(self, @DefaultExecutor, run_test).future - end - - # @!visibility private - def apply(args, block) - internal_state.apply args, block - end - - # Converts future to event which is resolved when future is resolved by fulfillment or rejection. - # - # @return [Event] - def to_event - event = Promises.resolvable_event - ensure - chain_resolvable(event) - end - - # Returns self, since this is a future - # @return [Future] - def to_future - self - end - - # @return [String] Short string representation. - def to_s - if resolved? - format '%s with %s>', super[0..-2], (fulfilled? ? value : reason).inspect - else - super - end - end - - alias_method :inspect, :to_s - - private - - def run_test(v) - v if v.is_a?(Future) - end - - def rejected_resolution(raise_on_reassign, state) - if raise_on_reassign - if internal_state == RESERVED - raise Concurrent::MultipleAssignmentError.new( - "Future can be resolved only once. It is already reserved.") - else - raise Concurrent::MultipleAssignmentError.new( - "Future can be resolved only once. It's #{result}, trying to set #{state.result}.", - current_result: result, - new_result: state.result) - end - end - return false - end - - def wait_until_resolved!(timeout = nil) - result = wait_until_resolved(timeout) - raise self if rejected? - result - end - - def async_callback_on_fulfillment(state, executor, args, callback) - with_async(executor, state, args, callback) do |st, ar, cb| - callback_on_fulfillment st, ar, cb - end - end - - def async_callback_on_rejection(state, executor, args, callback) - with_async(executor, state, args, callback) do |st, ar, cb| - callback_on_rejection st, ar, cb - end - end - - def callback_on_fulfillment(state, args, callback) - state.apply args, callback if state.fulfilled? - end - - def callback_on_rejection(state, args, callback) - state.apply args, callback unless state.fulfilled? - end - - def callback_on_resolution(state, args, callback) - callback.call(*state.result, *args) - end - - end - - # Marker module of Future, Event resolved manually. - module Resolvable - include InternalStates - end - - # A Event which can be resolved by user. - class ResolvableEvent < Event - include Resolvable - - # @!macro raise_on_reassign - # @raise [MultipleAssignmentError] when already resolved and raise_on_reassign is true. - - # @!macro promise.param.raise_on_reassign - # @param [Boolean] raise_on_reassign should method raise exception if already resolved - # @return [self, false] false is returned when raise_on_reassign is false and the receiver - # is already resolved. - # - - # Makes the event resolved, which triggers all dependent futures. - # - # @!macro promise.param.raise_on_reassign - # @!macro promise.param.reserved - # @param [true, false] reserved - # Set to true if the resolvable is {#reserve}d by you, - # marks resolution of reserved resolvable events and futures explicitly. 
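The `run` helper described above is essentially the documented green-thread pattern; restated here as a runnable sketch:

```ruby
require 'concurrent/promises'

body = lambda do |v|
  v < 5 ? Concurrent::Promises.future(v + 1, &body) : v   # keep chaining until 5
end
Concurrent::Promises.future(0, &body).run.value!           # => 5
```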
- # Advanced feature, ignore unless you use {Resolvable#reserve} from edge. - def resolve(raise_on_reassign = true, reserved = false) - resolve_with RESOLVED, raise_on_reassign, reserved - end - - # Creates new event wrapping receiver, effectively hiding the resolve method. - # - # @return [Event] - def with_hidden_resolvable - @with_hidden_resolvable ||= EventWrapperPromise.new_blocked_by1(self, @DefaultExecutor).event - end - - # Behaves as {AbstractEventFuture#wait} but has one additional optional argument - # resolve_on_timeout. - # - # @param [true, false] resolve_on_timeout - # If it times out and the argument is true it will also resolve the event. - # @return [self, true, false] - # @see AbstractEventFuture#wait - def wait(timeout = nil, resolve_on_timeout = false) - super(timeout) or if resolve_on_timeout - # if it fails to resolve it was resolved in the meantime - # so return true as if there was no timeout - !resolve(false) - else - false - end - end - end - - # A Future which can be resolved by user. - class ResolvableFuture < Future - include Resolvable - - # Makes the future resolved with result of triplet `fulfilled?`, `value`, `reason`, - # which triggers all dependent futures. - # - # @param [true, false] fulfilled - # @param [Object] value - # @param [Object] reason - # @!macro promise.param.raise_on_reassign - # @!macro promise.param.reserved - def resolve(fulfilled = true, value = nil, reason = nil, raise_on_reassign = true, reserved = false) - resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason), raise_on_reassign, reserved) - end - - # Makes the future fulfilled with `value`, - # which triggers all dependent futures. - # - # @param [Object] value - # @!macro promise.param.raise_on_reassign - # @!macro promise.param.reserved - def fulfill(value, raise_on_reassign = true, reserved = false) - resolve_with Fulfilled.new(value), raise_on_reassign, reserved - end - - # Makes the future rejected with `reason`, - # which triggers all dependent futures. - # - # @param [Object] reason - # @!macro promise.param.raise_on_reassign - # @!macro promise.param.reserved - def reject(reason, raise_on_reassign = true, reserved = false) - resolve_with Rejected.new(reason), raise_on_reassign, reserved - end - - # Evaluates the block and sets its result as future's value fulfilling, if the block raises - # an exception the future rejects with it. - # - # @yield [*args] to the block. - # @yieldreturn [Object] value - # @return [self] - def evaluate_to(*args, &block) - promise.evaluate_to(*args, block) - end - - # Evaluates the block and sets its result as future's value fulfilling, if the block raises - # an exception the future rejects with it. - # - # @yield [*args] to the block. - # @yieldreturn [Object] value - # @return [self] - # @raise [Exception] also raise reason on rejection. - def evaluate_to!(*args, &block) - promise.evaluate_to(*args, block).wait! - end - - # @!macro promises.resolvable.resolve_on_timeout - # @param [::Array(true, Object, nil), ::Array(false, nil, Exception), nil] resolve_on_timeout - # If it times out and the argument is not nil it will also resolve the future - # to the provided resolution. - - # Behaves as {AbstractEventFuture#wait} but has one additional optional argument - # resolve_on_timeout. 
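`ResolvableFuture` exposes the resolution side directly (`fulfill`, `reject`, `evaluate_to`); an illustrative sketch of the API being deleted:

```ruby
require 'concurrent/promises'

rf = Concurrent::Promises.resolvable_future
rf.reject ArgumentError.new('nope')
rf.rejected?   # => true
rf.reason      # => #<ArgumentError: nope>

rf2 = Concurrent::Promises.resolvable_future
rf2.evaluate_to { 6 * 7 }
rf2.value      # => 42
```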
- # - # @!macro promises.resolvable.resolve_on_timeout - # @return [self, true, false] - # @see AbstractEventFuture#wait - def wait(timeout = nil, resolve_on_timeout = nil) - super(timeout) or if resolve_on_timeout - # if it fails to resolve it was resolved in the meantime - # so return true as if there was no timeout - !resolve(*resolve_on_timeout, false) - else - false - end - end - - # Behaves as {Future#wait!} but has one additional optional argument - # resolve_on_timeout. - # - # @!macro promises.resolvable.resolve_on_timeout - # @return [self, true, false] - # @raise [Exception] {#reason} on rejection - # @see Future#wait! - def wait!(timeout = nil, resolve_on_timeout = nil) - super(timeout) or if resolve_on_timeout - if resolve(*resolve_on_timeout, false) - false - else - # if it fails to resolve it was resolved in the meantime - # so return true as if there was no timeout - raise self if rejected? - true - end - else - false - end - end - - # Behaves as {Future#value} but has one additional optional argument - # resolve_on_timeout. - # - # @!macro promises.resolvable.resolve_on_timeout - # @return [Object, timeout_value, nil] - # @see Future#value - def value(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) - if wait_until_resolved timeout - internal_state.value - else - if resolve_on_timeout - unless resolve(*resolve_on_timeout, false) - # if it fails to resolve it was resolved in the meantime - # so return value as if there was no timeout - return internal_state.value - end - end - timeout_value - end - end - - # Behaves as {Future#value!} but has one additional optional argument - # resolve_on_timeout. - # - # @!macro promises.resolvable.resolve_on_timeout - # @return [Object, timeout_value, nil] - # @raise [Exception] {#reason} on rejection - # @see Future#value! - def value!(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) - if wait_until_resolved! timeout - internal_state.value - else - if resolve_on_timeout - unless resolve(*resolve_on_timeout, false) - # if it fails to resolve it was resolved in the meantime - # so return value as if there was no timeout - raise self if rejected? - return internal_state.value - end - end - timeout_value - end - end - - # Behaves as {Future#reason} but has one additional optional argument - # resolve_on_timeout. - # - # @!macro promises.resolvable.resolve_on_timeout - # @return [Exception, timeout_value, nil] - # @see Future#reason - def reason(timeout = nil, timeout_value = nil, resolve_on_timeout = nil) - if wait_until_resolved timeout - internal_state.reason - else - if resolve_on_timeout - unless resolve(*resolve_on_timeout, false) - # if it fails to resolve it was resolved in the meantime - # so return value as if there was no timeout - return internal_state.reason - end - end - timeout_value - end - end - - # Behaves as {Future#result} but has one additional optional argument - # resolve_on_timeout. - # - # @!macro promises.resolvable.resolve_on_timeout - # @return [::Array(Boolean, Object, Exception), nil] - # @see Future#result - def result(timeout = nil, resolve_on_timeout = nil) - if wait_until_resolved timeout - internal_state.result - else - if resolve_on_timeout - unless resolve(*resolve_on_timeout, false) - # if it fails to resolve it was resolved in the meantime - # so return value as if there was no timeout - internal_state.result - end - end - # otherwise returns nil - end - end - - # Creates new future wrapping receiver, effectively hiding the resolve method and similar. 
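The `resolve_on_timeout` argument threaded through `wait`/`value`/`reason`/`result` above resolves the future with a fallback result when the timeout is hit; note that the timed-out call itself still returns the timeout value. A hedged sketch:

```ruby
require 'concurrent/promises'

rf = Concurrent::Promises.resolvable_future
# nothing resolves rf, so after 0.1 s it is resolved to the [fulfilled, value, reason] fallback
rf.value(0.1, :timed_out, [true, :fallback, nil])   # => :timed_out
rf.value                                            # => :fallback
rf.fulfilled?                                       # => true
```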
- # - # @return [Future] - def with_hidden_resolvable - @with_hidden_resolvable ||= FutureWrapperPromise.new_blocked_by1(self, @DefaultExecutor).future - end - end - - # @abstract - # @private - class AbstractPromise < Synchronization::Object - safe_initialization! - include InternalStates - - def initialize(future) - super() - @Future = future - end - - def future - @Future - end - - alias_method :event, :future - - def default_executor - future.default_executor - end - - def state - future.state - end - - def touch - end - - def to_s - format '%s %s>', super[0..-2], @Future - end - - alias_method :inspect, :to_s - - def delayed_because - nil - end - - private - - def resolve_with(new_state, raise_on_reassign = true) - @Future.resolve_with(new_state, raise_on_reassign) - end - - # @return [Future] - def evaluate_to(*args, block) - resolve_with Fulfilled.new(block.call(*args)) - rescue Exception => error - resolve_with Rejected.new(error) - raise error unless error.is_a?(StandardError) - end - end - - class ResolvableEventPromise < AbstractPromise - def initialize(default_executor) - super ResolvableEvent.new(self, default_executor) - end - end - - class ResolvableFuturePromise < AbstractPromise - def initialize(default_executor) - super ResolvableFuture.new(self, default_executor) - end - - public :evaluate_to - end - - # @abstract - class InnerPromise < AbstractPromise - end - - # @abstract - class BlockedPromise < InnerPromise - - private_class_method :new - - def self.new_blocked_by1(blocker, *args, &block) - blocker_delayed = blocker.promise.delayed_because - promise = new(blocker_delayed, 1, *args, &block) - blocker.add_callback_notify_blocked promise, 0 - promise - end - - def self.new_blocked_by2(blocker1, blocker2, *args, &block) - blocker_delayed1 = blocker1.promise.delayed_because - blocker_delayed2 = blocker2.promise.delayed_because - delayed = if blocker_delayed1 && blocker_delayed2 - # TODO (pitr-ch 23-Dec-2016): use arrays when we know it will not grow (only flat adds delay) - LockFreeStack.of2(blocker_delayed1, blocker_delayed2) - else - blocker_delayed1 || blocker_delayed2 - end - promise = new(delayed, 2, *args, &block) - blocker1.add_callback_notify_blocked promise, 0 - blocker2.add_callback_notify_blocked promise, 1 - promise - end - - def self.new_blocked_by(blockers, *args, &block) - delayed = blockers.reduce(nil) { |d, f| add_delayed d, f.promise.delayed_because } - promise = new(delayed, blockers.size, *args, &block) - blockers.each_with_index { |f, i| f.add_callback_notify_blocked promise, i } - promise - end - - def self.add_delayed(delayed1, delayed2) - if delayed1 && delayed2 - delayed1.push delayed2 - delayed1 - else - delayed1 || delayed2 - end - end - - def initialize(delayed, blockers_count, future) - super(future) - @Delayed = delayed - @Countdown = AtomicFixnum.new blockers_count - end - - def on_blocker_resolution(future, index) - countdown = process_on_blocker_resolution(future, index) - resolvable = resolvable?(countdown, future, index) - - on_resolvable(future, index) if resolvable - end - - def delayed_because - @Delayed - end - - def touch - clear_and_propagate_touch - end - - # for inspection only - def blocked_by - blocked_by = [] - ObjectSpace.each_object(AbstractEventFuture) { |o| blocked_by.push o if o.blocks.include? self } - blocked_by - end - - private - - def clear_and_propagate_touch(stack_or_element = @Delayed) - return if stack_or_element.nil? - - if stack_or_element.is_a? 
LockFreeStack - stack_or_element.clear_each { |element| clear_and_propagate_touch element } - else - stack_or_element.touch unless stack_or_element.nil? # if still present - end - end - - # @return [true,false] if resolvable - def resolvable?(countdown, future, index) - countdown.zero? - end - - def process_on_blocker_resolution(future, index) - @Countdown.decrement - end - - def on_resolvable(resolved_future, index) - raise NotImplementedError - end - end - - # @abstract - class BlockedTaskPromise < BlockedPromise - def initialize(delayed, blockers_count, default_executor, executor, args, &task) - raise ArgumentError, 'no block given' unless block_given? - super delayed, 1, Future.new(self, default_executor) - @Executor = executor - @Task = task - @Args = args - end - - def executor - @Executor - end - end - - class ThenPromise < BlockedTaskPromise - private - - def initialize(delayed, blockers_count, default_executor, executor, args, &task) - super delayed, blockers_count, default_executor, executor, args, &task - end - - def on_resolvable(resolved_future, index) - if resolved_future.fulfilled? - Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| - evaluate_to lambda { future.apply args, task } - end - else - resolve_with resolved_future.internal_state - end - end - end - - class RescuePromise < BlockedTaskPromise - private - - def initialize(delayed, blockers_count, default_executor, executor, args, &task) - super delayed, blockers_count, default_executor, executor, args, &task - end - - def on_resolvable(resolved_future, index) - if resolved_future.rejected? - Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| - evaluate_to lambda { future.apply args, task } - end - else - resolve_with resolved_future.internal_state - end - end - end - - class ChainPromise < BlockedTaskPromise - private - - def on_resolvable(resolved_future, index) - if Future === resolved_future - Concurrent.executor(@Executor).post(resolved_future, @Args, @Task) do |future, args, task| - evaluate_to(*future.result, *args, task) - end - else - Concurrent.executor(@Executor).post(@Args, @Task) do |args, task| - evaluate_to(*args, task) - end - end - end - end - - # will be immediately resolved - class ImmediateEventPromise < InnerPromise - def initialize(default_executor) - super Event.new(self, default_executor).resolve_with(RESOLVED) - end - end - - class ImmediateFuturePromise < InnerPromise - def initialize(default_executor, fulfilled, value, reason) - super Future.new(self, default_executor). - resolve_with(fulfilled ? Fulfilled.new(value) : Rejected.new(reason)) - end - end - - class AbstractFlatPromise < BlockedPromise - - def initialize(delayed_because, blockers_count, event_or_future) - delayed = LockFreeStack.of1(self) - super(delayed, blockers_count, event_or_future) - # noinspection RubyArgCount - @Touched = AtomicBoolean.new false - @DelayedBecause = delayed_because || LockFreeStack.new - - event_or_future.add_callback_clear_delayed_node delayed.peek - end - - def touch - if @Touched.make_true - clear_and_propagate_touch @DelayedBecause - end - end - - private - - def touched? - @Touched.value - end - - def on_resolvable(resolved_future, index) - resolve_with resolved_future.internal_state - end - - def resolvable?(countdown, future, index) - !@Future.internal_state.resolved? && super(countdown, future, index) - end - - def add_delayed_of(future) - delayed = future.promise.delayed_because - if touched? 
- clear_and_propagate_touch delayed - else - BlockedPromise.add_delayed @DelayedBecause, delayed - clear_and_propagate_touch @DelayedBecause if touched? - end - end - - end - - class FlatEventPromise < AbstractFlatPromise - - private - - def initialize(delayed, blockers_count, default_executor) - super delayed, 2, Event.new(self, default_executor) - end - - def process_on_blocker_resolution(future, index) - countdown = super(future, index) - if countdown.nonzero? - internal_state = future.internal_state - - unless internal_state.fulfilled? - resolve_with RESOLVED - return countdown - end - - value = internal_state.value - case value - when AbstractEventFuture - add_delayed_of value - value.add_callback_notify_blocked self, nil - countdown - else - resolve_with RESOLVED - end - end - countdown - end - - end - - class FlatFuturePromise < AbstractFlatPromise - - private - - def initialize(delayed, blockers_count, levels, default_executor) - raise ArgumentError, 'levels has to be higher than 0' if levels < 1 - # flat promise may result to a future having delayed futures, therefore we have to have empty stack - # to be able to add new delayed futures - super delayed || LockFreeStack.new, 1 + levels, Future.new(self, default_executor) - end - - def process_on_blocker_resolution(future, index) - countdown = super(future, index) - if countdown.nonzero? - internal_state = future.internal_state - - unless internal_state.fulfilled? - resolve_with internal_state - return countdown - end - - value = internal_state.value - case value - when AbstractEventFuture - add_delayed_of value - value.add_callback_notify_blocked self, nil - countdown - else - evaluate_to(lambda { raise TypeError, "returned value #{value.inspect} is not a Future" }) - end - end - countdown - end - - end - - class RunFuturePromise < AbstractFlatPromise - - private - - def initialize(delayed, blockers_count, default_executor, run_test) - super delayed, 1, Future.new(self, default_executor) - @RunTest = run_test - end - - def process_on_blocker_resolution(future, index) - internal_state = future.internal_state - - unless internal_state.fulfilled? 
- resolve_with internal_state - return 0 - end - - value = internal_state.value - continuation_future = @RunTest.call value - - if continuation_future - add_delayed_of continuation_future - continuation_future.add_callback_notify_blocked self, nil - else - resolve_with internal_state - end - - 1 - end - end - - class ZipEventEventPromise < BlockedPromise - def initialize(delayed, blockers_count, default_executor) - super delayed, 2, Event.new(self, default_executor) - end - - private - - def on_resolvable(resolved_future, index) - resolve_with RESOLVED - end - end - - class ZipFutureEventPromise < BlockedPromise - def initialize(delayed, blockers_count, default_executor) - super delayed, 2, Future.new(self, default_executor) - @result = nil - end - - private - - def process_on_blocker_resolution(future, index) - # first blocking is future, take its result - @result = future.internal_state if index == 0 - # super has to be called after above to piggyback on volatile @Countdown - super future, index - end - - def on_resolvable(resolved_future, index) - resolve_with @result - end - end - - class EventWrapperPromise < BlockedPromise - def initialize(delayed, blockers_count, default_executor) - super delayed, 1, Event.new(self, default_executor) - end - - private - - def on_resolvable(resolved_future, index) - resolve_with RESOLVED - end - end - - class FutureWrapperPromise < BlockedPromise - def initialize(delayed, blockers_count, default_executor) - super delayed, 1, Future.new(self, default_executor) - end - - private - - def on_resolvable(resolved_future, index) - resolve_with resolved_future.internal_state - end - end - - class ZipFuturesPromise < BlockedPromise - - private - - def initialize(delayed, blockers_count, default_executor) - super(delayed, blockers_count, Future.new(self, default_executor)) - @Resolutions = ::Array.new(blockers_count, nil) - - on_resolvable nil, nil if blockers_count == 0 - end - - def process_on_blocker_resolution(future, index) - # TODO (pitr-ch 18-Dec-2016): Can we assume that array will never break under parallel access when never re-sized? 
- @Resolutions[index] = future.internal_state # has to be set before countdown in super - super future, index - end - - def on_resolvable(resolved_future, index) - all_fulfilled = true - values = ::Array.new(@Resolutions.size) - reasons = ::Array.new(@Resolutions.size) - - @Resolutions.each_with_index do |internal_state, i| - fulfilled, values[i], reasons[i] = internal_state.result - all_fulfilled &&= fulfilled - end - - if all_fulfilled - resolve_with FulfilledArray.new(values) - else - resolve_with PartiallyRejected.new(values, reasons) - end - end - end - - class ZipEventsPromise < BlockedPromise - - private - - def initialize(delayed, blockers_count, default_executor) - super delayed, blockers_count, Event.new(self, default_executor) - - on_resolvable nil, nil if blockers_count == 0 - end - - def on_resolvable(resolved_future, index) - resolve_with RESOLVED - end - end - - # @abstract - class AbstractAnyPromise < BlockedPromise - end - - class AnyResolvedEventPromise < AbstractAnyPromise - - private - - def initialize(delayed, blockers_count, default_executor) - super delayed, blockers_count, Event.new(self, default_executor) - end - - def resolvable?(countdown, future, index) - true - end - - def on_resolvable(resolved_future, index) - resolve_with RESOLVED, false - end - end - - class AnyResolvedFuturePromise < AbstractAnyPromise - - private - - def initialize(delayed, blockers_count, default_executor) - super delayed, blockers_count, Future.new(self, default_executor) - end - - def resolvable?(countdown, future, index) - true - end - - def on_resolvable(resolved_future, index) - resolve_with resolved_future.internal_state, false - end - end - - class AnyFulfilledFuturePromise < AnyResolvedFuturePromise - - private - - def resolvable?(countdown, event_or_future, index) - (event_or_future.is_a?(Event) ? event_or_future.resolved? : event_or_future.fulfilled?) || - # inlined super from BlockedPromise - countdown.zero? - end - end - - class DelayPromise < InnerPromise - - def initialize(default_executor) - event = Event.new(self, default_executor) - @Delayed = LockFreeStack.of1(self) - super event - event.add_callback_clear_delayed_node @Delayed.peek - end - - def touch - @Future.resolve_with RESOLVED - end - - def delayed_because - @Delayed - end - - end - - class ScheduledPromise < InnerPromise - def intended_time - @IntendedTime - end - - def inspect - "#{to_s[0..-2]} intended_time: #{@IntendedTime}>" - end - - private - - def initialize(default_executor, intended_time) - super Event.new(self, default_executor) - - @IntendedTime = intended_time - - in_seconds = begin - now = Time.now - schedule_time = if @IntendedTime.is_a? 
Time - @IntendedTime - else - now + @IntendedTime - end - [0, schedule_time.to_f - now.to_f].max - end - - Concurrent.global_timer_set.post(in_seconds) do - @Future.resolve_with RESOLVED - end - end - end - - extend FactoryMethods - - private_constant :AbstractPromise, - :ResolvableEventPromise, - :ResolvableFuturePromise, - :InnerPromise, - :BlockedPromise, - :BlockedTaskPromise, - :ThenPromise, - :RescuePromise, - :ChainPromise, - :ImmediateEventPromise, - :ImmediateFuturePromise, - :AbstractFlatPromise, - :FlatFuturePromise, - :FlatEventPromise, - :RunFuturePromise, - :ZipEventEventPromise, - :ZipFutureEventPromise, - :EventWrapperPromise, - :FutureWrapperPromise, - :ZipFuturesPromise, - :ZipEventsPromise, - :AbstractAnyPromise, - :AnyResolvedFuturePromise, - :AnyFulfilledFuturePromise, - :AnyResolvedEventPromise, - :DelayPromise, - :ScheduledPromise - - - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/re_include.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/re_include.rb deleted file mode 100644 index 600bc6a53568c..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/re_include.rb +++ /dev/null @@ -1,60 +0,0 @@ -module Concurrent - - # Methods form module A included to a module B, which is already included into class C, - # will not be visible in the C class. If this module is extended to B then A's methods - # are correctly made visible to C. - # - # @example - # module A - # def a - # :a - # end - # end - # - # module B1 - # end - # - # class C1 - # include B1 - # end - # - # module B2 - # extend Concurrent::ReInclude - # end - # - # class C2 - # include B2 - # end - # - # B1.send :include, A - # B2.send :include, A - # - # C1.new.respond_to? :a # => false - # C2.new.respond_to? :a # => true - # - # @!visibility private - module ReInclude - # @!visibility private - def included(base) - (@re_include_to_bases ||= []) << [:include, base] - super(base) - end - - # @!visibility private - def extended(base) - (@re_include_to_bases ||= []) << [:extend, base] - super(base) - end - - # @!visibility private - def include(*modules) - result = super(*modules) - modules.reverse.each do |module_being_included| - (@re_include_to_bases ||= []).each do |method, mod| - mod.send method, module_being_included - end - end - result - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/scheduled_task.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/scheduled_task.rb deleted file mode 100644 index 429fc0683c6f2..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/scheduled_task.rb +++ /dev/null @@ -1,331 +0,0 @@ -require 'concurrent/constants' -require 'concurrent/errors' -require 'concurrent/configuration' -require 'concurrent/ivar' -require 'concurrent/collection/copy_on_notify_observer_set' -require 'concurrent/utility/monotonic_time' - -require 'concurrent/options' - -module Concurrent - - # `ScheduledTask` is a close relative of `Concurrent::Future` but with one - # important difference: A `Future` is set to execute as soon as possible - # whereas a `ScheduledTask` is set to execute after a specified delay. 
This - # implementation is loosely based on Java's - # [ScheduledExecutorService](http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ScheduledExecutorService.html). - # It is a more feature-rich variant of {Concurrent.timer}. - # - # The *intended* schedule time of task execution is set on object construction - # with the `delay` argument. The delay is a numeric (floating point or integer) - # representing a number of seconds in the future. Any other value or a numeric - # equal to or less than zero will result in an exception. The *actual* schedule - # time of task execution is set when the `execute` method is called. - # - # The constructor can also be given zero or more processing options. Currently - # the only supported options are those recognized by the - # [Dereferenceable](Dereferenceable) module. - # - # The final constructor argument is a block representing the task to be performed. - # If no block is given an `ArgumentError` will be raised. - # - # **States** - # - # `ScheduledTask` mixes in the [Obligation](Obligation) module thus giving it - # "future" behavior. This includes the expected lifecycle states. `ScheduledTask` - # has one additional state, however. While the task (block) is being executed the - # state of the object will be `:processing`. This additional state is necessary - # because it has implications for task cancellation. - # - # **Cancellation** - # - # A `:pending` task can be cancelled using the `#cancel` method. A task in any - # other state, including `:processing`, cannot be cancelled. The `#cancel` - # method returns a boolean indicating the success of the cancellation attempt. - # A cancelled `ScheduledTask` cannot be restarted. It is immutable. - # - # **Obligation and Observation** - # - # The result of a `ScheduledTask` can be obtained either synchronously or - # asynchronously. `ScheduledTask` mixes in both the [Obligation](Obligation) - # module and the - # [Observable](http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html) - # module from the Ruby standard library. With one exception `ScheduledTask` - # behaves identically to [Future](Observable) with regard to these modules. - # - # @!macro copy_options - # - # @example Basic usage - # - # require 'concurrent/scheduled_task' - # require 'csv' - # require 'open-uri' - # - # class Ticker - # def get_year_end_closing(symbol, year, api_key) - # uri = "https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY&symbol=#{symbol}&apikey=#{api_key}&datatype=csv" - # data = [] - # csv = URI.parse(uri).read - # if csv.include?('call frequency') - # return :rate_limit_exceeded - # end - # CSV.parse(csv, headers: true) do |row| - # data << row['close'].to_f if row['timestamp'].include?(year.to_s) - # end - # year_end = data.first - # year_end - # rescue => e - # p e - # end - # end - # - # api_key = ENV['ALPHAVANTAGE_KEY'] - # abort(error_message) unless api_key - # - # # Future - # price = Concurrent::Future.execute{ Ticker.new.get_year_end_closing('TWTR', 2013, api_key) } - # price.state #=> :pending - # price.pending? #=> true - # price.value(0) #=> nil (does not block) - # - # sleep(1) # do other stuff - # - # price.value #=> 63.65 (after blocking if necessary) - # price.state #=> :fulfilled - # price.fulfilled? #=> true - # price.value #=> 63.65 - # - # @example Successful task execution - # - # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } - # task.state #=> :unscheduled - # task.execute - # task.state #=> pending - # - # # wait for it... 
- # sleep(3) - # - # task.unscheduled? #=> false - # task.pending? #=> false - # task.fulfilled? #=> true - # task.rejected? #=> false - # task.value #=> 'What does the fox say?' - # - # @example One line creation and execution - # - # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' }.execute - # task.state #=> pending - # - # task = Concurrent::ScheduledTask.execute(2){ 'What do you get when you multiply 6 by 9?' } - # task.state #=> pending - # - # @example Failed task execution - # - # task = Concurrent::ScheduledTask.execute(2){ raise StandardError.new('Call me maybe?') } - # task.pending? #=> true - # - # # wait for it... - # sleep(3) - # - # task.unscheduled? #=> false - # task.pending? #=> false - # task.fulfilled? #=> false - # task.rejected? #=> true - # task.value #=> nil - # task.reason #=> # - # - # @example Task execution with observation - # - # observer = Class.new{ - # def update(time, value, reason) - # puts "The task completed at #{time} with value '#{value}'" - # end - # }.new - # - # task = Concurrent::ScheduledTask.new(2){ 'What does the fox say?' } - # task.add_observer(observer) - # task.execute - # task.pending? #=> true - # - # # wait for it... - # sleep(3) - # - # #>> The task completed at 2013-11-07 12:26:09 -0500 with value 'What does the fox say?' - # - # @!macro monotonic_clock_warning - # - # @see Concurrent.timer - class ScheduledTask < IVar - include Comparable - - # The executor on which to execute the task. - # @!visibility private - attr_reader :executor - - # Schedule a task for execution at a specified future time. - # - # @param [Float] delay the number of seconds to wait for before executing the task - # - # @yield the task to be performed - # - # @!macro executor_and_deref_options - # - # @option opts [object, Array] :args zero or more arguments to be passed the task - # block on execution - # - # @raise [ArgumentError] When no block is given - # @raise [ArgumentError] When given a time that is in the past - def initialize(delay, opts = {}, &task) - raise ArgumentError.new('no block given') unless block_given? - raise ArgumentError.new('seconds must be greater than zero') if delay.to_f < 0.0 - - super(NULL, opts, &nil) - - synchronize do - ns_set_state(:unscheduled) - @parent = opts.fetch(:timer_set, Concurrent.global_timer_set) - @args = get_arguments_from(opts) - @delay = delay.to_f - @task = task - @time = nil - @executor = Options.executor_from_options(opts) || Concurrent.global_io_executor - self.observers = Collection::CopyOnNotifyObserverSet.new - end - end - - # The `delay` value given at instanciation. - # - # @return [Float] the initial delay. - def initial_delay - synchronize { @delay } - end - - # The monotonic time at which the the task is scheduled to be executed. - # - # @return [Float] the schedule time or nil if `unscheduled` - def schedule_time - synchronize { @time } - end - - # Comparator which orders by schedule time. - # - # @!visibility private - def <=>(other) - schedule_time <=> other.schedule_time - end - - # Has the task been cancelled? - # - # @return [Boolean] true if the task is in the given state else false - def cancelled? - synchronize { ns_check_state?(:cancelled) } - end - - # In the task execution in progress? - # - # @return [Boolean] true if the task is in the given state else false - def processing? - synchronize { ns_check_state?(:processing) } - end - - # Cancel this task and prevent it from executing. A task can only be - # cancelled if it is pending or unscheduled. 
- # - # @return [Boolean] true if successfully cancelled else false - def cancel - if compare_and_set_state(:cancelled, :pending, :unscheduled) - complete(false, nil, CancelledOperationError.new) - # To avoid deadlocks this call must occur outside of #synchronize - # Changing the state above should prevent redundant calls - @parent.send(:remove_task, self) - else - false - end - end - - # Reschedule the task using the original delay and the current time. - # A task can only be reset while it is `:pending`. - # - # @return [Boolean] true if successfully rescheduled else false - def reset - synchronize{ ns_reschedule(@delay) } - end - - # Reschedule the task using the given delay and the current time. - # A task can only be reset while it is `:pending`. - # - # @param [Float] delay the number of seconds to wait for before executing the task - # - # @return [Boolean] true if successfully rescheduled else false - # - # @raise [ArgumentError] When given a time that is in the past - def reschedule(delay) - delay = delay.to_f - raise ArgumentError.new('seconds must be greater than zero') if delay < 0.0 - synchronize{ ns_reschedule(delay) } - end - - # Execute an `:unscheduled` `ScheduledTask`. Immediately sets the state to `:pending` - # and starts counting down toward execution. Does nothing if the `ScheduledTask` is - # in any state other than `:unscheduled`. - # - # @return [ScheduledTask] a reference to `self` - def execute - if compare_and_set_state(:pending, :unscheduled) - synchronize{ ns_schedule(@delay) } - end - self - end - - # Create a new `ScheduledTask` object with the given block, execute it, and return the - # `:pending` object. - # - # @param [Float] delay the number of seconds to wait for before executing the task - # - # @!macro executor_and_deref_options - # - # @return [ScheduledTask] the newly created `ScheduledTask` in the `:pending` state - # - # @raise [ArgumentError] if no block is given - def self.execute(delay, opts = {}, &task) - new(delay, opts, &task).execute - end - - # Execute the task. - # - # @!visibility private - def process_task - safe_execute(@task, @args) - end - - protected :set, :try_set, :fail, :complete - - protected - - # Schedule the task using the given delay and the current time. - # - # @param [Float] delay the number of seconds to wait for before executing the task - # - # @return [Boolean] true if successfully rescheduled else false - # - # @!visibility private - def ns_schedule(delay) - @delay = delay - @time = Concurrent.monotonic_time + @delay - @parent.send(:post_task, self) - end - - # Reschedule the task using the given delay and the current time. - # A task can only be reset while it is `:pending`. 
- # - # @param [Float] delay the number of seconds to wait for before executing the task - # - # @return [Boolean] true if successfully rescheduled else false - # - # @!visibility private - def ns_reschedule(delay) - return false unless ns_check_state?(:pending) - @parent.send(:remove_task, self) && ns_schedule(delay) - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/set.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/set.rb deleted file mode 100644 index eee4effdfd2d6..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/set.rb +++ /dev/null @@ -1,64 +0,0 @@ -require 'concurrent/utility/engine' -require 'concurrent/thread_safe/util' -require 'set' - -module Concurrent - - # @!macro concurrent_set - # - # A thread-safe subclass of Set. This version locks against the object - # itself for every method call, ensuring only one thread can be reading - # or writing at a time. This includes iteration methods like `#each`. - # - # @note `a += b` is **not** a **thread-safe** operation on - # `Concurrent::Set`. It reads Set `a`, then it creates new `Concurrent::Set` - # which is union of `a` and `b`, then it writes the union to `a`. - # The read and write are independent operations they do not form a single atomic - # operation therefore when two `+=` operations are executed concurrently updates - # may be lost. Use `#merge` instead. - # - # @see http://ruby-doc.org/stdlib-2.4.0/libdoc/set/rdoc/Set.html Ruby standard library `Set` - - # @!macro internal_implementation_note - SetImplementation = case - when Concurrent.on_cruby? - # The CRuby implementation of Set is written in Ruby itself and is - # not thread safe for certain methods. - require 'monitor' - require 'concurrent/thread_safe/util/data_structures' - - class CRubySet < ::Set - end - - ThreadSafe::Util.make_synchronized_on_cruby CRubySet - CRubySet - - when Concurrent.on_jruby? - require 'jruby/synchronized' - - class JRubySet < ::Set - include JRuby::Synchronized - end - - JRubySet - - when Concurrent.on_truffleruby? - require 'concurrent/thread_safe/util/data_structures' - - class TruffleRubySet < ::Set - end - - ThreadSafe::Util.make_synchronized_on_truffleruby TruffleRubySet - TruffleRubySet - - else - warn 'Possibly unsupported Ruby implementation' - ::Set - end - private_constant :SetImplementation - - # @!macro concurrent_set - class Set < SetImplementation - end -end - diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/settable_struct.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/settable_struct.rb deleted file mode 100644 index 99b85619fd370..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/settable_struct.rb +++ /dev/null @@ -1,139 +0,0 @@ -require 'concurrent/errors' -require 'concurrent/synchronization/abstract_struct' -require 'concurrent/synchronization/lockable_object' - -module Concurrent - - # An thread-safe, write-once variation of Ruby's standard `Struct`. - # Each member can have its value set at most once, either at construction - # or any time thereafter. Attempting to assign a value to a member - # that has already been set will result in a `Concurrent::ImmutabilityError`. 
- # - # @see http://ruby-doc.org/core/Struct.html Ruby standard library `Struct` - # @see http://en.wikipedia.org/wiki/Final_(Java) Java `final` keyword - module SettableStruct - include Synchronization::AbstractStruct - - # @!macro struct_values - def values - synchronize { ns_values } - end - alias_method :to_a, :values - - # @!macro struct_values_at - def values_at(*indexes) - synchronize { ns_values_at(indexes) } - end - - # @!macro struct_inspect - def inspect - synchronize { ns_inspect } - end - alias_method :to_s, :inspect - - # @!macro struct_merge - def merge(other, &block) - synchronize { ns_merge(other, &block) } - end - - # @!macro struct_to_h - def to_h - synchronize { ns_to_h } - end - - # @!macro struct_get - def [](member) - synchronize { ns_get(member) } - end - - # @!macro struct_equality - def ==(other) - synchronize { ns_equality(other) } - end - - # @!macro struct_each - def each(&block) - return enum_for(:each) unless block_given? - synchronize { ns_each(&block) } - end - - # @!macro struct_each_pair - def each_pair(&block) - return enum_for(:each_pair) unless block_given? - synchronize { ns_each_pair(&block) } - end - - # @!macro struct_select - def select(&block) - return enum_for(:select) unless block_given? - synchronize { ns_select(&block) } - end - - # @!macro struct_set - # - # @raise [Concurrent::ImmutabilityError] if the given member has already been set - def []=(member, value) - if member.is_a? Integer - length = synchronize { @values.length } - if member >= length - raise IndexError.new("offset #{member} too large for struct(size:#{length})") - end - synchronize do - unless @values[member].nil? - raise Concurrent::ImmutabilityError.new('struct member has already been set') - end - @values[member] = value - end - else - send("#{member}=", value) - end - rescue NoMethodError - raise NameError.new("no member '#{member}' in struct") - end - - private - - # @!visibility private - def initialize_copy(original) - synchronize do - super(original) - ns_initialize_copy - end - end - - # @!macro struct_new - def self.new(*args, &block) - clazz_name = nil - if args.length == 0 - raise ArgumentError.new('wrong number of arguments (0 for 1+)') - elsif args.length > 0 && args.first.is_a?(String) - clazz_name = args.shift - end - FACTORY.define_struct(clazz_name, args, &block) - end - - FACTORY = Class.new(Synchronization::LockableObject) do - def define_struct(name, members, &block) - synchronize do - clazz = Synchronization::AbstractStruct.define_struct_class(SettableStruct, Synchronization::LockableObject, name, members, &block) - members.each_with_index do |member, index| - clazz.send :remove_method, member if clazz.instance_methods.include? member - clazz.send(:define_method, member) do - synchronize { @values[index] } - end - clazz.send(:define_method, "#{member}=") do |value| - synchronize do - unless @values[index].nil? 
- raise Concurrent::ImmutabilityError.new('struct member has already been set') - end - @values[index] = value - end - end - end - clazz - end - end - end.new - private_constant :FACTORY - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization.rb deleted file mode 100644 index 6d8cf4bd584d4..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization.rb +++ /dev/null @@ -1,13 +0,0 @@ -require 'concurrent/utility/native_extension_loader' # load native parts first - -require 'concurrent/synchronization/object' -require 'concurrent/synchronization/lockable_object' -require 'concurrent/synchronization/condition' -require 'concurrent/synchronization/lock' - -module Concurrent - # @!visibility private - module Synchronization - end -end - diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb deleted file mode 100644 index d9050b312fd45..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb +++ /dev/null @@ -1,102 +0,0 @@ -require 'concurrent/utility/native_extension_loader' # load native parts first -require 'concurrent/utility/monotonic_time' -require 'concurrent/synchronization/object' - -module Concurrent - module Synchronization - - # @!visibility private - class AbstractLockableObject < Synchronization::Object - - protected - - # @!macro synchronization_object_method_synchronize - # - # @yield runs the block synchronized against this object, - # equivalent of java's `synchronize(this) {}` - # @note can by made public in descendants if required by `public :synchronize` - def synchronize - raise NotImplementedError - end - - # @!macro synchronization_object_method_ns_wait_until - # - # Wait until condition is met or timeout passes, - # protects against spurious wake-ups. - # @param [Numeric, nil] timeout in seconds, `nil` means no timeout - # @yield condition to be met - # @yieldreturn [true, false] - # @return [true, false] if condition met - # @note only to be used inside synchronized block - # @note to provide direct access to this method in a descendant add method - # ``` - # def wait_until(timeout = nil, &condition) - # synchronize { ns_wait_until(timeout, &condition) } - # end - # ``` - def ns_wait_until(timeout = nil, &condition) - if timeout - wait_until = Concurrent.monotonic_time + timeout - loop do - now = Concurrent.monotonic_time - condition_result = condition.call - return condition_result if now >= wait_until || condition_result - ns_wait wait_until - now - end - else - ns_wait timeout until condition.call - true - end - end - - # @!macro synchronization_object_method_ns_wait - # - # Wait until another thread calls #signal or #broadcast, - # spurious wake-ups can happen. 
- # - # @param [Numeric, nil] timeout in seconds, `nil` means no timeout - # @return [self] - # @note only to be used inside synchronized block - # @note to provide direct access to this method in a descendant add method - # ``` - # def wait(timeout = nil) - # synchronize { ns_wait(timeout) } - # end - # ``` - def ns_wait(timeout = nil) - raise NotImplementedError - end - - # @!macro synchronization_object_method_ns_signal - # - # Signal one waiting thread. - # @return [self] - # @note only to be used inside synchronized block - # @note to provide direct access to this method in a descendant add method - # ``` - # def signal - # synchronize { ns_signal } - # end - # ``` - def ns_signal - raise NotImplementedError - end - - # @!macro synchronization_object_method_ns_broadcast - # - # Broadcast to all waiting threads. - # @return [self] - # @note only to be used inside synchronized block - # @note to provide direct access to this method in a descendant add method - # ``` - # def broadcast - # synchronize { ns_broadcast } - # end - # ``` - def ns_broadcast - raise NotImplementedError - end - - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb deleted file mode 100644 index 7cd2decf99ca5..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb +++ /dev/null @@ -1,22 +0,0 @@ -module Concurrent - module Synchronization - - # @!visibility private - # @!macro internal_implementation_note - class AbstractObject - def initialize - # nothing to do - end - - # @!visibility private - # @abstract - def full_memory_barrier - raise NotImplementedError - end - - def self.attr_volatile(*names) - raise NotImplementedError - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb deleted file mode 100644 index 1fe90c1649bf8..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb +++ /dev/null @@ -1,171 +0,0 @@ -module Concurrent - module Synchronization - - # @!visibility private - # @!macro internal_implementation_note - module AbstractStruct - - # @!visibility private - def initialize(*values) - super() - ns_initialize(*values) - end - - # @!macro struct_length - # - # Returns the number of struct members. - # - # @return [Fixnum] the number of struct members - def length - self.class::MEMBERS.length - end - alias_method :size, :length - - # @!macro struct_members - # - # Returns the struct members as an array of symbols. 
- # - # @return [Array] the struct members as an array of symbols - def members - self.class::MEMBERS.dup - end - - protected - - # @!macro struct_values - # - # @!visibility private - def ns_values - @values.dup - end - - # @!macro struct_values_at - # - # @!visibility private - def ns_values_at(indexes) - @values.values_at(*indexes) - end - - # @!macro struct_to_h - # - # @!visibility private - def ns_to_h - length.times.reduce({}){|memo, i| memo[self.class::MEMBERS[i]] = @values[i]; memo} - end - - # @!macro struct_get - # - # @!visibility private - def ns_get(member) - if member.is_a? Integer - if member >= @values.length - raise IndexError.new("offset #{member} too large for struct(size:#{@values.length})") - end - @values[member] - else - send(member) - end - rescue NoMethodError - raise NameError.new("no member '#{member}' in struct") - end - - # @!macro struct_equality - # - # @!visibility private - def ns_equality(other) - self.class == other.class && self.values == other.values - end - - # @!macro struct_each - # - # @!visibility private - def ns_each - values.each{|value| yield value } - end - - # @!macro struct_each_pair - # - # @!visibility private - def ns_each_pair - @values.length.times do |index| - yield self.class::MEMBERS[index], @values[index] - end - end - - # @!macro struct_select - # - # @!visibility private - def ns_select - values.select{|value| yield value } - end - - # @!macro struct_inspect - # - # @!visibility private - def ns_inspect - struct = pr_underscore(self.class.ancestors[1]) - clazz = ((self.class.to_s =~ /^#" - end - - # @!macro struct_merge - # - # @!visibility private - def ns_merge(other, &block) - self.class.new(*self.to_h.merge(other, &block).values) - end - - # @!visibility private - def ns_initialize_copy - @values = @values.map do |val| - begin - val.clone - rescue TypeError - val - end - end - end - - # @!visibility private - def pr_underscore(clazz) - word = clazz.to_s.dup # dup string to workaround JRuby 9.2.0.0 bug https://github.com/jruby/jruby/issues/5229 - word.gsub!(/::/, '/') - word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2') - word.gsub!(/([a-z\d])([A-Z])/,'\1_\2') - word.tr!("-", "_") - word.downcase! - word - end - - # @!visibility private - def self.define_struct_class(parent, base, name, members, &block) - clazz = Class.new(base || Object) do - include parent - self.const_set(:MEMBERS, members.collect{|member| member.to_s.to_sym}.freeze) - def ns_initialize(*values) - raise ArgumentError.new('struct size differs') if values.length > length - @values = values.fill(nil, values.length..length-1) - end - end - unless name.nil? - begin - parent.send :remove_const, name if parent.const_defined?(name, false) - parent.const_set(name, clazz) - clazz - rescue NameError - raise NameError.new("identifier #{name} needs to be constant") - end - end - members.each_with_index do |member, index| - clazz.send :remove_method, member if clazz.instance_methods.include? member - clazz.send(:define_method, member) do - @values[index] - end - end - clazz.class_exec(&block) unless block.nil? 
- clazz.singleton_class.send :alias_method, :[], :new - clazz - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/condition.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/condition.rb deleted file mode 100644 index 5daa68be8ab54..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/condition.rb +++ /dev/null @@ -1,62 +0,0 @@ -require 'concurrent/synchronization/lockable_object' - -module Concurrent - module Synchronization - - # @!visibility private - # TODO (pitr-ch 04-Dec-2016): should be in edge - class Condition < LockableObject - safe_initialization! - - # TODO (pitr 12-Sep-2015): locks two objects, improve - # TODO (pitr 26-Sep-2015): study - # http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/8-b132/java/util/concurrent/locks/AbstractQueuedSynchronizer.java#AbstractQueuedSynchronizer.Node - - singleton_class.send :alias_method, :private_new, :new - private_class_method :new - - def initialize(lock) - super() - @Lock = lock - end - - def wait(timeout = nil) - @Lock.synchronize { ns_wait(timeout) } - end - - def ns_wait(timeout = nil) - synchronize { super(timeout) } - end - - def wait_until(timeout = nil, &condition) - @Lock.synchronize { ns_wait_until(timeout, &condition) } - end - - def ns_wait_until(timeout = nil, &condition) - synchronize { super(timeout, &condition) } - end - - def signal - @Lock.synchronize { ns_signal } - end - - def ns_signal - synchronize { super } - end - - def broadcast - @Lock.synchronize { ns_broadcast } - end - - def ns_broadcast - synchronize { super } - end - end - - class LockableObject < LockableObjectImplementation - def new_condition - Condition.private_new(self) - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/full_memory_barrier.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/full_memory_barrier.rb deleted file mode 100644 index 139e08d854e01..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/full_memory_barrier.rb +++ /dev/null @@ -1,29 +0,0 @@ -require 'concurrent/utility/native_extension_loader' # load native parts first - -module Concurrent - module Synchronization - case - when Concurrent.on_cruby? - def self.full_memory_barrier - # relying on undocumented behavior of CRuby, GVL acquire has lock which ensures visibility of ivars - # https://github.com/ruby/ruby/blob/ruby_2_2/thread_pthread.c#L204-L211 - end - - when Concurrent.on_jruby? - require 'concurrent/utility/native_extension_loader' - def self.full_memory_barrier - JRubyAttrVolatile.full_memory_barrier - end - - when Concurrent.on_truffleruby? 
- def self.full_memory_barrier - TruffleRuby.full_memory_barrier - end - - else - warn 'Possibly unsupported Ruby implementation' - def self.full_memory_barrier - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb deleted file mode 100644 index 76930461bdb8c..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb +++ /dev/null @@ -1,15 +0,0 @@ -require 'concurrent/utility/native_extension_loader' # load native parts first - -module Concurrent - module Synchronization - - if Concurrent.on_jruby? - - # @!visibility private - # @!macro internal_implementation_note - class JRubyLockableObject < AbstractLockableObject - - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lock.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lock.rb deleted file mode 100644 index f90e0b5f76872..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lock.rb +++ /dev/null @@ -1,38 +0,0 @@ -require 'concurrent/synchronization/lockable_object' - -module Concurrent - module Synchronization - - # @!visibility private - # TODO (pitr-ch 04-Dec-2016): should be in edge - class Lock < LockableObject - # TODO use JavaReentrantLock on JRuby - - public :synchronize - - def wait(timeout = nil) - synchronize { ns_wait(timeout) } - end - - public :ns_wait - - def wait_until(timeout = nil, &condition) - synchronize { ns_wait_until(timeout, &condition) } - end - - public :ns_wait_until - - def signal - synchronize { ns_signal } - end - - public :ns_signal - - def broadcast - synchronize { ns_broadcast } - end - - public :ns_broadcast - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb deleted file mode 100644 index 08d2ff66cd824..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb +++ /dev/null @@ -1,75 +0,0 @@ -require 'concurrent/utility/engine' -require 'concurrent/synchronization/abstract_lockable_object' -require 'concurrent/synchronization/mutex_lockable_object' -require 'concurrent/synchronization/jruby_lockable_object' - -module Concurrent - module Synchronization - - # @!visibility private - # @!macro internal_implementation_note - LockableObjectImplementation = case - when Concurrent.on_cruby? - MutexLockableObject - when Concurrent.on_jruby? - JRubyLockableObject - when Concurrent.on_truffleruby? - MutexLockableObject - else - warn 'Possibly unsupported Ruby implementation' - MonitorLockableObject - end - private_constant :LockableObjectImplementation - - # Safe synchronization under any Ruby implementation. - # It provides methods like {#synchronize}, {#wait}, {#signal} and {#broadcast}. 
- # Provides a single layer which can improve its implementation over time without changes needed to - # the classes using it. Use {Synchronization::Object} not this abstract class. - # - # @note this object does not support usage together with - # [`Thread#wakeup`](http://ruby-doc.org/core/Thread.html#method-i-wakeup) - # and [`Thread#raise`](http://ruby-doc.org/core/Thread.html#method-i-raise). - # `Thread#sleep` and `Thread#wakeup` will work as expected but mixing `Synchronization::Object#wait` and - # `Thread#wakeup` will not work on all platforms. - # - # @see Event implementation as an example of this class use - # - # @example simple - # class AnClass < Synchronization::Object - # def initialize - # super - # synchronize { @value = 'asd' } - # end - # - # def value - # synchronize { @value } - # end - # end - # - # @!visibility private - class LockableObject < LockableObjectImplementation - - # TODO (pitr 12-Sep-2015): make private for c-r, prohibit subclassing - # TODO (pitr 12-Sep-2015): we inherit too much ourselves :/ - - # @!method initialize(*args, &block) - # @!macro synchronization_object_method_initialize - - # @!method synchronize - # @!macro synchronization_object_method_synchronize - - # @!method wait_until(timeout = nil, &condition) - # @!macro synchronization_object_method_ns_wait_until - - # @!method wait(timeout = nil) - # @!macro synchronization_object_method_ns_wait - - # @!method signal - # @!macro synchronization_object_method_ns_signal - - # @!method broadcast - # @!macro synchronization_object_method_ns_broadcast - - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb deleted file mode 100644 index acc9745a2e292..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb +++ /dev/null @@ -1,89 +0,0 @@ -require 'concurrent/synchronization/abstract_lockable_object' - -module Concurrent - module Synchronization - - # @!visibility private - # @!macro internal_implementation_note - module ConditionSignalling - protected - - def ns_signal - @__Condition__.signal - self - end - - def ns_broadcast - @__Condition__.broadcast - self - end - end - - - # @!visibility private - # @!macro internal_implementation_note - class MutexLockableObject < AbstractLockableObject - include ConditionSignalling - - safe_initialization! - - def initialize - super() - @__Lock__ = ::Mutex.new - @__Condition__ = ::ConditionVariable.new - end - - def initialize_copy(other) - super - @__Lock__ = ::Mutex.new - @__Condition__ = ::ConditionVariable.new - end - - protected - - def synchronize - if @__Lock__.owned? - yield - else - @__Lock__.synchronize { yield } - end - end - - def ns_wait(timeout = nil) - @__Condition__.wait @__Lock__, timeout - self - end - end - - # @!visibility private - # @!macro internal_implementation_note - class MonitorLockableObject < AbstractLockableObject - include ConditionSignalling - - safe_initialization! 
- - def initialize - super() - @__Lock__ = ::Monitor.new - @__Condition__ = @__Lock__.new_cond - end - - def initialize_copy(other) - super - @__Lock__ = ::Monitor.new - @__Condition__ = @__Lock__.new_cond - end - - protected - - def synchronize # TODO may be a problem with lock.synchronize { lock.wait } - @__Lock__.synchronize { yield } - end - - def ns_wait(timeout = nil) - @__Condition__.wait timeout - self - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/object.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/object.rb deleted file mode 100644 index e839c9f18890f..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/object.rb +++ /dev/null @@ -1,151 +0,0 @@ -require 'concurrent/utility/native_extension_loader' # load native parts first - -require 'concurrent/synchronization/safe_initialization' -require 'concurrent/synchronization/volatile' -require 'concurrent/atomic/atomic_reference' - -module Concurrent - module Synchronization - - # Abstract object providing final, volatile, ans CAS extensions to build other concurrent abstractions. - # - final instance variables see {Object.safe_initialization!} - # - volatile instance variables see {Object.attr_volatile} - # - volatile instance variables see {Object.attr_atomic} - # @!visibility private - class Object < AbstractObject - include Volatile - - # TODO make it a module if possible - - # @!method self.attr_volatile(*names) - # Creates methods for reading and writing (as `attr_accessor` does) to a instance variable with - # volatile (Java) semantic. The instance variable should be accessed only through generated methods. - # - # @param [::Array] names of the instance variables to be volatile - # @return [::Array] names of defined method names - - # Has to be called by children. - def initialize - super - __initialize_atomic_fields__ - end - - def self.safe_initialization! - extend SafeInitialization unless safe_initialization? - end - - def self.safe_initialization? - self.singleton_class < SafeInitialization - end - - # For testing purposes, quite slow. Injects assert code to new method which will raise if class instance contains - # any instance variables with CamelCase names and isn't {.safe_initialization?}. - # @raise when offend found - # @return [true] - def self.ensure_safe_initialization_when_final_fields_are_present - Object.class_eval do - def self.new(*args, &block) - object = super(*args, &block) - ensure - has_final_field = object.instance_variables.any? { |v| v.to_s =~ /^@[A-Z]/ } - if has_final_field && !safe_initialization? - raise "there was an instance of #{object.class} with final field but not marked with safe_initialization!" - end - end - end - true - end - - # Creates methods for reading and writing to a instance variable with - # volatile (Java) semantic as {.attr_volatile} does. - # The instance variable should be accessed oly through generated methods. - # This method generates following methods: `value`, `value=(new_value) #=> new_value`, - # `swap_value(new_value) #=> old_value`, - # `compare_and_set_value(expected, value) #=> true || false`, `update_value(&block)`. - # @param [::Array] names of the instance variables to be volatile with CAS. - # @return [::Array] names of defined method names. 
- # @!macro attr_atomic - # @!method $1 - # @return [Object] The $1. - # @!method $1=(new_$1) - # Set the $1. - # @return [Object] new_$1. - # @!method swap_$1(new_$1) - # Set the $1 to new_$1 and return the old $1. - # @return [Object] old $1 - # @!method compare_and_set_$1(expected_$1, new_$1) - # Sets the $1 to new_$1 if the current $1 is expected_$1 - # @return [true, false] - # @!method update_$1(&block) - # Updates the $1 using the block. - # @yield [Object] Calculate a new $1 using given (old) $1 - # @yieldparam [Object] old $1 - # @return [Object] new $1 - def self.attr_atomic(*names) - @__atomic_fields__ ||= [] - @__atomic_fields__ += names - safe_initialization! - define_initialize_atomic_fields - - names.each do |name| - ivar = :"@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }}" - class_eval <<-RUBY, __FILE__, __LINE__ + 1 - def #{name} - #{ivar}.get - end - - def #{name}=(value) - #{ivar}.set value - end - - def swap_#{name}(value) - #{ivar}.swap value - end - - def compare_and_set_#{name}(expected, value) - #{ivar}.compare_and_set expected, value - end - - def update_#{name}(&block) - #{ivar}.update(&block) - end - RUBY - end - names.flat_map { |n| [n, :"#{n}=", :"swap_#{n}", :"compare_and_set_#{n}", :"update_#{n}"] } - end - - # @param [true, false] inherited should inherited volatile with CAS fields be returned? - # @return [::Array] Returns defined volatile with CAS fields on this class. - def self.atomic_attributes(inherited = true) - @__atomic_fields__ ||= [] - ((superclass.atomic_attributes if superclass.respond_to?(:atomic_attributes) && inherited) || []) + @__atomic_fields__ - end - - # @return [true, false] is the attribute with name atomic? - def self.atomic_attribute?(name) - atomic_attributes.include? name - end - - private - - def self.define_initialize_atomic_fields - assignments = @__atomic_fields__.map do |name| - "@Atomic#{name.to_s.gsub(/(?:^|_)(.)/) { $1.upcase }} = Concurrent::AtomicReference.new(nil)" - end.join("\n") - - class_eval <<-RUBY, __FILE__, __LINE__ + 1 - def __initialize_atomic_fields__ - super - #{assignments} - end - RUBY - end - - private_class_method :define_initialize_atomic_fields - - def __initialize_atomic_fields__ - end - - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/safe_initialization.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/safe_initialization.rb deleted file mode 100644 index f785e35229fa3..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/safe_initialization.rb +++ /dev/null @@ -1,36 +0,0 @@ -require 'concurrent/synchronization/full_memory_barrier' - -module Concurrent - module Synchronization - - # @!visibility private - # @!macro internal_implementation_note - # - # By extending this module, a class and all its children are marked to be constructed safely. Meaning that - # all writes (ivar initializations) are made visible to all readers of newly constructed object. It ensures - # same behaviour as Java's final fields. - # - # Due to using Kernel#extend, the module is not included again if already present in the ancestors, - # which avoids extra overhead. 
- # - # @example - # class AClass < Concurrent::Synchronization::Object - # extend Concurrent::Synchronization::SafeInitialization - # - # def initialize - # @AFinalValue = 'value' # published safely, #foo will never return nil - # end - # - # def foo - # @AFinalValue - # end - # end - module SafeInitialization - def new(*args, &block) - super(*args, &block) - ensure - Concurrent::Synchronization.full_memory_barrier - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/volatile.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/volatile.rb deleted file mode 100644 index 46e8ba6a48ca1..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/synchronization/volatile.rb +++ /dev/null @@ -1,101 +0,0 @@ -require 'concurrent/utility/native_extension_loader' # load native parts first -require 'concurrent/utility/engine' -require 'concurrent/synchronization/full_memory_barrier' - -module Concurrent - module Synchronization - - # Volatile adds the attr_volatile class method when included. - # - # @example - # class Foo - # include Concurrent::Synchronization::Volatile - # - # attr_volatile :bar - # - # def initialize - # self.bar = 1 - # end - # end - # - # foo = Foo.new - # foo.bar - # => 1 - # foo.bar = 2 - # => 2 - # - # @!visibility private - module Volatile - def self.included(base) - base.extend(ClassMethods) - end - - def full_memory_barrier - Synchronization.full_memory_barrier - end - - module ClassMethods - if Concurrent.on_cruby? - def attr_volatile(*names) - names.each do |name| - ivar = :"@volatile_#{name}" - class_eval <<-RUBY, __FILE__, __LINE__ + 1 - def #{name} - #{ivar} - end - - def #{name}=(value) - #{ivar} = value - end - RUBY - end - names.map { |n| [n, :"#{n}="] }.flatten - end - - elsif Concurrent.on_jruby? - def attr_volatile(*names) - names.each do |name| - ivar = :"@volatile_#{name}" - - class_eval <<-RUBY, __FILE__, __LINE__ + 1 - def #{name} - ::Concurrent::Synchronization::JRubyAttrVolatile.instance_variable_get_volatile(self, :#{ivar}) - end - - def #{name}=(value) - ::Concurrent::Synchronization::JRubyAttrVolatile.instance_variable_set_volatile(self, :#{ivar}, value) - end - RUBY - - end - names.map { |n| [n, :"#{n}="] }.flatten - end - - else - warn 'Possibly unsupported Ruby implementation' unless Concurrent.on_truffleruby? 
- - def attr_volatile(*names) - names.each do |name| - ivar = :"@volatile_#{name}" - - class_eval <<-RUBY, __FILE__, __LINE__ + 1 - def #{name} - ::Concurrent::Synchronization.full_memory_barrier - #{ivar} - end - - def #{name}=(value) - #{ivar} = value - ::Concurrent::Synchronization.full_memory_barrier - end - RUBY - end - - names.map { |n| [n, :"#{n}="] }.flatten - end - end - end - - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb deleted file mode 100644 index 019d84382d609..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb +++ /dev/null @@ -1,47 +0,0 @@ -require 'delegate' -require 'monitor' - -module Concurrent - # This class provides a trivial way to synchronize all calls to a given object - # by wrapping it with a `Delegator` that performs `Monitor#enter/exit` calls - # around the delegated `#send`. Example: - # - # array = [] # not thread-safe on many impls - # array = SynchronizedDelegator.new([]) # thread-safe - # - # A simple `Monitor` provides a very coarse-grained way to synchronize a given - # object, in that it will cause synchronization for methods that have no need - # for it, but this is a trivial way to get thread-safety where none may exist - # currently on some implementations. - # - # This class is currently being considered for inclusion into stdlib, via - # https://bugs.ruby-lang.org/issues/8556 - # - # @!visibility private - class SynchronizedDelegator < SimpleDelegator - def setup - @old_abort = Thread.abort_on_exception - Thread.abort_on_exception = true - end - - def teardown - Thread.abort_on_exception = @old_abort - end - - def initialize(obj) - __setobj__(obj) - @monitor = Monitor.new - end - - def method_missing(method, *args, &block) - monitor = @monitor - begin - monitor.enter - super - ensure - monitor.exit - end - end - - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util.rb deleted file mode 100644 index c67084a26fa3a..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util.rb +++ /dev/null @@ -1,16 +0,0 @@ -module Concurrent - - # @!visibility private - module ThreadSafe - - # @!visibility private - module Util - - # TODO (pitr-ch 15-Oct-2016): migrate to Utility::NativeInteger - FIXNUM_BIT_SIZE = (0.size * 8) - 2 - MAX_INT = (2 ** FIXNUM_BIT_SIZE) - 1 - # TODO (pitr-ch 15-Oct-2016): migrate to Utility::ProcessorCounter - CPU_COUNT = 16 # is there a way to determine this? 
- end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb deleted file mode 100644 index 7a6e8d5c0e132..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb +++ /dev/null @@ -1,74 +0,0 @@ -require 'concurrent/thread_safe/util' -require 'concurrent/thread_safe/util/striped64' - -module Concurrent - - # @!visibility private - module ThreadSafe - - # @!visibility private - module Util - - # A Ruby port of the Doug Lea's jsr166e.LondAdder class version 1.8 - # available in public domain. - # - # Original source code available here: - # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/LongAdder.java?revision=1.8 - # - # One or more variables that together maintain an initially zero - # sum. When updates (method +add+) are contended across threads, - # the set of variables may grow dynamically to reduce contention. - # Method +sum+ returns the current total combined across the - # variables maintaining the sum. - # - # This class is usually preferable to single +Atomic+ reference when - # multiple threads update a common sum that is used for purposes such - # as collecting statistics, not for fine-grained synchronization - # control. Under low update contention, the two classes have similar - # characteristics. But under high contention, expected throughput of - # this class is significantly higher, at the expense of higher space - # consumption. - # - # @!visibility private - class Adder < Striped64 - # Adds the given value. - def add(x) - if (current_cells = cells) || !cas_base_computed {|current_base| current_base + x} - was_uncontended = true - hash = hash_code - unless current_cells && (cell = current_cells.volatile_get_by_hash(hash)) && (was_uncontended = cell.cas_computed {|current_value| current_value + x}) - retry_update(x, hash, was_uncontended) {|current_value| current_value + x} - end - end - end - - def increment - add(1) - end - - def decrement - add(-1) - end - - # Returns the current sum. The returned value is _NOT_ an - # atomic snapshot: Invocation in the absence of concurrent - # updates returns an accurate result, but concurrent updates that - # occur while the sum is being calculated might not be - # incorporated. 
- def sum - x = base - if current_cells = cells - current_cells.each do |cell| - x += cell.value if cell - end - end - x - end - - def reset - internal_reset(0) - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb deleted file mode 100644 index a07678df2e6d3..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb +++ /dev/null @@ -1,81 +0,0 @@ -require 'concurrent/thread_safe/util' -require 'concurrent/thread_safe/util/volatile' -require 'concurrent/utility/engine' - -module Concurrent - - # @!visibility private - module ThreadSafe - - # @!visibility private - module Util - - # Provides a cheapest possible (mainly in terms of memory usage) +Mutex+ - # with the +ConditionVariable+ bundled in. - # - # Usage: - # class A - # include CheapLockable - # - # def do_exlusively - # cheap_synchronize { yield } - # end - # - # def wait_for_something - # cheap_synchronize do - # cheap_wait until resource_available? - # do_something - # cheap_broadcast # wake up others - # end - # end - # end - # - # @!visibility private - module CheapLockable - private - if Concurrent.on_jruby? - # Use Java's native synchronized (this) { wait(); notifyAll(); } to avoid the overhead of the extra Mutex objects - require 'jruby' - - def cheap_synchronize - JRuby.reference0(self).synchronized { yield } - end - - def cheap_wait - JRuby.reference0(self).wait - end - - def cheap_broadcast - JRuby.reference0(self).notify_all - end - else - require 'thread' - - extend Volatile - attr_volatile :mutex - - # Non-reentrant Mutex#syncrhonize - def cheap_synchronize - true until (my_mutex = mutex) || cas_mutex(nil, my_mutex = Mutex.new) - my_mutex.synchronize { yield } - end - - # Releases this object's +cheap_synchronize+ lock and goes to sleep waiting for other threads to +cheap_broadcast+, reacquires the lock on wakeup. - # Must only be called in +cheap_broadcast+'s block. - def cheap_wait - conditional_variable = @conditional_variable ||= ConditionVariable.new - conditional_variable.wait(mutex) - end - - # Wakes up all threads waiting for this object's +cheap_synchronize+ lock. - # Must only be called in +cheap_broadcast+'s block. - def cheap_broadcast - if conditional_variable = @conditional_variable - conditional_variable.broadcast - end - end - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb deleted file mode 100644 index 01eb98f4aa603..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb +++ /dev/null @@ -1,52 +0,0 @@ -require 'concurrent/thread_safe/util' -require 'concurrent/utility/engine' - -# Shim for TruffleRuby.synchronized -if Concurrent.on_truffleruby? 
&& !TruffleRuby.respond_to?(:synchronized) - module TruffleRuby - def self.synchronized(object, &block) - Truffle::System.synchronized(object, &block) - end - end -end - -module Concurrent - module ThreadSafe - module Util - def self.make_synchronized_on_cruby(klass) - klass.class_eval do - def initialize(*args, &block) - @_monitor = Monitor.new - super - end - - def initialize_copy(other) - # make sure a copy is not sharing a monitor with the original object! - @_monitor = Monitor.new - super - end - end - - klass.superclass.instance_methods(false).each do |method| - klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 - def #{method}(*args) - monitor = @_monitor - monitor or raise("BUG: Internal monitor was not properly initialized. Please report this to the concurrent-ruby developers.") - monitor.synchronize { super } - end - RUBY - end - end - - def self.make_synchronized_on_truffleruby(klass) - klass.superclass.instance_methods(false).each do |method| - klass.class_eval <<-RUBY, __FILE__, __LINE__ + 1 - def #{method}(*args, &block) - TruffleRuby.synchronized(self) { super(*args, &block) } - end - RUBY - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb deleted file mode 100644 index b54be39c4cc84..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb +++ /dev/null @@ -1,38 +0,0 @@ -require 'concurrent/thread_safe/util' -require 'concurrent/tuple' - -module Concurrent - - # @!visibility private - module ThreadSafe - - # @!visibility private - module Util - - # @!visibility private - class PowerOfTwoTuple < Concurrent::Tuple - - def initialize(size) - raise ArgumentError, "size must be a power of 2 (#{size.inspect} provided)" unless size > 0 && size & (size - 1) == 0 - super(size) - end - - def hash_to_index(hash) - (size - 1) & hash - end - - def volatile_get_by_hash(hash) - volatile_get(hash_to_index(hash)) - end - - def volatile_set_by_hash(hash, value) - volatile_set(hash_to_index(hash), value) - end - - def next_in_size_table - self.class.new(size << 1) - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb deleted file mode 100644 index 4169c3d36607b..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb +++ /dev/null @@ -1,246 +0,0 @@ -require 'concurrent/thread_safe/util' -require 'concurrent/thread_safe/util/power_of_two_tuple' -require 'concurrent/thread_safe/util/volatile' -require 'concurrent/thread_safe/util/xor_shift_random' - -module Concurrent - - # @!visibility private - module ThreadSafe - - # @!visibility private - module Util - - # A Ruby port of the Doug Lea's jsr166e.Striped64 class version 1.6 - # available in public domain. 
- # - # Original source code available here: - # http://gee.cs.oswego.edu/cgi-bin/viewcvs.cgi/jsr166/src/jsr166e/Striped64.java?revision=1.6 - # - # Class holding common representation and mechanics for classes supporting - # dynamic striping on 64bit values. - # - # This class maintains a lazily-initialized table of atomically updated - # variables, plus an extra +base+ field. The table size is a power of two. - # Indexing uses masked per-thread hash codes. Nearly all methods on this - # class are private, accessed directly by subclasses. - # - # Table entries are of class +Cell+; a variant of AtomicLong padded to - # reduce cache contention on most processors. Padding is overkill for most - # Atomics because they are usually irregularly scattered in memory and thus - # don't interfere much with each other. But Atomic objects residing in - # arrays will tend to be placed adjacent to each other, and so will most - # often share cache lines (with a huge negative performance impact) without - # this precaution. - # - # In part because +Cell+s are relatively large, we avoid creating them until - # they are needed. When there is no contention, all updates are made to the - # +base+ field. Upon first contention (a failed CAS on +base+ update), the - # table is initialized to size 2. The table size is doubled upon further - # contention until reaching the nearest power of two greater than or equal - # to the number of CPUS. Table slots remain empty (+nil+) until they are - # needed. - # - # A single spinlock (+busy+) is used for initializing and resizing the - # table, as well as populating slots with new +Cell+s. There is no need for - # a blocking lock: When the lock is not available, threads try other slots - # (or the base). During these retries, there is increased contention and - # reduced locality, which is still better than alternatives. - # - # Per-thread hash codes are initialized to random values. Contention and/or - # table collisions are indicated by failed CASes when performing an update - # operation (see method +retry_update+). Upon a collision, if the table size - # is less than the capacity, it is doubled in size unless some other thread - # holds the lock. If a hashed slot is empty, and lock is available, a new - # +Cell+ is created. Otherwise, if the slot exists, a CAS is tried. Retries - # proceed by "double hashing", using a secondary hash (XorShift) to try to - # find a free slot. - # - # The table size is capped because, when there are more threads than CPUs, - # supposing that each thread were bound to a CPU, there would exist a - # perfect hash function mapping threads to slots that eliminates collisions. - # When we reach capacity, we search for this mapping by randomly varying the - # hash codes of colliding threads. Because search is random, and collisions - # only become known via CAS failures, convergence can be slow, and because - # threads are typically not bound to CPUS forever, may not occur at all. - # However, despite these limitations, observed contention rates are - # typically low in these cases. - # - # It is possible for a +Cell+ to become unused when threads that once hashed - # to it terminate, as well as in the case where doubling the table causes no - # thread to hash to it under expanded mask. We do not try to detect or - # remove such cells, under the assumption that for long-running instances, - # observed contention levels will recur, so the cells will eventually be - # needed again; and for short-lived ones, it does not matter. 
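The striping design described above is the machinery behind the Adder deleted earlier in this patch; a rough usage sketch of that counter (illustrative only, against the vendored concurrent-ruby 1.2.2 internals):

    require 'concurrent/thread_safe/util/adder'

    adder = Concurrent::ThreadSafe::Util::Adder.new
    workers = 8.times.map do
      Thread.new { 10_000.times { adder.increment } }
    end
    workers.each(&:join)

    adder.sum    # => 80000 once all updates have completed
    adder.reset  # resets base and every cell to 0

Under low contention every update hits the +base+ field; only contended CAS failures grow the cell table, which is why throughput stays high without paying the striping cost up front.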
- # - # @!visibility private - class Striped64 - - # Padded variant of AtomicLong supporting only raw accesses plus CAS. - # The +value+ field is placed between pads, hoping that the JVM doesn't - # reorder them. - # - # Optimisation note: It would be possible to use a release-only - # form of CAS here, if it were provided. - # - # @!visibility private - class Cell < Concurrent::AtomicReference - - alias_method :cas, :compare_and_set - - def cas_computed - cas(current_value = value, yield(current_value)) - end - - # @!visibility private - def self.padding - # TODO: this only adds padding after the :value slot, need to find a way to add padding before the slot - # TODO (pitr-ch 28-Jul-2018): the padding instance vars may not be created - # hide from yardoc in a method - attr_reader :padding_0, :padding_1, :padding_2, :padding_3, :padding_4, :padding_5, :padding_6, :padding_7, :padding_8, :padding_9, :padding_10, :padding_11 - end - padding - end - - extend Volatile - attr_volatile :cells, # Table of cells. When non-null, size is a power of 2. - :base, # Base value, used mainly when there is no contention, but also as a fallback during table initialization races. Updated via CAS. - :busy # Spinlock (locked via CAS) used when resizing and/or creating Cells. - - alias_method :busy?, :busy - - def initialize - super() - self.busy = false - self.base = 0 - end - - # Handles cases of updates involving initialization, resizing, - # creating new Cells, and/or contention. See above for - # explanation. This method suffers the usual non-modularity - # problems of optimistic retry code, relying on rechecked sets of - # reads. - # - # Arguments: - # [+x+] - # the value - # [+hash_code+] - # hash code used - # [+x+] - # false if CAS failed before call - def retry_update(x, hash_code, was_uncontended) # :yields: current_value - hash = hash_code - collided = false # True if last slot nonempty - while true - if current_cells = cells - if !(cell = current_cells.volatile_get_by_hash(hash)) - if busy? - collided = false - else # Try to attach new Cell - if try_to_install_new_cell(Cell.new(x), hash) # Optimistically create and try to insert new cell - break - else - redo # Slot is now non-empty - end - end - elsif !was_uncontended # CAS already known to fail - was_uncontended = true # Continue after rehash - elsif cell.cas_computed {|current_value| yield current_value} - break - elsif current_cells.size >= CPU_COUNT || cells != current_cells # At max size or stale - collided = false - elsif collided && expand_table_unless_stale(current_cells) - collided = false - redo # Retry with expanded table - else - collided = true - end - hash = XorShiftRandom.xorshift(hash) - - elsif try_initialize_cells(x, hash) || cas_base_computed {|current_base| yield current_base} - break - end - end - self.hash_code = hash - end - - private - # Static per-thread hash code key. Shared across all instances to - # reduce Thread locals pollution and because adjustments due to - # collisions in one table are likely to be appropriate for - # others. - THREAD_LOCAL_KEY = "#{name}.hash_code".to_sym - - # A thread-local hash code accessor. The code is initially - # random, but may be set to a different value upon collisions. - def hash_code - Thread.current[THREAD_LOCAL_KEY] ||= XorShiftRandom.get - end - - def hash_code=(hash) - Thread.current[THREAD_LOCAL_KEY] = hash - end - - # Sets base and all +cells+ to the given value. 
- def internal_reset(initial_value) - current_cells = cells - self.base = initial_value - if current_cells - current_cells.each do |cell| - cell.value = initial_value if cell - end - end - end - - def cas_base_computed - cas_base(current_base = base, yield(current_base)) - end - - def free? - !busy? - end - - def try_initialize_cells(x, hash) - if free? && !cells - try_in_busy do - unless cells # Recheck under lock - new_cells = PowerOfTwoTuple.new(2) - new_cells.volatile_set_by_hash(hash, Cell.new(x)) - self.cells = new_cells - end - end - end - end - - def expand_table_unless_stale(current_cells) - try_in_busy do - if current_cells == cells # Recheck under lock - new_cells = current_cells.next_in_size_table - current_cells.each_with_index {|x, i| new_cells.volatile_set(i, x)} - self.cells = new_cells - end - end - end - - def try_to_install_new_cell(new_cell, hash) - try_in_busy do - # Recheck under lock - if (current_cells = cells) && !current_cells.volatile_get(i = current_cells.hash_to_index(hash)) - current_cells.volatile_set(i, new_cell) - end - end - end - - def try_in_busy - if cas_busy(false, true) - begin - yield - ensure - self.busy = false - end - end - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb deleted file mode 100644 index cdac2a396a791..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb +++ /dev/null @@ -1,75 +0,0 @@ -require 'concurrent/thread_safe/util' - -module Concurrent - - # @!visibility private - module ThreadSafe - - # @!visibility private - module Util - - # @!visibility private - module Volatile - - # Provides +volatile+ (in the JVM's sense) attribute accessors implemented - # atop of +Concurrent::AtomicReference+. - # - # Usage: - # class Foo - # extend Concurrent::ThreadSafe::Util::Volatile - # attr_volatile :foo, :bar - # - # def initialize(bar) - # super() # must super() into parent initializers before using the volatile attribute accessors - # self.bar = bar - # end - # - # def hello - # my_foo = foo # volatile read - # self.foo = 1 # volatile write - # cas_foo(1, 2) # => true | a strong CAS - # end - # end - def attr_volatile(*attr_names) - return if attr_names.empty? 
- include(Module.new do - atomic_ref_setup = attr_names.map {|attr_name| "@__#{attr_name} = Concurrent::AtomicReference.new"} - initialize_copy_setup = attr_names.zip(atomic_ref_setup).map do |attr_name, ref_setup| - "#{ref_setup}(other.instance_variable_get(:@__#{attr_name}).get)" - end - class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 - def initialize(*) - super - #{atomic_ref_setup.join('; ')} - end - - def initialize_copy(other) - super - #{initialize_copy_setup.join('; ')} - end - RUBY_EVAL - - attr_names.each do |attr_name| - class_eval <<-RUBY_EVAL, __FILE__, __LINE__ + 1 - def #{attr_name} - @__#{attr_name}.get - end - - def #{attr_name}=(value) - @__#{attr_name}.set(value) - end - - def compare_and_set_#{attr_name}(old_value, new_value) - @__#{attr_name}.compare_and_set(old_value, new_value) - end - RUBY_EVAL - - alias_method :"cas_#{attr_name}", :"compare_and_set_#{attr_name}" - alias_method :"lazy_set_#{attr_name}", :"#{attr_name}=" - end - end) - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb deleted file mode 100644 index bdde2dd8b363f..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb +++ /dev/null @@ -1,50 +0,0 @@ -require 'concurrent/thread_safe/util' - -module Concurrent - - # @!visibility private - module ThreadSafe - - # @!visibility private - module Util - - # A xorshift random number (positive +Fixnum+s) generator, provides - # reasonably cheap way to generate thread local random numbers without - # contending for the global +Kernel.rand+. - # - # Usage: - # x = XorShiftRandom.get # uses Kernel.rand to generate an initial seed - # while true - # if (x = XorShiftRandom.xorshift).odd? # thread-localy generate a next random number - # do_something_at_random - # end - # end - module XorShiftRandom - extend self - MAX_XOR_SHIFTABLE_INT = MAX_INT - 1 - - # Generates an initial non-zero positive +Fixnum+ via +Kernel.rand+. 
- def get - Kernel.rand(MAX_XOR_SHIFTABLE_INT) + 1 # 0 can't be xorshifted - end - - # xorshift based on: http://www.jstatsoft.org/v08/i14/paper - if 0.size == 4 - # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (3,1,14) to minimise Bignum overflows - def xorshift(x) - x ^= x >> 3 - x ^= (x << 1) & MAX_INT # cut-off Bignum overflow - x ^= x >> 14 - end - else - # using the "yˆ=y>>a; yˆ=y<>c;" transform with the (a,b,c) tuple with values (1,1,54) to minimise Bignum overflows - def xorshift(x) - x ^= x >> 1 - x ^= (x << 1) & MAX_INT # cut-off Bignum overflow - x ^= x >> 54 - end - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/timer_task.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/timer_task.rb deleted file mode 100644 index b69cfc8d8abcf..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/timer_task.rb +++ /dev/null @@ -1,311 +0,0 @@ -require 'concurrent/collection/copy_on_notify_observer_set' -require 'concurrent/concern/dereferenceable' -require 'concurrent/concern/observable' -require 'concurrent/atomic/atomic_boolean' -require 'concurrent/executor/executor_service' -require 'concurrent/executor/ruby_executor_service' -require 'concurrent/executor/safe_task_executor' -require 'concurrent/scheduled_task' - -module Concurrent - - # A very common concurrency pattern is to run a thread that performs a task at - # regular intervals. The thread that performs the task sleeps for the given - # interval then wakes up and performs the task. Lather, rinse, repeat... This - # pattern causes two problems. First, it is difficult to test the business - # logic of the task because the task itself is tightly coupled with the - # concurrency logic. Second, an exception raised while performing the task can - # cause the entire thread to abend. In a long-running application where the - # task thread is intended to run for days/weeks/years a crashed task thread - # can pose a significant problem. `TimerTask` alleviates both problems. - # - # When a `TimerTask` is launched it starts a thread for monitoring the - # execution interval. The `TimerTask` thread does not perform the task, - # however. Instead, the TimerTask launches the task on a separate thread. - # Should the task experience an unrecoverable crash only the task thread will - # crash. This makes the `TimerTask` very fault tolerant. Additionally, the - # `TimerTask` thread can respond to the success or failure of the task, - # performing logging or ancillary operations. - # - # One other advantage of `TimerTask` is that it forces the business logic to - # be completely decoupled from the concurrency logic. The business logic can - # be tested separately then passed to the `TimerTask` for scheduling and - # running. - # - # In some cases it may be necessary for a `TimerTask` to affect its own - # execution cycle. To facilitate this, a reference to the TimerTask instance - # is passed as an argument to the provided block every time the task is - # executed. - # - # The `TimerTask` class includes the `Dereferenceable` mixin module so the - # result of the last execution is always available via the `#value` method. - # Dereferencing options can be passed to the `TimerTask` during construction or - # at any later time using the `#set_deref_options` method. 
- # - # `TimerTask` supports notification through the Ruby standard library - # {http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html - # Observable} module. On execution the `TimerTask` will notify the observers - # with three arguments: time of execution, the result of the block (or nil on - # failure), and any raised exceptions (or nil on success). - # - # @!macro copy_options - # - # @example Basic usage - # task = Concurrent::TimerTask.new{ puts 'Boom!' } - # task.execute - # - # task.execution_interval #=> 60 (default) - # - # # wait 60 seconds... - # #=> 'Boom!' - # - # task.shutdown #=> true - # - # @example Configuring `:execution_interval` - # task = Concurrent::TimerTask.new(execution_interval: 5) do - # puts 'Boom!' - # end - # - # task.execution_interval #=> 5 - # - # @example Immediate execution with `:run_now` - # task = Concurrent::TimerTask.new(run_now: true){ puts 'Boom!' } - # task.execute - # - # #=> 'Boom!' - # - # @example Last `#value` and `Dereferenceable` mixin - # task = Concurrent::TimerTask.new( - # dup_on_deref: true, - # execution_interval: 5 - # ){ Time.now } - # - # task.execute - # Time.now #=> 2013-11-07 18:06:50 -0500 - # sleep(10) - # task.value #=> 2013-11-07 18:06:55 -0500 - # - # @example Controlling execution from within the block - # timer_task = Concurrent::TimerTask.new(execution_interval: 1) do |task| - # task.execution_interval.times{ print 'Boom! ' } - # print "\n" - # task.execution_interval += 1 - # if task.execution_interval > 5 - # puts 'Stopping...' - # task.shutdown - # end - # end - # - # timer_task.execute # blocking call - this task will stop itself - # #=> Boom! - # #=> Boom! Boom! - # #=> Boom! Boom! Boom! - # #=> Boom! Boom! Boom! Boom! - # #=> Boom! Boom! Boom! Boom! Boom! - # #=> Stopping... - # - # @example Observation - # class TaskObserver - # def update(time, result, ex) - # if result - # print "(#{time}) Execution successfully returned #{result}\n" - # else - # print "(#{time}) Execution failed with error #{ex}\n" - # end - # end - # end - # - # task = Concurrent::TimerTask.new(execution_interval: 1){ 42 } - # task.add_observer(TaskObserver.new) - # task.execute - # sleep 4 - # - # #=> (2013-10-13 19:08:58 -0400) Execution successfully returned 42 - # #=> (2013-10-13 19:08:59 -0400) Execution successfully returned 42 - # #=> (2013-10-13 19:09:00 -0400) Execution successfully returned 42 - # task.shutdown - # - # task = Concurrent::TimerTask.new(execution_interval: 1){ sleep } - # task.add_observer(TaskObserver.new) - # task.execute - # - # #=> (2013-10-13 19:07:25 -0400) Execution timed out - # #=> (2013-10-13 19:07:27 -0400) Execution timed out - # #=> (2013-10-13 19:07:29 -0400) Execution timed out - # task.shutdown - # - # task = Concurrent::TimerTask.new(execution_interval: 1){ raise StandardError } - # task.add_observer(TaskObserver.new) - # task.execute - # - # #=> (2013-10-13 19:09:37 -0400) Execution failed with error StandardError - # #=> (2013-10-13 19:09:38 -0400) Execution failed with error StandardError - # #=> (2013-10-13 19:09:39 -0400) Execution failed with error StandardError - # task.shutdown - # - # @see http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html - # @see http://docs.oracle.com/javase/7/docs/api/java/util/TimerTask.html - class TimerTask < RubyExecutorService - include Concern::Dereferenceable - include Concern::Observable - - # Default `:execution_interval` in seconds. - EXECUTION_INTERVAL = 60 - - # Default `:timeout_interval` in seconds. 
- TIMEOUT_INTERVAL = 30 - - # Create a new TimerTask with the given task and configuration. - # - # @!macro timer_task_initialize - # @param [Hash] opts the options defining task execution. - # @option opts [Integer] :execution_interval number of seconds between - # task executions (default: EXECUTION_INTERVAL) - # @option opts [Boolean] :run_now Whether to run the task immediately - # upon instantiation or to wait until the first # execution_interval - # has passed (default: false) - # - # @!macro deref_options - # - # @raise ArgumentError when no block is given. - # - # @yield to the block after :execution_interval seconds have passed since - # the last yield - # @yieldparam task a reference to the `TimerTask` instance so that the - # block can control its own lifecycle. Necessary since `self` will - # refer to the execution context of the block rather than the running - # `TimerTask`. - # - # @return [TimerTask] the new `TimerTask` - def initialize(opts = {}, &task) - raise ArgumentError.new('no block given') unless block_given? - super - set_deref_options opts - end - - # Is the executor running? - # - # @return [Boolean] `true` when running, `false` when shutting down or shutdown - def running? - @running.true? - end - - # Execute a previously created `TimerTask`. - # - # @return [TimerTask] a reference to `self` - # - # @example Instance and execute in separate steps - # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" } - # task.running? #=> false - # task.execute - # task.running? #=> true - # - # @example Instance and execute in one line - # task = Concurrent::TimerTask.new(execution_interval: 10){ print "Hello World\n" }.execute - # task.running? #=> true - def execute - synchronize do - if @running.false? - @running.make_true - schedule_next_task(@run_now ? 0 : @execution_interval) - end - end - self - end - - # Create and execute a new `TimerTask`. - # - # @!macro timer_task_initialize - # - # @example - # task = Concurrent::TimerTask.execute(execution_interval: 10){ print "Hello World\n" } - # task.running? #=> true - def self.execute(opts = {}, &task) - TimerTask.new(opts, &task).execute - end - - # @!attribute [rw] execution_interval - # @return [Fixnum] Number of seconds after the task completes before the - # task is performed again. - def execution_interval - synchronize { @execution_interval } - end - - # @!attribute [rw] execution_interval - # @return [Fixnum] Number of seconds after the task completes before the - # task is performed again. - def execution_interval=(value) - if (value = value.to_f) <= 0.0 - raise ArgumentError.new('must be greater than zero') - else - synchronize { @execution_interval = value } - end - end - - # @!attribute [rw] timeout_interval - # @return [Fixnum] Number of seconds the task can run before it is - # considered to have failed. - def timeout_interval - warn 'TimerTask timeouts are now ignored as these were not able to be implemented correctly' - end - - # @!attribute [rw] timeout_interval - # @return [Fixnum] Number of seconds the task can run before it is - # considered to have failed. 
- def timeout_interval=(value) - warn 'TimerTask timeouts are now ignored as these were not able to be implemented correctly' - end - - private :post, :<< - - private - - def ns_initialize(opts, &task) - set_deref_options(opts) - - self.execution_interval = opts[:execution] || opts[:execution_interval] || EXECUTION_INTERVAL - if opts[:timeout] || opts[:timeout_interval] - warn 'TimeTask timeouts are now ignored as these were not able to be implemented correctly' - end - @run_now = opts[:now] || opts[:run_now] - @executor = Concurrent::SafeTaskExecutor.new(task) - @running = Concurrent::AtomicBoolean.new(false) - @value = nil - - self.observers = Collection::CopyOnNotifyObserverSet.new - end - - # @!visibility private - def ns_shutdown_execution - @running.make_false - super - end - - # @!visibility private - def ns_kill_execution - @running.make_false - super - end - - # @!visibility private - def schedule_next_task(interval = execution_interval) - ScheduledTask.execute(interval, args: [Concurrent::Event.new], &method(:execute_task)) - nil - end - - # @!visibility private - def execute_task(completion) - return nil unless @running.true? - _success, value, reason = @executor.execute(self) - if completion.try? - self.value = value - schedule_next_task - time = Time.now - observers.notify_observers do - [time, self.value, reason] - end - end - nil - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tuple.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tuple.rb deleted file mode 100644 index 56212cfd15cc6..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tuple.rb +++ /dev/null @@ -1,82 +0,0 @@ -require 'concurrent/atomic/atomic_reference' - -module Concurrent - - # A fixed size array with volatile (synchronized, thread safe) getters/setters. - # Mixes in Ruby's `Enumerable` module for enhanced search, sort, and traversal. - # - # @example - # tuple = Concurrent::Tuple.new(16) - # - # tuple.set(0, :foo) #=> :foo | volatile write - # tuple.get(0) #=> :foo | volatile read - # tuple.compare_and_set(0, :foo, :bar) #=> true | strong CAS - # tuple.cas(0, :foo, :baz) #=> false | strong CAS - # tuple.get(0) #=> :bar | volatile read - # - # @see https://en.wikipedia.org/wiki/Tuple Tuple entry at Wikipedia - # @see http://www.erlang.org/doc/reference_manual/data_types.html#id70396 Erlang Tuple - # @see http://ruby-doc.org/core-2.2.2/Enumerable.html Enumerable - class Tuple - include Enumerable - - # The (fixed) size of the tuple. - attr_reader :size - - # Create a new tuple of the given size. - # - # @param [Integer] size the number of elements in the tuple - def initialize(size) - @size = size - @tuple = tuple = ::Array.new(size) - i = 0 - while i < size - tuple[i] = Concurrent::AtomicReference.new - i += 1 - end - end - - # Get the value of the element at the given index. 
- # - # @param [Integer] i the index from which to retrieve the value - # @return [Object] the value at the given index or nil if the index is out of bounds - def get(i) - return nil if i >= @size || i < 0 - @tuple[i].get - end - alias_method :volatile_get, :get - - # Set the element at the given index to the given value - # - # @param [Integer] i the index for the element to set - # @param [Object] value the value to set at the given index - # - # @return [Object] the new value of the element at the given index or nil if the index is out of bounds - def set(i, value) - return nil if i >= @size || i < 0 - @tuple[i].set(value) - end - alias_method :volatile_set, :set - - # Set the value at the given index to the new value if and only if the current - # value matches the given old value. - # - # @param [Integer] i the index for the element to set - # @param [Object] old_value the value to compare against the current value - # @param [Object] new_value the value to set at the given index - # - # @return [Boolean] true if the value at the given element was set else false - def compare_and_set(i, old_value, new_value) - return false if i >= @size || i < 0 - @tuple[i].compare_and_set(old_value, new_value) - end - alias_method :cas, :compare_and_set - - # Calls the given block once for each element in self, passing that element as a parameter. - # - # @yieldparam [Object] ref the `Concurrent::AtomicReference` object at the current index - def each - @tuple.each {|ref| yield ref.get} - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tvar.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tvar.rb deleted file mode 100644 index 5d02ef090fd0c..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/tvar.rb +++ /dev/null @@ -1,222 +0,0 @@ -require 'set' -require 'concurrent/synchronization/object' - -module Concurrent - - # A `TVar` is a transactional variable - a single-element container that - # is used as part of a transaction - see `Concurrent::atomically`. - # - # @!macro thread_safe_variable_comparison - # - # {include:file:docs-source/tvar.md} - class TVar < Synchronization::Object - safe_initialization! - - # Create a new `TVar` with an initial value. - def initialize(value) - @value = value - @lock = Mutex.new - end - - # Get the value of a `TVar`. - def value - Concurrent::atomically do - Transaction::current.read(self) - end - end - - # Set the value of a `TVar`. - def value=(value) - Concurrent::atomically do - Transaction::current.write(self, value) - end - end - - # @!visibility private - def unsafe_value # :nodoc: - @value - end - - # @!visibility private - def unsafe_value=(value) # :nodoc: - @value = value - end - - # @!visibility private - def unsafe_lock # :nodoc: - @lock - end - - end - - # Run a block that reads and writes `TVar`s as a single atomic transaction. - # With respect to the value of `TVar` objects, the transaction is atomic, in - # that it either happens or it does not, consistent, in that the `TVar` - # objects involved will never enter an illegal state, and isolated, in that - # transactions never interfere with each other. You may recognise these - # properties from database transactions. 
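For the TVar/atomically pair described above, a minimal transfer sketch (illustrative; the constructor is Concurrent::TVar.new, and reads outside a transaction are wrapped in one automatically):

    require 'concurrent/tvar'

    account_a = Concurrent::TVar.new(100)
    account_b = Concurrent::TVar.new(0)

    Concurrent.atomically do
      account_a.value -= 10   # the block may re-run from the top if the transaction conflicts
      account_b.value += 10
    end

    [account_a.value, account_b.value]  # => [90, 10]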
- # - # There are some very important and unusual semantics that you must be aware of: - # - # * Most importantly, the block that you pass to atomically may be executed - # more than once. In most cases your code should be free of - # side-effects, except for via TVar. - # - # * If an exception escapes an atomically block it will abort the transaction. - # - # * It is undefined behaviour to use callcc or Fiber with atomically. - # - # * If you create a new thread within an atomically, it will not be part of - # the transaction. Creating a thread counts as a side-effect. - # - # Transactions within transactions are flattened to a single transaction. - # - # @example - # a = new TVar(100_000) - # b = new TVar(100) - # - # Concurrent::atomically do - # a.value -= 10 - # b.value += 10 - # end - def atomically - raise ArgumentError.new('no block given') unless block_given? - - # Get the current transaction - - transaction = Transaction::current - - # Are we not already in a transaction (not nested)? - - if transaction.nil? - # New transaction - - begin - # Retry loop - - loop do - - # Create a new transaction - - transaction = Transaction.new - Transaction::current = transaction - - # Run the block, aborting on exceptions - - begin - result = yield - rescue Transaction::AbortError => e - transaction.abort - result = Transaction::ABORTED - rescue Transaction::LeaveError => e - transaction.abort - break result - rescue => e - transaction.abort - raise e - end - # If we can commit, break out of the loop - - if result != Transaction::ABORTED - if transaction.commit - break result - end - end - end - ensure - # Clear the current transaction - - Transaction::current = nil - end - else - # Nested transaction - flatten it and just run the block - - yield - end - end - - # Abort a currently running transaction - see `Concurrent::atomically`. - def abort_transaction - raise Transaction::AbortError.new - end - - # Leave a transaction without committing or aborting - see `Concurrent::atomically`. 
- def leave_transaction - raise Transaction::LeaveError.new - end - - module_function :atomically, :abort_transaction, :leave_transaction - - private - - # @!visibility private - class Transaction - - ABORTED = ::Object.new - - OpenEntry = Struct.new(:value, :modified) - - AbortError = Class.new(StandardError) - LeaveError = Class.new(StandardError) - - def initialize - @open_tvars = {} - end - - def read(tvar) - entry = open(tvar) - entry.value - end - - def write(tvar, value) - entry = open(tvar) - entry.modified = true - entry.value = value - end - - def open(tvar) - entry = @open_tvars[tvar] - - unless entry - unless tvar.unsafe_lock.try_lock - Concurrent::abort_transaction - end - - entry = OpenEntry.new(tvar.unsafe_value, false) - @open_tvars[tvar] = entry - end - - entry - end - - def abort - unlock - end - - def commit - @open_tvars.each do |tvar, entry| - if entry.modified - tvar.unsafe_value = entry.value - end - end - - unlock - end - - def unlock - @open_tvars.each_key do |tvar| - tvar.unsafe_lock.unlock - end - end - - def self.current - Thread.current[:current_tvar_transaction] - end - - def self.current=(transaction) - Thread.current[:current_tvar_transaction] = transaction - end - - end - -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/engine.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/engine.rb deleted file mode 100644 index 0c574b2abb893..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/engine.rb +++ /dev/null @@ -1,45 +0,0 @@ -module Concurrent - # @!visibility private - module Utility - - # @!visibility private - module EngineDetector - def on_cruby? - RUBY_ENGINE == 'ruby' - end - - def on_jruby? - RUBY_ENGINE == 'jruby' - end - - def on_truffleruby? - RUBY_ENGINE == 'truffleruby' - end - - def on_windows? - !(RbConfig::CONFIG['host_os'] =~ /mswin|mingw|cygwin/).nil? - end - - def on_osx? - !(RbConfig::CONFIG['host_os'] =~ /darwin|mac os/).nil? - end - - def on_linux? - !(RbConfig::CONFIG['host_os'] =~ /linux/).nil? - end - - def ruby_version(version = RUBY_VERSION, comparison, major, minor, patch) - result = (version.split('.').map(&:to_i) <=> [major, minor, patch]) - comparisons = { :== => [0], - :>= => [1, 0], - :<= => [-1, 0], - :> => [1], - :< => [-1] } - comparisons.fetch(comparison).include? result - end - end - end - - # @!visibility private - extend Utility::EngineDetector -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb deleted file mode 100644 index 1c987d8a411fc..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb +++ /dev/null @@ -1,19 +0,0 @@ -module Concurrent - - # @!macro monotonic_get_time - # - # Returns the current time as tracked by the application monotonic clock. - # - # @param [Symbol] unit the time unit to be returned, can be either - # :float_second, :float_millisecond, :float_microsecond, :second, - # :millisecond, :microsecond, or :nanosecond default to :float_second. 
- # - # @return [Float] The current monotonic time since some unspecified - # starting point - # - # @!macro monotonic_clock_warning - def monotonic_time(unit = :float_second) - Process.clock_gettime(Process::CLOCK_MONOTONIC, unit) - end - module_function :monotonic_time -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb deleted file mode 100644 index bf7bab354e832..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb +++ /dev/null @@ -1,77 +0,0 @@ -require 'concurrent/utility/engine' -# Synchronization::AbstractObject must be defined before loading the extension -require 'concurrent/synchronization/abstract_object' - -module Concurrent - # @!visibility private - module Utility - # @!visibility private - module NativeExtensionLoader - - def allow_c_extensions? - Concurrent.on_cruby? - end - - def c_extensions_loaded? - defined?(@c_extensions_loaded) && @c_extensions_loaded - end - - def load_native_extensions - if Concurrent.on_cruby? && !c_extensions_loaded? - ['concurrent/concurrent_ruby_ext', - "concurrent/#{RUBY_VERSION[0..2]}/concurrent_ruby_ext" - ].each { |p| try_load_c_extension p } - end - - if Concurrent.on_jruby? && !java_extensions_loaded? - begin - require 'concurrent/concurrent_ruby.jar' - set_java_extensions_loaded - rescue LoadError => e - raise e, "Java extensions are required for JRuby.\n" + e.message, e.backtrace - end - end - end - - private - - def load_error_path(error) - if error.respond_to? :path - error.path - else - error.message.split(' -- ').last - end - end - - def set_c_extensions_loaded - @c_extensions_loaded = true - end - - def java_extensions_loaded? - defined?(@java_extensions_loaded) && @java_extensions_loaded - end - - def set_java_extensions_loaded - @java_extensions_loaded = true - end - - def try_load_c_extension(path) - require path - set_c_extensions_loaded - rescue LoadError => e - if load_error_path(e) == path - # move on with pure-Ruby implementations - # TODO (pitr-ch 12-Jul-2018): warning on verbose? 
- else - raise e - end - end - - end - end - - # @!visibility private - extend Utility::NativeExtensionLoader -end - -Concurrent.load_native_extensions diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_integer.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_integer.rb deleted file mode 100644 index de1cdc306a1bd..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/native_integer.rb +++ /dev/null @@ -1,54 +0,0 @@ -module Concurrent - # @!visibility private - module Utility - # @private - module NativeInteger - # http://stackoverflow.com/questions/535721/ruby-max-integer - MIN_VALUE = -(2**(0.size * 8 - 2)) - MAX_VALUE = (2**(0.size * 8 - 2) - 1) - - def ensure_upper_bound(value) - if value > MAX_VALUE - raise RangeError.new("#{value} is greater than the maximum value of #{MAX_VALUE}") - end - value - end - - def ensure_lower_bound(value) - if value < MIN_VALUE - raise RangeError.new("#{value} is less than the maximum value of #{MIN_VALUE}") - end - value - end - - def ensure_integer(value) - unless value.is_a?(Integer) - raise ArgumentError.new("#{value} is not an Integer") - end - value - end - - def ensure_integer_and_bounds(value) - ensure_integer value - ensure_upper_bound value - ensure_lower_bound value - end - - def ensure_positive(value) - if value < 0 - raise ArgumentError.new("#{value} cannot be negative") - end - value - end - - def ensure_positive_and_no_zero(value) - if value < 1 - raise ArgumentError.new("#{value} cannot be negative or zero") - end - value - end - - extend self - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/processor_counter.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/processor_counter.rb deleted file mode 100644 index 986e2d5231663..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/utility/processor_counter.rb +++ /dev/null @@ -1,110 +0,0 @@ -require 'etc' -require 'rbconfig' -require 'concurrent/delay' - -module Concurrent - # @!visibility private - module Utility - - # @!visibility private - class ProcessorCounter - def initialize - @processor_count = Delay.new { compute_processor_count } - @physical_processor_count = Delay.new { compute_physical_processor_count } - end - - def processor_count - @processor_count.value - end - - def physical_processor_count - @physical_processor_count.value - end - - private - - def compute_processor_count - if Concurrent.on_jruby? 
- java.lang.Runtime.getRuntime.availableProcessors - else - Etc.nprocessors - end - end - - def compute_physical_processor_count - ppc = case RbConfig::CONFIG["target_os"] - when /darwin\d\d/ - IO.popen("/usr/sbin/sysctl -n hw.physicalcpu", &:read).to_i - when /linux/ - cores = {} # unique physical ID / core ID combinations - phy = 0 - IO.read("/proc/cpuinfo").scan(/^physical id.*|^core id.*/) do |ln| - if ln.start_with?("physical") - phy = ln[/\d+/] - elsif ln.start_with?("core") - cid = phy + ":" + ln[/\d+/] - cores[cid] = true if not cores[cid] - end - end - cores.count - when /mswin|mingw/ - require 'win32ole' - result_set = WIN32OLE.connect("winmgmts://").ExecQuery( - "select NumberOfCores from Win32_Processor") - result_set.to_enum.collect(&:NumberOfCores).reduce(:+) - else - processor_count - end - # fall back to logical count if physical info is invalid - ppc > 0 ? ppc : processor_count - rescue - return 1 - end - end - end - - # create the default ProcessorCounter on load - @processor_counter = Utility::ProcessorCounter.new - singleton_class.send :attr_reader, :processor_counter - - # Number of processors seen by the OS and used for process scheduling. For - # performance reasons the calculated value will be memoized on the first - # call. - # - # When running under JRuby the Java runtime call - # `java.lang.Runtime.getRuntime.availableProcessors` will be used. According - # to the Java documentation this "value may change during a particular - # invocation of the virtual machine... [applications] should therefore - # occasionally poll this property." Subsequently the result will NOT be - # memoized under JRuby. - # - # Otherwise Ruby's Etc.nprocessors will be used. - # - # @return [Integer] number of processors seen by the OS or Java runtime - # - # @see http://docs.oracle.com/javase/6/docs/api/java/lang/Runtime.html#availableProcessors() - def self.processor_count - processor_counter.processor_count - end - - # Number of physical processor cores on the current system. For performance - # reasons the calculated value will be memoized on the first call. - # - # On Windows the Win32 API will be queried for the `NumberOfCores from - # Win32_Processor`. This will return the total number "of cores for the - # current instance of the processor." On Unix-like operating systems either - # the `hwprefs` or `sysctl` utility will be called in a subshell and the - # returned value will be used. In the rare case where none of these methods - # work or an exception is raised the function will simply return 1. 
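A quick sketch of the two counters documented here (illustrative; actual values depend on the host):

    require 'concurrent/utility/processor_counter'

    Concurrent.processor_count           # e.g. 8 - logical CPUs seen by the OS, memoized after the first call
    Concurrent.physical_processor_count  # e.g. 4 - physical cores, falling back to the logical count or 1 on error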
- # - # @return [Integer] number physical processor cores on the current system - # - # @see https://github.com/grosser/parallel/blob/4fc8b89d08c7091fe0419ca8fba1ec3ce5a8d185/lib/parallel.rb - # - # @see http://msdn.microsoft.com/en-us/library/aa394373(v=vs.85).aspx - # @see http://www.unix.com/man-page/osx/1/HWPREFS/ - # @see http://linux.die.net/man/8/sysctl - def self.physical_processor_count - processor_counter.physical_processor_count - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/version.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/version.rb deleted file mode 100644 index d1c098956a3d1..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/concurrent-ruby-1.2.2/lib/concurrent-ruby/concurrent/version.rb +++ /dev/null @@ -1,3 +0,0 @@ -module Concurrent - VERSION = '1.2.2' -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/MIT-LICENSE b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/MIT-LICENSE deleted file mode 100644 index ed8e9ee66db87..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/MIT-LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2008 The Ruby I18n team - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n.rb deleted file mode 100644 index d3369704abf1f..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n.rb +++ /dev/null @@ -1,435 +0,0 @@ -# frozen_string_literal: true - -require 'concurrent/map' -require 'concurrent/hash' - -require 'i18n/version' -require 'i18n/utils' -require 'i18n/exceptions' -require 'i18n/interpolate/ruby' - -module I18n - autoload :Backend, 'i18n/backend' - autoload :Config, 'i18n/config' - autoload :Gettext, 'i18n/gettext' - autoload :Locale, 'i18n/locale' - autoload :Tests, 'i18n/tests' - autoload :Middleware, 'i18n/middleware' - - RESERVED_KEYS = %i[ - cascade - deep_interpolation - default - exception_handler - fallback - fallback_in_progress - fallback_original_locale - format - object - raise - resolve - scope - separator - throw - ] - EMPTY_HASH = {}.freeze - - def self.new_double_nested_cache # :nodoc: - Concurrent::Map.new { |h, k| h[k] = Concurrent::Map.new } - end - - # Marks a key as reserved. 
Reserved keys are used internally, - # and can't also be used for interpolation. If you are using any - # extra keys as I18n options, you should call I18n.reserve_key - # before any I18n.translate (etc) calls are made. - def self.reserve_key(key) - RESERVED_KEYS << key.to_sym - @reserved_keys_pattern = nil - end - - def self.reserved_keys_pattern # :nodoc: - @reserved_keys_pattern ||= /%\{(#{RESERVED_KEYS.join("|")})\}/ - end - - module Base - # Gets I18n configuration object. - def config - Thread.current[:i18n_config] ||= I18n::Config.new - end - - # Sets I18n configuration object. - def config=(value) - Thread.current[:i18n_config] = value - end - - # Write methods which delegates to the configuration object - %w(locale backend default_locale available_locales default_separator - exception_handler load_path enforce_available_locales).each do |method| - module_eval <<-DELEGATORS, __FILE__, __LINE__ + 1 - def #{method} - config.#{method} - end - - def #{method}=(value) - config.#{method} = (value) - end - DELEGATORS - end - - # Tells the backend to reload translations. Used in situations like the - # Rails development environment. Backends can implement whatever strategy - # is useful. - def reload! - config.clear_available_locales_set - config.backend.reload! - end - - # Tells the backend to load translations now. Used in situations like the - # Rails production environment. Backends can implement whatever strategy - # is useful. - def eager_load! - config.backend.eager_load! - end - - # Translates, pluralizes and interpolates a given key using a given locale, - # scope, and default, as well as interpolation values. - # - # *LOOKUP* - # - # Translation data is organized as a nested hash using the upper-level keys - # as namespaces. E.g., ActionView ships with the translation: - # :date => {:formats => {:short => "%b %d"}}. - # - # Translations can be looked up at any level of this hash using the key argument - # and the scope option. E.g., in this example I18n.t :date - # returns the whole translations hash {:formats => {:short => "%b %d"}}. - # - # Key can be either a single key or a dot-separated key (both Strings and Symbols - # work). E.g., the short format can be looked up using both: - # I18n.t 'date.formats.short' - # I18n.t :'date.formats.short' - # - # Scope can be either a single key, a dot-separated key or an array of keys - # or dot-separated keys. Keys and scopes can be combined freely. So these - # examples will all look up the same short date format: - # I18n.t 'date.formats.short' - # I18n.t 'formats.short', :scope => 'date' - # I18n.t 'short', :scope => 'date.formats' - # I18n.t 'short', :scope => %w(date formats) - # - # *INTERPOLATION* - # - # Translations can contain interpolation variables which will be replaced by - # values passed to #translate as part of the options hash, with the keys matching - # the interpolation variable names. - # - # E.g., with a translation :foo => "foo %{bar}" the option - # value for the key +bar+ will be interpolated into the translation: - # I18n.t :foo, :bar => 'baz' # => 'foo baz' - # - # *PLURALIZATION* - # - # Translation data can contain pluralized translations. Pluralized translations - # are arrays of singular/plural versions of translations like ['Foo', 'Foos']. - # - # Note that I18n::Backend::Simple only supports an algorithm for English - # pluralization rules. Other algorithms can be supported by custom backends. 
- # - # This returns the singular version of a pluralized translation: - # I18n.t :foo, :count => 1 # => 'Foo' - # - # These both return the plural version of a pluralized translation: - # I18n.t :foo, :count => 0 # => 'Foos' - # I18n.t :foo, :count => 2 # => 'Foos' - # - # The :count option can be used both for pluralization and interpolation. - # E.g., with the translation - # :foo => ['%{count} foo', '%{count} foos'], count will - # be interpolated to the pluralized translation: - # I18n.t :foo, :count => 1 # => '1 foo' - # - # *DEFAULTS* - # - # This returns the translation for :foo or default if no translation was found: - # I18n.t :foo, :default => 'default' - # - # This returns the translation for :foo or the translation for :bar if no - # translation for :foo was found: - # I18n.t :foo, :default => :bar - # - # Returns the translation for :foo or the translation for :bar - # or default if no translations for :foo and :bar were found. - # I18n.t :foo, :default => [:bar, 'default'] - # - # *BULK LOOKUP* - # - # This returns an array with the translations for :foo and :bar. - # I18n.t [:foo, :bar] - # - # Can be used with dot-separated nested keys: - # I18n.t [:'baz.foo', :'baz.bar'] - # - # Which is the same as using a scope option: - # I18n.t [:foo, :bar], :scope => :baz - # - # *LAMBDAS* - # - # Both translations and defaults can be given as Ruby lambdas. Lambdas will be - # called and passed the key and options. - # - # E.g. assuming the key :salutation resolves to: - # lambda { |key, options| options[:gender] == 'm' ? "Mr. #{options[:name]}" : "Mrs. #{options[:name]}" } - # - # Then I18n.t(:salutation, :gender => 'w', :name => 'Smith') will result in "Mrs. Smith". - # - # Note that the string returned by lambda will go through string interpolation too, - # so the following lambda would give the same result: - # lambda { |key, options| options[:gender] == 'm' ? "Mr. %{name}" : "Mrs. %{name}" } - # - # It is recommended to use/implement lambdas in an "idempotent" way. E.g. when - # a cache layer is put in front of I18n.translate it will generate a cache key - # from the argument values passed to #translate. Therefore your lambdas should - # always return the same translations/values per unique combination of argument - # values. - # - # *Ruby 2.7+ keyword arguments warning* - # - # This method uses keyword arguments. - # There is a breaking change in ruby that produces warning with ruby 2.7 and won't work as expected with ruby 3.0 - # The "hash" parameter must be passed as keyword argument. - # - # Good: - # I18n.t(:salutation, :gender => 'w', :name => 'Smith') - # I18n.t(:salutation, **{ :gender => 'w', :name => 'Smith' }) - # I18n.t(:salutation, **any_hash) - # - # Bad: - # I18n.t(:salutation, { :gender => 'w', :name => 'Smith' }) - # I18n.t(:salutation, any_hash) - # - def translate(key = nil, throw: false, raise: false, locale: nil, **options) # TODO deprecate :raise - locale ||= config.locale - raise Disabled.new('t') if locale == false - enforce_available_locales!(locale) - - backend = config.backend - - if key.is_a?(Array) - key.map do |k| - translate_key(k, throw, raise, locale, backend, options) - end - else - translate_key(key, throw, raise, locale, backend, options) - end - end - alias :t :translate - - # Wrapper for translate that adds :raise => true. With - # this option, if no translation is found, it will raise I18n::MissingTranslationData - def translate!(key, **options) - translate(key, **options, raise: true) - end - alias :t! :translate! 
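For reference, the I18n#translate behaviour documented in the removed comments above (lookup, interpolation, pluralization, defaults) can be exercised with a short, self-contained sketch. This is illustrative only: it assumes the i18n gem is loaded from RubyGems rather than the vendored copy removed by this patch, and it uses the default Simple backend.

    require "i18n"

    I18n.backend.store_translations(:en, {
      greeting: "Hello, %{name}!",
      apples:   { one: "%{count} apple", other: "%{count} apples" }
    })
    I18n.locale = :en

    I18n.t(:greeting, name: "Homebrew")    # interpolation  => "Hello, Homebrew!"
    I18n.t(:apples, count: 2)              # pluralization  => "2 apples"
    I18n.t(:missing, default: "fallback")  # default        => "fallback"
    I18n.t!(:missing)                      # raises I18n::MissingTranslationData
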
- - # Returns true if a translation exists for a given key, otherwise returns false. - def exists?(key, _locale = nil, locale: _locale, **options) - locale ||= config.locale - raise Disabled.new('exists?') if locale == false - raise I18n::ArgumentError if key.is_a?(String) && key.empty? - config.backend.exists?(locale, key, options) - end - - # Transliterates UTF-8 characters to ASCII. By default this method will - # transliterate only Latin strings to an ASCII approximation: - # - # I18n.transliterate("Ærøskøbing") - # # => "AEroskobing" - # - # I18n.transliterate("日本語") - # # => "???" - # - # It's also possible to add support for per-locale transliterations. I18n - # expects transliteration rules to be stored at - # i18n.transliterate.rule. - # - # Transliteration rules can either be a Hash or a Proc. Procs must accept a - # single string argument. Hash rules inherit the default transliteration - # rules, while Procs do not. - # - # *Examples* - # - # Setting a Hash in .yml: - # - # i18n: - # transliterate: - # rule: - # ü: "ue" - # ö: "oe" - # - # Setting a Hash using Ruby: - # - # store_translations(:de, i18n: { - # transliterate: { - # rule: { - # 'ü' => 'ue', - # 'ö' => 'oe' - # } - # } - # }) - # - # Setting a Proc: - # - # translit = lambda {|string| MyTransliterator.transliterate(string) } - # store_translations(:xx, :i18n => {:transliterate => {:rule => translit}) - # - # Transliterating strings: - # - # I18n.locale = :en - # I18n.transliterate("Jürgen") # => "Jurgen" - # I18n.locale = :de - # I18n.transliterate("Jürgen") # => "Juergen" - # I18n.transliterate("Jürgen", :locale => :en) # => "Jurgen" - # I18n.transliterate("Jürgen", :locale => :de) # => "Juergen" - def transliterate(key, throw: false, raise: false, locale: nil, replacement: nil, **options) - locale ||= config.locale - raise Disabled.new('transliterate') if locale == false - enforce_available_locales!(locale) - - config.backend.transliterate(locale, key, replacement) - rescue I18n::ArgumentError => exception - handle_exception((throw && :throw || raise && :raise), exception, locale, key, options) - end - - # Localizes certain objects, such as dates and numbers to local formatting. - def localize(object, locale: nil, format: nil, **options) - locale ||= config.locale - raise Disabled.new('l') if locale == false - enforce_available_locales!(locale) - - format ||= :default - config.backend.localize(locale, object, format, options) - end - alias :l :localize - - # Executes block with given I18n.locale set. - def with_locale(tmp_locale = nil) - if tmp_locale == nil - yield - else - current_locale = self.locale - self.locale = tmp_locale - begin - yield - ensure - self.locale = current_locale - end - end - end - - # Merges the given locale, key and scope into a single array of keys. - # Splits keys that contain dots into multiple keys. Makes sure all - # keys are Symbols. - def normalize_keys(locale, key, scope, separator = nil) - separator ||= I18n.default_separator - - [ - *normalize_key(locale, separator), - *normalize_key(scope, separator), - *normalize_key(key, separator) - ] - end - - # Returns true when the passed locale, which can be either a String or a - # Symbol, is in the list of available locales. Returns false otherwise. - def locale_available?(locale) - I18n.config.available_locales_set.include?(locale) - end - - # Raises an InvalidLocale exception when the passed locale is not available. 
- def enforce_available_locales!(locale) - if locale != false && config.enforce_available_locales - raise I18n::InvalidLocale.new(locale) if !locale_available?(locale) - end - end - - def available_locales_initialized? - config.available_locales_initialized? - end - - private - - def translate_key(key, throw, raise, locale, backend, options) - result = catch(:exception) do - backend.translate(locale, key, options) - end - - if result.is_a?(MissingTranslation) - handle_exception((throw && :throw || raise && :raise), result, locale, key, options) - else - result - end - end - - # Any exceptions thrown in translate will be sent to the @@exception_handler - # which can be a Symbol, a Proc or any other Object unless they're forced to - # be raised or thrown (MissingTranslation). - # - # If exception_handler is a Symbol then it will simply be sent to I18n as - # a method call. A Proc will simply be called. In any other case the - # method #call will be called on the exception_handler object. - # - # Examples: - # - # I18n.exception_handler = :custom_exception_handler # this is the default - # I18n.custom_exception_handler(exception, locale, key, options) # will be called like this - # - # I18n.exception_handler = lambda { |*args| ... } # a lambda - # I18n.exception_handler.call(exception, locale, key, options) # will be called like this - # - # I18n.exception_handler = I18nExceptionHandler.new # an object - # I18n.exception_handler.call(exception, locale, key, options) # will be called like this - def handle_exception(handling, exception, locale, key, options) - case handling - when :raise - raise exception.respond_to?(:to_exception) ? exception.to_exception : exception - when :throw - throw :exception, exception - else - case handler = options[:exception_handler] || config.exception_handler - when Symbol - send(handler, exception, locale, key, options) - else - handler.call(exception, locale, key, options) - end - end - end - - @@normalized_key_cache = I18n.new_double_nested_cache - - def normalize_key(key, separator) - @@normalized_key_cache[separator][key] ||= - case key - when Array - key.flat_map { |k| normalize_key(k, separator) } - else - keys = key.to_s.split(separator) - keys.delete('') - keys.map! 
do |k| - case k - when /\A[-+]?([1-9]\d*|0)\z/ # integer - k.to_i - when 'true' - true - when 'false' - false - else - k.to_sym - end - end - keys - end - end - end - - extend Base -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend.rb deleted file mode 100644 index 863d618782d57..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend.rb +++ /dev/null @@ -1,22 +0,0 @@ -# frozen_string_literal: true - -module I18n - module Backend - autoload :Base, 'i18n/backend/base' - autoload :Cache, 'i18n/backend/cache' - autoload :CacheFile, 'i18n/backend/cache_file' - autoload :Cascade, 'i18n/backend/cascade' - autoload :Chain, 'i18n/backend/chain' - autoload :Fallbacks, 'i18n/backend/fallbacks' - autoload :Flatten, 'i18n/backend/flatten' - autoload :Gettext, 'i18n/backend/gettext' - autoload :InterpolationCompiler, 'i18n/backend/interpolation_compiler' - autoload :KeyValue, 'i18n/backend/key_value' - autoload :LazyLoadable, 'i18n/backend/lazy_loadable' - autoload :Memoize, 'i18n/backend/memoize' - autoload :Metadata, 'i18n/backend/metadata' - autoload :Pluralization, 'i18n/backend/pluralization' - autoload :Simple, 'i18n/backend/simple' - autoload :Transliterator, 'i18n/backend/transliterator' - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/base.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/base.rb deleted file mode 100644 index 57756758ca6f5..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/base.rb +++ /dev/null @@ -1,304 +0,0 @@ -# frozen_string_literal: true - -require 'yaml' -require 'json' - -module I18n - module Backend - module Base - include I18n::Backend::Transliterator - - # Accepts a list of paths to translation files. Loads translations from - # plain Ruby (*.rb), YAML files (*.yml), or JSON files (*.json). See #load_rb, #load_yml, and #load_json - # for details. - def load_translations(*filenames) - filenames = I18n.load_path if filenames.empty? - filenames.flatten.each do |filename| - loaded_translations = load_file(filename) - yield filename, loaded_translations if block_given? - end - end - - # This method receives a locale, a data hash and options for storing translations. - # Should be implemented - def store_translations(locale, data, options = EMPTY_HASH) - raise NotImplementedError - end - - def translate(locale, key, options = EMPTY_HASH) - raise I18n::ArgumentError if (key.is_a?(String) || key.is_a?(Symbol)) && key.empty? - raise InvalidLocale.new(locale) unless locale - return nil if key.nil? && !options.key?(:default) - - entry = lookup(locale, key, options[:scope], options) unless key.nil? - - if entry.nil? && options.key?(:default) - entry = default(locale, key, options[:default], options) - else - entry = resolve_entry(locale, key, entry, options) - end - - count = options[:count] - - if entry.nil? && (subtrees? || !count) - if (options.key?(:default) && !options[:default].nil?) || !options.key?(:default) - throw(:exception, I18n::MissingTranslation.new(locale, key, options)) - end - end - - entry = entry.dup if entry.is_a?(String) - entry = pluralize(locale, entry, count) if count - - if entry.nil? && !subtrees? 
- throw(:exception, I18n::MissingTranslation.new(locale, key, options)) - end - - deep_interpolation = options[:deep_interpolation] - values = Utils.except(options, *RESERVED_KEYS) unless options.empty? - if values - entry = if deep_interpolation - deep_interpolate(locale, entry, values) - else - interpolate(locale, entry, values) - end - end - entry - end - - def exists?(locale, key, options = EMPTY_HASH) - lookup(locale, key, options[:scope]) != nil - end - - # Acts the same as +strftime+, but uses a localized version of the - # format string. Takes a key from the date/time formats translations as - # a format argument (e.g., :short in :'date.formats'). - def localize(locale, object, format = :default, options = EMPTY_HASH) - if object.nil? && options.include?(:default) - return options[:default] - end - raise ArgumentError, "Object must be a Date, DateTime or Time object. #{object.inspect} given." unless object.respond_to?(:strftime) - - if Symbol === format - key = format - type = object.respond_to?(:sec) ? 'time' : 'date' - options = options.merge(:raise => true, :object => object, :locale => locale) - format = I18n.t(:"#{type}.formats.#{key}", **options) - end - - format = translate_localization_format(locale, object, format, options) - object.strftime(format) - end - - # Returns an array of locales for which translations are available - # ignoring the reserved translation meta data key :i18n. - def available_locales - raise NotImplementedError - end - - def reload! - eager_load! if eager_loaded? - end - - def eager_load! - @eager_loaded = true - end - - protected - - def eager_loaded? - @eager_loaded ||= false - end - - # The method which actually looks up for the translation in the store. - def lookup(locale, key, scope = [], options = EMPTY_HASH) - raise NotImplementedError - end - - def subtrees? - true - end - - # Evaluates defaults. - # If given subject is an Array, it walks the array and returns the - # first translation that can be resolved. Otherwise it tries to resolve - # the translation directly. - def default(locale, object, subject, options = EMPTY_HASH) - if options.size == 1 && options.has_key?(:default) - options = {} - else - options = Utils.except(options, :default) - end - - case subject - when Array - subject.each do |item| - result = resolve(locale, object, item, options) - return result unless result.nil? - end and nil - else - resolve(locale, object, subject, options) - end - end - - # Resolves a translation. - # If the given subject is a Symbol, it will be translated with the - # given options. If it is a Proc then it will be evaluated. All other - # subjects will be returned directly. - def resolve(locale, object, subject, options = EMPTY_HASH) - return subject if options[:resolve] == false - result = catch(:exception) do - case subject - when Symbol - I18n.translate(subject, **options.merge(:locale => locale, :throw => true)) - when Proc - date_or_time = options.delete(:object) || object - resolve(locale, object, subject.call(date_or_time, **options)) - else - subject - end - end - result unless result.is_a?(MissingTranslation) - end - alias_method :resolve_entry, :resolve - - # Picks a translation from a pluralized mnemonic subkey according to English - # pluralization rules : - # - It will pick the :one subkey if count is equal to 1. - # - It will pick the :other subkey otherwise. - # - It will pick the :zero subkey in the special case where count is - # equal to 0 and there is a :zero subkey present. 
This behaviour is - # not standard with regards to the CLDR pluralization rules. - # Other backends can implement more flexible or complex pluralization rules. - def pluralize(locale, entry, count) - entry = entry.reject { |k, _v| k == :attributes } if entry.is_a?(Hash) - return entry unless entry.is_a?(Hash) && count - - key = pluralization_key(entry, count) - raise InvalidPluralizationData.new(entry, count, key) unless entry.has_key?(key) - entry[key] - end - - # Interpolates values into a given subject. - # - # if the given subject is a string then: - # method interpolates "file %{file} opened by %%{user}", :file => 'test.txt', :user => 'Mr. X' - # # => "file test.txt opened by %{user}" - # - # if the given subject is an array then: - # each element of the array is recursively interpolated (until it finds a string) - # method interpolates ["yes, %{user}", ["maybe no, %{user}, "no, %{user}"]], :user => "bartuz" - # # => "["yes, bartuz",["maybe no, bartuz", "no, bartuz"]]" - def interpolate(locale, subject, values = EMPTY_HASH) - return subject if values.empty? - - case subject - when ::String then I18n.interpolate(subject, values) - when ::Array then subject.map { |element| interpolate(locale, element, values) } - else - subject - end - end - - # Deep interpolation - # - # deep_interpolate { people: { ann: "Ann is %{ann}", john: "John is %{john}" } }, - # ann: 'good', john: 'big' - # #=> { people: { ann: "Ann is good", john: "John is big" } } - def deep_interpolate(locale, data, values = EMPTY_HASH) - return data if values.empty? - - case data - when ::String - I18n.interpolate(data, values) - when ::Hash - data.each_with_object({}) do |(k, v), result| - result[k] = deep_interpolate(locale, v, values) - end - when ::Array - data.map do |v| - deep_interpolate(locale, v, values) - end - else - data - end - end - - # Loads a single translations file by delegating to #load_rb or - # #load_yml depending on the file extension and directly merges the - # data to the existing translations. Raises I18n::UnknownFileType - # for all other file extensions. - def load_file(filename) - type = File.extname(filename).tr('.', '').downcase - raise UnknownFileType.new(type, filename) unless respond_to?(:"load_#{type}", true) - data, keys_symbolized = send(:"load_#{type}", filename) - unless data.is_a?(Hash) - raise InvalidLocaleData.new(filename, 'expects it to return a hash, but does not') - end - data.each { |locale, d| store_translations(locale, d || {}, skip_symbolize_keys: keys_symbolized) } - - data - end - - # Loads a plain Ruby translations file. eval'ing the file must yield - # a Hash containing translation data with locales as toplevel keys. - def load_rb(filename) - translations = eval(IO.read(filename), binding, filename) - [translations, false] - end - - # Loads a YAML translations file. The data must have locales as - # toplevel keys. - def load_yml(filename) - begin - if YAML.respond_to?(:unsafe_load_file) # Psych 4.0 way - [YAML.unsafe_load_file(filename, symbolize_names: true, freeze: true), true] - else - [YAML.load_file(filename), false] - end - rescue TypeError, ScriptError, StandardError => e - raise InvalidLocaleData.new(filename, e.inspect) - end - end - alias_method :load_yaml, :load_yml - - # Loads a JSON translations file. The data must have locales as - # toplevel keys. - def load_json(filename) - begin - # Use #load_file as a proxy for a version of JSON where symbolize_names and freeze are supported. 
- if ::JSON.respond_to?(:load_file) - [::JSON.load_file(filename, symbolize_names: true, freeze: true), true] - else - [::JSON.parse(File.read(filename)), false] - end - rescue TypeError, StandardError => e - raise InvalidLocaleData.new(filename, e.inspect) - end - end - - def translate_localization_format(locale, object, format, options) - format.to_s.gsub(/%(|\^)[aAbBpP]/) do |match| - case match - when '%a' then I18n.t!(:"date.abbr_day_names", :locale => locale, :format => format)[object.wday] - when '%^a' then I18n.t!(:"date.abbr_day_names", :locale => locale, :format => format)[object.wday].upcase - when '%A' then I18n.t!(:"date.day_names", :locale => locale, :format => format)[object.wday] - when '%^A' then I18n.t!(:"date.day_names", :locale => locale, :format => format)[object.wday].upcase - when '%b' then I18n.t!(:"date.abbr_month_names", :locale => locale, :format => format)[object.mon] - when '%^b' then I18n.t!(:"date.abbr_month_names", :locale => locale, :format => format)[object.mon].upcase - when '%B' then I18n.t!(:"date.month_names", :locale => locale, :format => format)[object.mon] - when '%^B' then I18n.t!(:"date.month_names", :locale => locale, :format => format)[object.mon].upcase - when '%p' then I18n.t!(:"time.#{(object.respond_to?(:hour) ? object.hour : 0) < 12 ? :am : :pm}", :locale => locale, :format => format).upcase - when '%P' then I18n.t!(:"time.#{(object.respond_to?(:hour) ? object.hour : 0) < 12 ? :am : :pm}", :locale => locale, :format => format).downcase - end - end - rescue MissingTranslationData => e - e.message - end - - def pluralization_key(entry, count) - key = :zero if count == 0 && entry.has_key?(:zero) - key ||= count == 1 ? :one : :other - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cache.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cache.rb deleted file mode 100644 index 40c18d657520d..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cache.rb +++ /dev/null @@ -1,113 +0,0 @@ -# frozen_string_literal: true - -# This module allows you to easily cache all responses from the backend - thus -# speeding up the I18n aspects of your application quite a bit. -# -# To enable caching you can simply include the Cache module to the Simple -# backend - or whatever other backend you are using: -# -# I18n::Backend::Simple.send(:include, I18n::Backend::Cache) -# -# You will also need to set a cache store implementation that you want to use: -# -# I18n.cache_store = ActiveSupport::Cache.lookup_store(:memory_store) -# -# You can use any cache implementation you want that provides the same API as -# ActiveSupport::Cache (only the methods #fetch and #write are being used). -# -# The cache_key implementation by default assumes you pass values that return -# a valid key from #hash (see -# https://www.ruby-doc.org/core/classes/Object.html#M000337). However, you can -# configure your own digest method via which responds to #hexdigest (see -# https://ruby-doc.org/stdlib/libdoc/openssl/rdoc/OpenSSL/Digest.html): -# -# I18n.cache_key_digest = OpenSSL::Digest::SHA256.new -# -# If you use a lambda as a default value in your translation like this: -# -# I18n.t(:"date.order", :default => lambda {[:month, :day, :year]}) -# -# Then you will always have a cache miss, because each time this method -# is called the lambda will have a different hash value. 
If you know -# the result of the lambda is a constant as in the example above, then -# to cache this you can make the lambda a constant, like this: -# -# DEFAULT_DATE_ORDER = lambda {[:month, :day, :year]} -# ... -# I18n.t(:"date.order", :default => DEFAULT_DATE_ORDER) -# -# If the lambda may result in different values for each call then consider -# also using the Memoize backend. -# -module I18n - class << self - @@cache_store = nil - @@cache_namespace = nil - @@cache_key_digest = nil - - def cache_store - @@cache_store - end - - def cache_store=(store) - @@cache_store = store - end - - def cache_namespace - @@cache_namespace - end - - def cache_namespace=(namespace) - @@cache_namespace = namespace - end - - def cache_key_digest - @@cache_key_digest - end - - def cache_key_digest=(key_digest) - @@cache_key_digest = key_digest - end - - def perform_caching? - !cache_store.nil? - end - end - - module Backend - # TODO Should the cache be cleared if new translations are stored? - module Cache - def translate(locale, key, options = EMPTY_HASH) - I18n.perform_caching? ? fetch(cache_key(locale, key, options)) { super } : super - end - - protected - - def fetch(cache_key, &block) - result = _fetch(cache_key, &block) - throw(:exception, result) if result.is_a?(MissingTranslation) - result = result.dup if result.frozen? rescue result - result - end - - def _fetch(cache_key, &block) - result = I18n.cache_store.read(cache_key) - return result unless result.nil? - result = catch(:exception, &block) - I18n.cache_store.write(cache_key, result) unless result.is_a?(Proc) - result - end - - def cache_key(locale, key, options) - # This assumes that only simple, native Ruby values are passed to I18n.translate. - "i18n/#{I18n.cache_namespace}/#{locale}/#{digest_item(key)}/#{digest_item(options)}" - end - - private - - def digest_item(key) - I18n.cache_key_digest ? I18n.cache_key_digest.hexdigest(key.to_s) : key.to_s.hash - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cache_file.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cache_file.rb deleted file mode 100644 index 0c5e192210ad7..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cache_file.rb +++ /dev/null @@ -1,36 +0,0 @@ -# frozen_string_literal: true - -require 'openssl' - -module I18n - module Backend - # Overwrites the Base load_file method to cache loaded file contents. - module CacheFile - # Optionally provide path_roots array to normalize filename paths, - # to make the cached i18n data portable across environments. - attr_accessor :path_roots - - protected - - # Track loaded translation files in the `i18n.load_file` scope, - # and skip loading the file if its contents are still up-to-date. - def load_file(filename) - initialized = !respond_to?(:initialized?) || initialized? - key = I18n::Backend::Flatten.escape_default_separator(normalized_path(filename)) - old_mtime, old_digest = initialized && lookup(:i18n, key, :load_file) - return if (mtime = File.mtime(filename).to_i) == old_mtime || - (digest = OpenSSL::Digest::SHA256.file(filename).hexdigest) == old_digest - super - store_translations(:i18n, load_file: { key => [mtime, digest] }) - end - - # Translate absolute filename to relative path for i18n key. 
- def normalized_path(file) - return file unless path_roots - path = path_roots.find(&file.method(:start_with?)) || - raise(InvalidLocaleData.new(file, 'outside expected path roots')) - file.sub(path, path_roots.index(path).to_s) - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cascade.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cascade.rb deleted file mode 100644 index 782b07b59480d..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/cascade.rb +++ /dev/null @@ -1,56 +0,0 @@ -# frozen_string_literal: true - -# The Cascade module adds the ability to do cascading lookups to backends that -# are compatible to the Simple backend. -# -# By cascading lookups we mean that for any key that can not be found the -# Cascade module strips one segment off the scope part of the key and then -# tries to look up the key in that scope. -# -# E.g. when a lookup for the key :"foo.bar.baz" does not yield a result then -# the segment :bar will be stripped off the scope part :"foo.bar" and the new -# scope :foo will be used to look up the key :baz. If that does not succeed -# then the remaining scope segment :foo will be omitted, too, and again the -# key :baz will be looked up (now with no scope). -# -# To enable a cascading lookup one passes the :cascade option: -# -# I18n.t(:'foo.bar.baz', :cascade => true) -# -# This will return the first translation found for :"foo.bar.baz", :"foo.baz" -# or :baz in this order. -# -# The cascading lookup takes precedence over resolving any given defaults. -# I.e. defaults will kick in after the cascading lookups haven't succeeded. -# -# This behavior is useful for libraries like ActiveRecord validations where -# the library wants to give users a bunch of more or less fine-grained options -# of scopes for a particular key. -# -# Thanks to Clemens Kofler for the initial idea and implementation! See -# http://github.com/clemens/i18n-cascading-backend - -module I18n - module Backend - module Cascade - def lookup(locale, key, scope = [], options = EMPTY_HASH) - return super unless cascade = options[:cascade] - - cascade = { :step => 1 } unless cascade.is_a?(Hash) - step = cascade[:step] || 1 - offset = cascade[:offset] || 1 - separator = options[:separator] || I18n.default_separator - skip_root = cascade.has_key?(:skip_root) ? cascade[:skip_root] : true - - scope = I18n.normalize_keys(nil, key, scope, separator) - key = (scope.slice!(-offset, offset) || []).join(separator) - - begin - result = super - return result unless result.nil? - scope = scope.dup - end while (!scope.empty? || !skip_root) && scope.slice!(-step, step) - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/chain.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/chain.rb deleted file mode 100644 index e081a91c2a3f2..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/chain.rb +++ /dev/null @@ -1,130 +0,0 @@ -# frozen_string_literal: true - -module I18n - module Backend - # Backend that chains multiple other backends and checks each of them when - # a translation needs to be looked up. This is useful when you want to use - # standard translations with a Simple backend but store custom application - # translations in a database or other backends. - # - # To use the Chain backend instantiate it and set it to the I18n module. 
- # You can add chained backends through the initializer or backends - # accessor: - # - # # preserves the existing Simple backend set to I18n.backend - # I18n.backend = I18n::Backend::Chain.new(I18n::Backend::ActiveRecord.new, I18n.backend) - # - # The implementation assumes that all backends added to the Chain implement - # a lookup method with the same API as Simple backend does. - # - # Fallback translations using the :default option are only used by the last backend of a chain. - class Chain - module Implementation - include Base - - attr_accessor :backends - - def initialize(*backends) - self.backends = backends - end - - def initialized? - backends.all? do |backend| - backend.instance_eval do - return false unless initialized? - end - end - true - end - - def reload! - backends.each { |backend| backend.reload! } - end - - def eager_load! - backends.each { |backend| backend.eager_load! } - end - - def store_translations(locale, data, options = EMPTY_HASH) - backends.first.store_translations(locale, data, options) - end - - def available_locales - backends.map { |backend| backend.available_locales }.flatten.uniq - end - - def translate(locale, key, default_options = EMPTY_HASH) - namespace = nil - options = Utils.except(default_options, :default) - - backends.each do |backend| - catch(:exception) do - options = default_options if backend == backends.last - translation = backend.translate(locale, key, options) - if namespace_lookup?(translation, options) - namespace = _deep_merge(translation, namespace || {}) - elsif !translation.nil? || (options.key?(:default) && options[:default].nil?) - return translation - end - end - end - - return namespace if namespace - throw(:exception, I18n::MissingTranslation.new(locale, key, options)) - end - - def exists?(locale, key, options = EMPTY_HASH) - backends.any? do |backend| - backend.exists?(locale, key, options) - end - end - - def localize(locale, object, format = :default, options = EMPTY_HASH) - backends.each do |backend| - catch(:exception) do - result = backend.localize(locale, object, format, options) and return result - end - end - throw(:exception, I18n::MissingTranslation.new(locale, format, options)) - end - - protected - def init_translations - backends.each do |backend| - backend.send(:init_translations) - end - end - - def translations - backends.reverse.each_with_object({}) do |backend, memo| - partial_translations = backend.instance_eval do - init_translations unless initialized? - translations - end - Utils.deep_merge!(memo, partial_translations) { |_, a, b| b || a } - end - end - - def namespace_lookup?(result, options) - result.is_a?(Hash) && !options.has_key?(:count) - end - - private - # This is approximately what gets used in ActiveSupport. - # However since we are not guaranteed to run in an ActiveSupport context - # it is wise to have our own copy. We underscore it - # to not pollute the namespace of the including class. - def _deep_merge(hash, other_hash) - copy = hash.dup - other_hash.each_pair do |k,v| - value_from_other = hash[k] - copy[k] = value_from_other.is_a?(Hash) && v.is_a?(Hash) ? 
_deep_merge(value_from_other, v) : v - end - copy - end - end - - include Implementation - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/fallbacks.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/fallbacks.rb deleted file mode 100644 index 6d4d6e138dc97..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/fallbacks.rb +++ /dev/null @@ -1,115 +0,0 @@ -# frozen_string_literal: true - -# I18n locale fallbacks are useful when you want your application to use -# translations from other locales when translations for the current locale are -# missing. E.g. you might want to use :en translations when translations in -# your applications main locale :de are missing. -# -# To enable locale fallbacks you can simply include the Fallbacks module to -# the Simple backend - or whatever other backend you are using: -# -# I18n::Backend::Simple.include(I18n::Backend::Fallbacks) -module I18n - @@fallbacks = nil - - class << self - # Returns the current fallbacks implementation. Defaults to +I18n::Locale::Fallbacks+. - def fallbacks - @@fallbacks ||= I18n::Locale::Fallbacks.new - Thread.current[:i18n_fallbacks] || @@fallbacks - end - - # Sets the current fallbacks implementation. Use this to set a different fallbacks implementation. - def fallbacks=(fallbacks) - @@fallbacks = fallbacks.is_a?(Array) ? I18n::Locale::Fallbacks.new(fallbacks) : fallbacks - Thread.current[:i18n_fallbacks] = @@fallbacks - end - end - - module Backend - module Fallbacks - # Overwrites the Base backend translate method so that it will try each - # locale given by I18n.fallbacks for the given locale. E.g. for the - # locale :"de-DE" it might try the locales :"de-DE", :de and :en - # (depends on the fallbacks implementation) until it finds a result with - # the given options. If it does not find any result for any of the - # locales it will then throw MissingTranslation as usual. - # - # The default option takes precedence over fallback locales only when - # it's a Symbol. When the default contains a String, Proc or Hash - # it is evaluated last after all the fallback locales have been tried. - def translate(locale, key, options = EMPTY_HASH) - return super unless options.fetch(:fallback, true) - return super if options[:fallback_in_progress] - default = extract_non_symbol_default!(options) if options[:default] - - fallback_options = options.merge(:fallback_in_progress => true, fallback_original_locale: locale) - I18n.fallbacks[locale].each do |fallback| - begin - catch(:exception) do - result = super(fallback, key, fallback_options) - unless result.nil? - on_fallback(locale, fallback, key, options) if locale.to_s != fallback.to_s - return result - end - end - rescue I18n::InvalidLocale - # we do nothing when the locale is invalid, as this is a fallback anyways. - end - end - - return if options.key?(:default) && options[:default].nil? 
- - return super(locale, nil, options.merge(:default => default)) if default - throw(:exception, I18n::MissingTranslation.new(locale, key, options)) - end - - def resolve_entry(locale, object, subject, options = EMPTY_HASH) - return subject if options[:resolve] == false - result = catch(:exception) do - options.delete(:fallback_in_progress) if options.key?(:fallback_in_progress) - - case subject - when Symbol - I18n.translate(subject, **options.merge(:locale => options[:fallback_original_locale], :throw => true)) - when Proc - date_or_time = options.delete(:object) || object - resolve_entry(options[:fallback_original_locale], object, subject.call(date_or_time, **options)) - else - subject - end - end - result unless result.is_a?(MissingTranslation) - end - - def extract_non_symbol_default!(options) - defaults = [options[:default]].flatten - first_non_symbol_default = defaults.detect{|default| !default.is_a?(Symbol)} - if first_non_symbol_default - options[:default] = defaults[0, defaults.index(first_non_symbol_default)] - end - return first_non_symbol_default - end - - def exists?(locale, key, options = EMPTY_HASH) - return super unless options.fetch(:fallback, true) - I18n.fallbacks[locale].each do |fallback| - begin - return true if super(fallback, key) - rescue I18n::InvalidLocale - # we do nothing when the locale is invalid, as this is a fallback anyways. - end - end - - false - end - - private - - # Overwrite on_fallback to add specified logic when the fallback succeeds. - def on_fallback(_original_locale, _fallback_locale, _key, _options) - nil - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/flatten.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/flatten.rb deleted file mode 100644 index e9bd9d531df88..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/flatten.rb +++ /dev/null @@ -1,118 +0,0 @@ -# frozen_string_literal: true - -module I18n - module Backend - # This module contains several helpers to assist flattening translations. - # You may want to flatten translations for: - # - # 1) speed up lookups, as in the Memoize backend; - # 2) In case you want to store translations in a data store, as in ActiveRecord backend; - # - # You can check both backends above for some examples. - # This module also keeps all links in a hash so they can be properly resolved when flattened. - module Flatten - SEPARATOR_ESCAPE_CHAR = "\001" - FLATTEN_SEPARATOR = "." - - # normalize_keys the flatten way. This method is significantly faster - # and creates way less objects than the one at I18n.normalize_keys. - # It also handles escaping the translation keys. - def self.normalize_flat_keys(locale, key, scope, separator) - keys = [scope, key] - keys.flatten! - keys.compact! - - separator ||= I18n.default_separator - - if separator != FLATTEN_SEPARATOR - from_str = "#{FLATTEN_SEPARATOR}#{separator}" - to_str = "#{SEPARATOR_ESCAPE_CHAR}#{FLATTEN_SEPARATOR}" - - keys.map! { |k| k.to_s.tr from_str, to_str } - end - - keys.join(".") - end - - # Receives a string and escape the default separator. - def self.escape_default_separator(key) #:nodoc: - key.to_s.tr(FLATTEN_SEPARATOR, SEPARATOR_ESCAPE_CHAR) - end - - # Shortcut to I18n::Backend::Flatten.normalize_flat_keys - # and then resolve_links. 
- def normalize_flat_keys(locale, key, scope, separator) - key = I18n::Backend::Flatten.normalize_flat_keys(locale, key, scope, separator) - resolve_link(locale, key) - end - - # Store flattened links. - def links - @links ||= I18n.new_double_nested_cache - end - - # Flatten keys for nested Hashes by chaining up keys: - # - # >> { "a" => { "b" => { "c" => "d", "e" => "f" }, "g" => "h" }, "i" => "j"}.wind - # => { "a.b.c" => "d", "a.b.e" => "f", "a.g" => "h", "i" => "j" } - # - def flatten_keys(hash, escape, prev_key=nil, &block) - hash.each_pair do |key, value| - key = escape_default_separator(key) if escape - curr_key = [prev_key, key].compact.join(FLATTEN_SEPARATOR).to_sym - yield curr_key, value - flatten_keys(value, escape, curr_key, &block) if value.is_a?(Hash) - end - end - - # Receives a hash of translations (where the key is a locale and - # the value is another hash) and return a hash with all - # translations flattened. - # - # Nested hashes are included in the flattened hash just if subtree - # is true and Symbols are automatically stored as links. - def flatten_translations(locale, data, escape, subtree) - hash = {} - flatten_keys(data, escape) do |key, value| - if value.is_a?(Hash) - hash[key] = value if subtree - else - store_link(locale, key, value) if value.is_a?(Symbol) - hash[key] = value - end - end - hash - end - - protected - - def store_link(locale, key, link) - links[locale.to_sym][key.to_s] = link.to_s - end - - def resolve_link(locale, key) - key, locale = key.to_s, locale.to_sym - links = self.links[locale] - - if links.key?(key) - links[key] - elsif link = find_link(locale, key) - store_link(locale, key, key.gsub(*link)) - else - key - end - end - - def find_link(locale, key) #:nodoc: - links[locale].each_pair do |from, to| - return [from, to] if key[0, from.length] == from - end && nil - end - - def escape_default_separator(key) #:nodoc: - I18n::Backend::Flatten.escape_default_separator(key) - end - - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/gettext.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/gettext.rb deleted file mode 100644 index 076964638eb07..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/gettext.rb +++ /dev/null @@ -1,83 +0,0 @@ -# frozen_string_literal: true - -require 'i18n/gettext' -require 'i18n/gettext/po_parser' - -module I18n - module Backend - # Experimental support for using Gettext po files to store translations. - # - # To use this you can simply include the module to the Simple backend - or - # whatever other backend you are using. - # - # I18n::Backend::Simple.include(I18n::Backend::Gettext) - # - # Now you should be able to include your Gettext translation (*.po) files to - # the +I18n.load_path+ so they're loaded to the backend and you can use them as - # usual: - # - # I18n.load_path += Dir["path/to/locales/*.po"] - # - # Following the Gettext convention this implementation expects that your - # translation files are named by their locales. E.g. the file en.po would - # contain the translations for the English locale. - # - # To translate text you must use one of the translate methods provided by - # I18n::Gettext::Helpers. - # - # include I18n::Gettext::Helpers - # puts _("some string") - # - # Without it strings containing periods (".") will not be translated. 
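A compact usage sketch of the Gettext backend described in the comments above, under the same assumption that the i18n gem is installed outside the vendored bundle; the catalogue path locale/en.po is purely illustrative and follows the convention that .po files are named after their locale.

    require "i18n"
    require "i18n/gettext/helpers"

    I18n::Backend::Simple.include(I18n::Backend::Gettext)
    I18n.load_path += Dir["locale/*.po"]   # e.g. locale/en.po, locale/de.po
    I18n.default_locale = :en

    include I18n::Gettext::Helpers
    puts _("some msgid from the .po file")  # translated via the loaded catalogue
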
- - module Gettext - class PoData < Hash - def set_comment(msgid_or_sym, comment) - # ignore - end - end - - protected - def load_po(filename) - locale = ::File.basename(filename, '.po').to_sym - data = normalize(locale, parse(filename)) - [{ locale => data }, false] - end - - def parse(filename) - GetText::PoParser.new.parse(::File.read(filename), PoData.new) - end - - def normalize(locale, data) - data.inject({}) do |result, (key, value)| - unless key.nil? || key.empty? - key = key.gsub(I18n::Gettext::CONTEXT_SEPARATOR, '|') - key, value = normalize_pluralization(locale, key, value) if key.index("\000") - - parts = key.split('|').reverse - normalized = parts.inject({}) do |_normalized, part| - { part => _normalized.empty? ? value : _normalized } - end - - Utils.deep_merge!(result, normalized) - end - result - end - end - - def normalize_pluralization(locale, key, value) - # FIXME po_parser includes \000 chars that can not be turned into Symbols - key = key.gsub("\000", I18n::Gettext::PLURAL_SEPARATOR).split(I18n::Gettext::PLURAL_SEPARATOR).first - - keys = I18n::Gettext.plural_keys(locale) - values = value.split("\000") - raise "invalid number of plurals: #{values.size}, keys: #{keys.inspect} on #{locale} locale for msgid #{key.inspect} with values #{values.inspect}" if values.size != keys.size - - result = {} - values.each_with_index { |_value, ix| result[keys[ix]] = _value } - [key, result] - end - - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/interpolation_compiler.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/interpolation_compiler.rb deleted file mode 100644 index 8b52e7b3e9f69..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/interpolation_compiler.rb +++ /dev/null @@ -1,123 +0,0 @@ -# frozen_string_literal: true - -# The InterpolationCompiler module contains optimizations that can tremendously -# speed up the interpolation process on the Simple backend. -# -# It works by defining a pre-compiled method on stored translation Strings that -# already bring all the knowledge about contained interpolation variables etc. -# so that the actual recurring interpolation will be very fast. -# -# To enable pre-compiled interpolations you can simply include the -# InterpolationCompiler module to the Simple backend: -# -# I18n::Backend::Simple.include(I18n::Backend::InterpolationCompiler) -# -# Note that InterpolationCompiler does not yield meaningful results and consequently -# should not be used with Ruby 1.9 (YARV) but improves performance everywhere else -# (jRuby, Rubinius). -module I18n - module Backend - module InterpolationCompiler - module Compiler - extend self - - TOKENIZER = /(%%\{[^\}]+\}|%\{[^\}]+\})/ - INTERPOLATION_SYNTAX_PATTERN = /(%)?(%\{([^\}]+)\})/ - - def compile_if_an_interpolation(string) - if interpolated_str?(string) - string.instance_eval <<-RUBY_EVAL, __FILE__, __LINE__ - def i18n_interpolate(v = {}) - "#{compiled_interpolation_body(string)}" - end - RUBY_EVAL - end - - string - end - - def interpolated_str?(str) - str.kind_of?(::String) && str =~ INTERPOLATION_SYNTAX_PATTERN - end - - protected - # tokenize("foo %{bar} baz %%{buz}") # => ["foo ", "%{bar}", " baz ", "%%{buz}"] - def tokenize(str) - str.split(TOKENIZER) - end - - def compiled_interpolation_body(str) - tokenize(str).map do |token| - (matchdata = token.match(INTERPOLATION_SYNTAX_PATTERN)) ? 
handle_interpolation_token(token, matchdata) : escape_plain_str(token) - end.join - end - - def handle_interpolation_token(interpolation, matchdata) - escaped, pattern, key = matchdata.values_at(1, 2, 3) - escaped ? pattern : compile_interpolation_token(key.to_sym) - end - - def compile_interpolation_token(key) - "\#{#{interpolate_or_raise_missing(key)}}" - end - - def interpolate_or_raise_missing(key) - escaped_key = escape_key_sym(key) - RESERVED_KEYS.include?(key) ? reserved_key(escaped_key) : interpolate_key(escaped_key) - end - - def interpolate_key(key) - [direct_key(key), nil_key(key), missing_key(key)].join('||') - end - - def direct_key(key) - "((t = v[#{key}]) && t.respond_to?(:call) ? t.call : t)" - end - - def nil_key(key) - "(v.has_key?(#{key}) && '')" - end - - def missing_key(key) - "I18n.config.missing_interpolation_argument_handler.call(#{key}, v, self)" - end - - def reserved_key(key) - "raise(ReservedInterpolationKey.new(#{key}, self))" - end - - def escape_plain_str(str) - str.gsub(/"|\\|#/) {|x| "\\#{x}"} - end - - def escape_key_sym(key) - # rely on Ruby to do all the hard work :) - key.to_sym.inspect - end - end - - def interpolate(locale, string, values) - if string.respond_to?(:i18n_interpolate) - string.i18n_interpolate(values) - elsif values - super - else - string - end - end - - def store_translations(locale, data, options = EMPTY_HASH) - compile_all_strings_in(data) - super - end - - protected - def compile_all_strings_in(data) - data.each_value do |value| - Compiler.compile_if_an_interpolation(value) - compile_all_strings_in(value) if value.kind_of?(Hash) - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/key_value.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/key_value.rb deleted file mode 100644 index b937e253a893c..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/key_value.rb +++ /dev/null @@ -1,204 +0,0 @@ -# frozen_string_literal: true - -require 'i18n/backend/base' - -module I18n - - begin - require 'oj' - class JSON - class << self - def encode(value) - Oj::Rails.encode(value) - end - def decode(value) - Oj.load(value) - end - end - end - rescue LoadError - require 'active_support/json' - JSON = ActiveSupport::JSON - end - - module Backend - # This is a basic backend for key value stores. It receives on - # initialization the store, which should respond to three methods: - # - # * store#[](key) - Used to get a value - # * store#[]=(key, value) - Used to set a value - # * store#keys - Used to get all keys - # - # Since these stores only supports string, all values are converted - # to JSON before being stored, allowing it to also store booleans, - # hashes and arrays. However, this store does not support Procs. - # - # As the ActiveRecord backend, Symbols are just supported when loading - # translations from the filesystem or through explicit store translations. - # - # Also, avoid calling I18n.available_locales since it's a somehow - # expensive operation in most stores. - # - # == Example - # - # To setup I18n to use TokyoCabinet in memory is quite straightforward: - # - # require 'rufus/tokyo/cabinet' # gem install rufus-tokyo - # I18n.backend = I18n::Backend::KeyValue.new(Rufus::Tokyo::Cabinet.new('*')) - # - # == Performance - # - # You may make this backend even faster by including the Memoize module. 
- # However, notice that you should properly clear the cache if you change - # values directly in the key-store. - # - # == Subtrees - # - # In most backends, you are allowed to retrieve part of a translation tree: - # - # I18n.backend.store_translations :en, :foo => { :bar => :baz } - # I18n.t "foo" #=> { :bar => :baz } - # - # This backend supports this feature by default, but it slows down the storage - # of new data considerably and makes hard to delete entries. That said, you are - # allowed to disable the storage of subtrees on initialization: - # - # I18n::Backend::KeyValue.new(@store, false) - # - # This is useful if you are using a KeyValue backend chained to a Simple backend. - class KeyValue - module Implementation - attr_accessor :store - - include Base, Flatten - - def initialize(store, subtrees=true) - @store, @subtrees = store, subtrees - end - - def initialized? - !@store.nil? - end - - def store_translations(locale, data, options = EMPTY_HASH) - escape = options.fetch(:escape, true) - flatten_translations(locale, data, escape, @subtrees).each do |key, value| - key = "#{locale}.#{key}" - - case value - when Hash - if @subtrees && (old_value = @store[key]) - old_value = JSON.decode(old_value) - value = Utils.deep_merge!(Utils.deep_symbolize_keys(old_value), value) if old_value.is_a?(Hash) - end - when Proc - raise "Key-value stores cannot handle procs" - end - - @store[key] = JSON.encode(value) unless value.is_a?(Symbol) - end - end - - def available_locales - locales = @store.keys.map { |k| k =~ /\./; $` } - locales.uniq! - locales.compact! - locales.map! { |k| k.to_sym } - locales - end - - protected - - # Queries the translations from the key-value store and converts - # them into a hash such as the one returned from loading the - # haml files - def translations - @translations = Utils.deep_symbolize_keys(@store.keys.clone.map do |main_key| - main_value = JSON.decode(@store[main_key]) - main_key.to_s.split(".").reverse.inject(main_value) do |value, key| - {key.to_sym => value} - end - end.inject{|hash, elem| Utils.deep_merge!(hash, elem)}) - end - - def init_translations - # NO OP - # This call made also inside Simple Backend and accessed by - # other plugins like I18n-js and babilu and - # to use it along with the Chain backend we need to - # provide a uniform API even for protected methods :S - end - - def subtrees? - @subtrees - end - - def lookup(locale, key, scope = [], options = EMPTY_HASH) - key = normalize_flat_keys(locale, key, scope, options[:separator]) - value = @store["#{locale}.#{key}"] - value = JSON.decode(value) if value - - if value.is_a?(Hash) - Utils.deep_symbolize_keys(value) - elsif !value.nil? - value - elsif !@subtrees - SubtreeProxy.new("#{locale}.#{key}", @store) - end - end - - def pluralize(locale, entry, count) - if subtrees? - super - else - return entry unless entry.is_a?(Hash) - key = pluralization_key(entry, count) - entry[key] - end - end - end - - class SubtreeProxy - def initialize(master_key, store) - @master_key = master_key - @store = store - @subtree = nil - end - - def has_key?(key) - @subtree && @subtree.has_key?(key) || self[key] - end - - def [](key) - unless @subtree && value = @subtree[key] - value = @store["#{@master_key}.#{key}"] - if value - value = JSON.decode(value) - (@subtree ||= {})[key] = value - end - end - value - end - - def is_a?(klass) - Hash == klass || super - end - alias :kind_of? :is_a? - - def instance_of?(klass) - Hash == klass || super - end - - def nil? - @subtree.nil? 
- end - - def inspect - @subtree.inspect - end - end - - include Implementation - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/lazy_loadable.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/lazy_loadable.rb deleted file mode 100644 index 575b32bfa590e..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/lazy_loadable.rb +++ /dev/null @@ -1,184 +0,0 @@ -# frozen_string_literal: true - -module I18n - module Backend - # Backend that lazy loads translations based on the current locale. This - # implementation avoids loading all translations up front. Instead, it only - # loads the translations that belong to the current locale. This offers a - # performance incentive in local development and test environments for - # applications with many translations for many different locales. It's - # particularly useful when the application only refers to a single locales' - # translations at a time (ex. A Rails workload). The implementation - # identifies which translation files from the load path belong to the - # current locale by pattern matching against their path name. - # - # Specifically, a translation file is considered to belong to a locale if: - # a) the filename is in the I18n load path - # b) the filename ends in a supported extension (ie. .yml, .json, .po, .rb) - # c) the filename starts with the locale identifier - # d) the locale identifier and optional proceeding text is separated by an underscore, ie. "_". - # - # Examples: - # Valid files that will be selected by this backend: - # - # "files/locales/en_translation.yml" (Selected for locale "en") - # "files/locales/fr.po" (Selected for locale "fr") - # - # Invalid files that won't be selected by this backend: - # - # "files/locales/translation-file" - # "files/locales/en-translation.unsupported" - # "files/locales/french/translation.yml" - # "files/locales/fr/translation.yml" - # - # The implementation uses this assumption to defer the loading of - # translation files until the current locale actually requires them. - # - # The backend has two working modes: lazy_load and eager_load. - # - # Note: This backend should only be enabled in test environments! - # When the mode is set to false, the backend behaves exactly like the - # Simple backend, with an additional check that the paths being loaded - # abide by the format. If paths can't be matched to the format, an error is raised. - # - # You can configure lazy loaded backends through the initializer or backends - # accessor: - # - # # In test environments - # - # I18n.backend = I18n::Backend::LazyLoadable.new(lazy_load: true) - # - # # In other environments, such as production and CI - # - # I18n.backend = I18n::Backend::LazyLoadable.new(lazy_load: false) # default - # - class LocaleExtractor - class << self - def locale_from_path(path) - name = File.basename(path, ".*") - locale = name.split("_").first - locale.to_sym unless locale.nil? - end - end - end - - class LazyLoadable < Simple - def initialize(lazy_load: false) - @lazy_load = lazy_load - end - - # Returns whether the current locale is initialized. - def initialized? - if lazy_load? - initialized_locales[I18n.locale] - else - super - end - end - - # Clean up translations and uninitialize all locales. - def reload! - if lazy_load? - @initialized_locales = nil - @translations = nil - else - super - end - end - - # Eager loading is not supported in the lazy context. - def eager_load! 
- if lazy_load? - raise UnsupportedMethod.new(__method__, self.class, "Cannot eager load translations because backend was configured with lazy_load: true.") - else - super - end - end - - # Parse the load path and extract all locales. - def available_locales - if lazy_load? - I18n.load_path.map { |path| LocaleExtractor.locale_from_path(path) }.uniq - else - super - end - end - - def lookup(locale, key, scope = [], options = EMPTY_HASH) - if lazy_load? - I18n.with_locale(locale) do - super - end - else - super - end - end - - protected - - - # Load translations from files that belong to the current locale. - def init_translations - file_errors = if lazy_load? - initialized_locales[I18n.locale] = true - load_translations_and_collect_file_errors(filenames_for_current_locale) - else - @initialized = true - load_translations_and_collect_file_errors(I18n.load_path) - end - - raise InvalidFilenames.new(file_errors) unless file_errors.empty? - end - - def initialized_locales - @initialized_locales ||= Hash.new(false) - end - - private - - def lazy_load? - @lazy_load - end - - class FilenameIncorrect < StandardError - def initialize(file, expected_locale, unexpected_locales) - super "#{file} can only load translations for \"#{expected_locale}\". Found translations for: #{unexpected_locales}." - end - end - - # Loads each file supplied and asserts that the file only loads - # translations as expected by the name. The method returns a list of - # errors corresponding to offending files. - def load_translations_and_collect_file_errors(files) - errors = [] - - load_translations(files) do |file, loaded_translations| - assert_file_named_correctly!(file, loaded_translations) - rescue FilenameIncorrect => e - errors << e - end - - errors - end - - # Select all files from I18n load path that belong to current locale. - # These files must start with the locale identifier (ie. "en", "pt-BR"), - # followed by an "_" demarcation to separate proceeding text. - def filenames_for_current_locale - I18n.load_path.flatten.select do |path| - LocaleExtractor.locale_from_path(path) == I18n.locale - end - end - - # Checks if a filename is named in correspondence to the translations it loaded. - # The locale extracted from the path must be the single locale loaded in the translations. - def assert_file_named_correctly!(file, translations) - loaded_locales = translations.keys.map(&:to_sym) - expected_locale = LocaleExtractor.locale_from_path(file) - unexpected_locales = loaded_locales.reject { |locale| locale == expected_locale } - - raise FilenameIncorrect.new(file, expected_locale, unexpected_locales) unless unexpected_locales.empty? - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/memoize.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/memoize.rb deleted file mode 100644 index 3293d2b4273d8..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/memoize.rb +++ /dev/null @@ -1,54 +0,0 @@ -# frozen_string_literal: true - -# Memoize module simply memoizes the values returned by lookup using -# a flat hash and can tremendously speed up the lookup process in a backend. -# -# To enable it you can simply include the Memoize module to your backend: -# -# I18n::Backend::Simple.include(I18n::Backend::Memoize) -# -# Notice that it's the responsibility of the backend to define whenever the -# cache should be cleaned. 
-module I18n - module Backend - module Memoize - def available_locales - @memoized_locales ||= super - end - - def store_translations(locale, data, options = EMPTY_HASH) - reset_memoizations!(locale) - super - end - - def reload! - reset_memoizations! - super - end - - def eager_load! - memoized_lookup - available_locales - super - end - - protected - - def lookup(locale, key, scope = nil, options = EMPTY_HASH) - flat_key = I18n::Backend::Flatten.normalize_flat_keys(locale, - key, scope, options[:separator]).to_sym - flat_hash = memoized_lookup[locale.to_sym] - flat_hash.key?(flat_key) ? flat_hash[flat_key] : (flat_hash[flat_key] = super) - end - - def memoized_lookup - @memoized_lookup ||= I18n.new_double_nested_cache - end - - def reset_memoizations!(locale=nil) - @memoized_locales = nil - (locale ? memoized_lookup[locale.to_sym] : memoized_lookup).clear - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/metadata.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/metadata.rb deleted file mode 100644 index 51ea7a2a8875a..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/metadata.rb +++ /dev/null @@ -1,71 +0,0 @@ -# frozen_string_literal: true - -# I18n translation metadata is useful when you want to access information -# about how a translation was looked up, pluralized or interpolated in -# your application. -# -# msg = I18n.t(:message, :default => 'Hi!', :scope => :foo) -# msg.translation_metadata -# # => { :key => :message, :scope => :foo, :default => 'Hi!' } -# -# If a :count option was passed to #translate it will be set to the metadata. -# Likewise, if any interpolation variables were passed they will also be set. -# -# To enable translation metadata you can simply include the Metadata module -# into the Simple backend class - or whatever other backend you are using: -# -# I18n::Backend::Simple.include(I18n::Backend::Metadata) -# -module I18n - module Backend - module Metadata - class << self - def included(base) - Object.class_eval do - def translation_metadata - unless self.frozen? - @translation_metadata ||= {} - else - {} - end - end - - def translation_metadata=(translation_metadata) - @translation_metadata = translation_metadata unless self.frozen? 
- end - end unless Object.method_defined?(:translation_metadata) - end - end - - def translate(locale, key, options = EMPTY_HASH) - metadata = { - :locale => locale, - :key => key, - :scope => options[:scope], - :default => options[:default], - :separator => options[:separator], - :values => options.reject { |name, _value| RESERVED_KEYS.include?(name) } - } - with_metadata(metadata) { super } - end - - def interpolate(locale, entry, values = EMPTY_HASH) - metadata = entry.translation_metadata.merge(:original => entry) - with_metadata(metadata) { super } - end - - def pluralize(locale, entry, count) - with_metadata(:count => count) { super } - end - - protected - - def with_metadata(metadata, &block) - result = yield - result.translation_metadata = result.translation_metadata.merge(metadata) if result - result - end - - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/pluralization.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/pluralization.rb deleted file mode 100644 index 1d3277b886159..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/pluralization.rb +++ /dev/null @@ -1,96 +0,0 @@ -# frozen_string_literal: true - -# I18n Pluralization are useful when you want your application to -# customize pluralization rules. -# -# To enable locale specific pluralizations you can simply include the -# Pluralization module to the Simple backend - or whatever other backend you -# are using. -# -# I18n::Backend::Simple.include(I18n::Backend::Pluralization) -# -# You also need to make sure to provide pluralization algorithms to the -# backend, i.e. include them to your I18n.load_path accordingly. -module I18n - module Backend - module Pluralization - # Overwrites the Base backend translate method so that it will check the - # translation meta data space (:i18n) for a locale specific pluralization - # rule and use it to pluralize the given entry. I.e., the library expects - # pluralization rules to be stored at I18n.t(:'i18n.plural.rule') - # - # Pluralization rules are expected to respond to #call(count) and - # return a pluralization key. Valid keys depend on the pluralization - # rules for the locale, as defined in the CLDR. - # As of v41, 6 locale-specific plural categories are defined: - # :few, :many, :one, :other, :two, :zero - # - # n.b., The :one plural category does not imply the number 1. - # Instead, :one is a category for any number that behaves like 1 in - # that locale. For example, in some locales, :one is used for numbers - # that end in "1" (like 1, 21, 151) but that don't end in - # 11 (like 11, 111, 10311). - # Similar notes apply to the :two, and :zero plural categories. - # - # If you want to have different strings for the categories of count == 0 - # (e.g. "I don't have any cars") or count == 1 (e.g. "I have a single car") - # use the explicit `"0"` and `"1"` keys. - # https://unicode-org.github.io/cldr/ldml/tr35-numbers.html#Explicit_0_1_rules - def pluralize(locale, entry, count) - return entry unless entry.is_a?(Hash) && count - - pluralizer = pluralizer(locale) - if pluralizer.respond_to?(:call) - # Deprecation: The use of the `zero` key in this way is incorrect. - # Users that want a different string for the case of `count == 0` should use the explicit "0" key instead. - # We keep this incorrect behaviour for now for backwards compatibility until we can remove it. 
- # Ref: https://github.com/ruby-i18n/i18n/issues/629 - return entry[:zero] if count == 0 && entry.has_key?(:zero) - - # "0" and "1" are special cases - # https://unicode-org.github.io/cldr/ldml/tr35-numbers.html#Explicit_0_1_rules - if count == 0 || count == 1 - value = entry[symbolic_count(count)] - return value if value - end - - # Lateral Inheritance of "count" attribute (http://www.unicode.org/reports/tr35/#Lateral_Inheritance): - # > If there is no value for a path, and that path has a [@count="x"] attribute and value, then: - # > 1. If "x" is numeric, the path falls back to the path with [@count=«the plural rules category for x for that locale»], within that the same locale. - # > 2. If "x" is anything but "other", it falls back to a path [@count="other"], within that the same locale. - # > 3. If "x" is "other", it falls back to the path that is completely missing the count item, within that the same locale. - # Note: We don't yet implement #3 above, since we haven't decided how lateral inheritance attributes should be represented. - plural_rule_category = pluralizer.call(count) - - value = if entry.has_key?(plural_rule_category) || entry.has_key?(:other) - entry[plural_rule_category] || entry[:other] - else - raise InvalidPluralizationData.new(entry, count, plural_rule_category) - end - else - super - end - end - - protected - - def pluralizers - @pluralizers ||= {} - end - - def pluralizer(locale) - pluralizers[locale] ||= I18n.t(:'i18n.plural.rule', :locale => locale, :resolve => false) - end - - private - - # Normalizes categories of 0.0 and 1.0 - # and returns the symbolic version - def symbolic_count(count) - count = 0 if count == 0 - count = 1 if count == 1 - count.to_s.to_sym - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/simple.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/simple.rb deleted file mode 100644 index 7caa7dd1ad2ac..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/simple.rb +++ /dev/null @@ -1,113 +0,0 @@ -# frozen_string_literal: true - -require 'i18n/backend/base' - -module I18n - module Backend - # A simple backend that reads translations from YAML files and stores them in - # an in-memory hash. Relies on the Base backend. - # - # The implementation is provided by a Implementation module allowing to easily - # extend Simple backend's behavior by including modules. E.g.: - # - # module I18n::Backend::Pluralization - # def pluralize(*args) - # # extended pluralization logic - # super - # end - # end - # - # I18n::Backend::Simple.include(I18n::Backend::Pluralization) - class Simple - module Implementation - include Base - - # Mutex to ensure that concurrent translations loading will be thread-safe - MUTEX = Mutex.new - - def initialized? - @initialized ||= false - end - - # Stores translations for the given locale in memory. - # This uses a deep merge for the translations hash, so existing - # translations will be overwritten by new ones only at the deepest - # level of the hash. - def store_translations(locale, data, options = EMPTY_HASH) - if I18n.enforce_available_locales && - I18n.available_locales_initialized? 
&& - !I18n.locale_available?(locale) - return data - end - locale = locale.to_sym - translations[locale] ||= Concurrent::Hash.new - data = Utils.deep_symbolize_keys(data) unless options.fetch(:skip_symbolize_keys, false) - Utils.deep_merge!(translations[locale], data) - end - - # Get available locales from the translations hash - def available_locales - init_translations unless initialized? - translations.inject([]) do |locales, (locale, data)| - locales << locale unless data.size <= 1 && (data.empty? || data.has_key?(:i18n)) - locales - end - end - - # Clean up translations hash and set initialized to false on reload! - def reload! - @initialized = false - @translations = nil - super - end - - def eager_load! - init_translations unless initialized? - super - end - - def translations(do_init: false) - # To avoid returning empty translations, - # call `init_translations` - init_translations if do_init && !initialized? - - @translations ||= Concurrent::Hash.new do |h, k| - MUTEX.synchronize do - h[k] = Concurrent::Hash.new - end - end - end - - protected - - def init_translations - load_translations - @initialized = true - end - - # Looks up a translation from the translations hash. Returns nil if - # either key is nil, or locale, scope or key do not exist as a key in the - # nested translations hash. Splits keys or scopes containing dots - # into multiple keys, i.e. currency.format is regarded the same as - # %w(currency format). - def lookup(locale, key, scope = [], options = EMPTY_HASH) - init_translations unless initialized? - keys = I18n.normalize_keys(locale, key, scope, options[:separator]) - - keys.inject(translations) do |result, _key| - return nil unless result.is_a?(Hash) - unless result.has_key?(_key) - _key = _key.to_s.to_sym - return nil unless result.has_key?(_key) - end - result = result[_key] - result = resolve_entry(locale, _key, result, Utils.except(options.merge(:scope => nil), :count)) if result.is_a?(Symbol) - result - end - end - end - - include Implementation - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/transliterator.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/transliterator.rb deleted file mode 100644 index 70c0df3dc81cf..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/backend/transliterator.rb +++ /dev/null @@ -1,108 +0,0 @@ -# encoding: utf-8 -# frozen_string_literal: true - -module I18n - module Backend - module Transliterator - DEFAULT_REPLACEMENT_CHAR = "?" - - # Given a locale and a UTF-8 string, return the locale's ASCII - # approximation for the string. - def transliterate(locale, string, replacement = nil) - @transliterators ||= {} - @transliterators[locale] ||= Transliterator.get I18n.t(:'i18n.transliterate.rule', - :locale => locale, :resolve => false, :default => {}) - @transliterators[locale].transliterate(string, replacement) - end - - # Get a transliterator instance. - def self.get(rule = nil) - if !rule || rule.kind_of?(Hash) - HashTransliterator.new(rule) - elsif rule.kind_of? Proc - ProcTransliterator.new(rule) - else - raise I18n::ArgumentError, "Transliteration rule must be a proc or a hash." - end - end - - # A transliterator which accepts a Proc as its transliteration rule. - class ProcTransliterator - def initialize(rule) - @rule = rule - end - - def transliterate(string, replacement = nil) - @rule.call(string) - end - end - - # A transliterator which accepts a Hash of characters as its translation - # rule. 
- class HashTransliterator - DEFAULT_APPROXIMATIONS = { - "À"=>"A", "Á"=>"A", "Â"=>"A", "Ã"=>"A", "Ä"=>"A", "Å"=>"A", "Æ"=>"AE", - "Ç"=>"C", "È"=>"E", "É"=>"E", "Ê"=>"E", "Ë"=>"E", "Ì"=>"I", "Í"=>"I", - "Î"=>"I", "Ï"=>"I", "Ð"=>"D", "Ñ"=>"N", "Ò"=>"O", "Ó"=>"O", "Ô"=>"O", - "Õ"=>"O", "Ö"=>"O", "×"=>"x", "Ø"=>"O", "Ù"=>"U", "Ú"=>"U", "Û"=>"U", - "Ü"=>"U", "Ý"=>"Y", "Þ"=>"Th", "ß"=>"ss", "ẞ"=>"SS", "à"=>"a", - "á"=>"a", "â"=>"a", "ã"=>"a", "ä"=>"a", "å"=>"a", "æ"=>"ae", "ç"=>"c", - "è"=>"e", "é"=>"e", "ê"=>"e", "ë"=>"e", "ì"=>"i", "í"=>"i", "î"=>"i", - "ï"=>"i", "ð"=>"d", "ñ"=>"n", "ò"=>"o", "ó"=>"o", "ô"=>"o", "õ"=>"o", - "ö"=>"o", "ø"=>"o", "ù"=>"u", "ú"=>"u", "û"=>"u", "ü"=>"u", "ý"=>"y", - "þ"=>"th", "ÿ"=>"y", "Ā"=>"A", "ā"=>"a", "Ă"=>"A", "ă"=>"a", "Ą"=>"A", - "ą"=>"a", "Ć"=>"C", "ć"=>"c", "Ĉ"=>"C", "ĉ"=>"c", "Ċ"=>"C", "ċ"=>"c", - "Č"=>"C", "č"=>"c", "Ď"=>"D", "ď"=>"d", "Đ"=>"D", "đ"=>"d", "Ē"=>"E", - "ē"=>"e", "Ĕ"=>"E", "ĕ"=>"e", "Ė"=>"E", "ė"=>"e", "Ę"=>"E", "ę"=>"e", - "Ě"=>"E", "ě"=>"e", "Ĝ"=>"G", "ĝ"=>"g", "Ğ"=>"G", "ğ"=>"g", "Ġ"=>"G", - "ġ"=>"g", "Ģ"=>"G", "ģ"=>"g", "Ĥ"=>"H", "ĥ"=>"h", "Ħ"=>"H", "ħ"=>"h", - "Ĩ"=>"I", "ĩ"=>"i", "Ī"=>"I", "ī"=>"i", "Ĭ"=>"I", "ĭ"=>"i", "Į"=>"I", - "į"=>"i", "İ"=>"I", "ı"=>"i", "IJ"=>"IJ", "ij"=>"ij", "Ĵ"=>"J", "ĵ"=>"j", - "Ķ"=>"K", "ķ"=>"k", "ĸ"=>"k", "Ĺ"=>"L", "ĺ"=>"l", "Ļ"=>"L", "ļ"=>"l", - "Ľ"=>"L", "ľ"=>"l", "Ŀ"=>"L", "ŀ"=>"l", "Ł"=>"L", "ł"=>"l", "Ń"=>"N", - "ń"=>"n", "Ņ"=>"N", "ņ"=>"n", "Ň"=>"N", "ň"=>"n", "ʼn"=>"'n", "Ŋ"=>"NG", - "ŋ"=>"ng", "Ō"=>"O", "ō"=>"o", "Ŏ"=>"O", "ŏ"=>"o", "Ő"=>"O", "ő"=>"o", - "Œ"=>"OE", "œ"=>"oe", "Ŕ"=>"R", "ŕ"=>"r", "Ŗ"=>"R", "ŗ"=>"r", "Ř"=>"R", - "ř"=>"r", "Ś"=>"S", "ś"=>"s", "Ŝ"=>"S", "ŝ"=>"s", "Ş"=>"S", "ş"=>"s", - "Š"=>"S", "š"=>"s", "Ţ"=>"T", "ţ"=>"t", "Ť"=>"T", "ť"=>"t", "Ŧ"=>"T", - "ŧ"=>"t", "Ũ"=>"U", "ũ"=>"u", "Ū"=>"U", "ū"=>"u", "Ŭ"=>"U", "ŭ"=>"u", - "Ů"=>"U", "ů"=>"u", "Ű"=>"U", "ű"=>"u", "Ų"=>"U", "ų"=>"u", "Ŵ"=>"W", - "ŵ"=>"w", "Ŷ"=>"Y", "ŷ"=>"y", "Ÿ"=>"Y", "Ź"=>"Z", "ź"=>"z", "Ż"=>"Z", - "ż"=>"z", "Ž"=>"Z", "ž"=>"z" - }.freeze - - def initialize(rule = nil) - @rule = rule - add_default_approximations - add rule if rule - end - - def transliterate(string, replacement = nil) - replacement ||= DEFAULT_REPLACEMENT_CHAR - string.gsub(/[^\x00-\x7f]/u) do |char| - approximations[char] || replacement - end - end - - private - - def approximations - @approximations ||= {} - end - - def add_default_approximations - DEFAULT_APPROXIMATIONS.each do |key, value| - approximations[key] = value - end - end - - # Add transliteration rules to the approximations hash. - def add(hash) - hash.each do |key, value| - approximations[key.to_s] = value.to_s - end - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/config.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/config.rb deleted file mode 100644 index 9878e02e708c5..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/config.rb +++ /dev/null @@ -1,165 +0,0 @@ -# frozen_string_literal: true - -require 'set' - -module I18n - class Config - # The only configuration value that is not global and scoped to thread is :locale. - # It defaults to the default_locale. - def locale - defined?(@locale) && @locale != nil ? @locale : default_locale - end - - # Sets the current locale pseudo-globally, i.e. in the Thread.current hash. - def locale=(locale) - I18n.enforce_available_locales!(locale) - @locale = locale && locale.to_sym - end - - # Returns the current backend. 
Defaults to +Backend::Simple+. - def backend - @@backend ||= Backend::Simple.new - end - - # Sets the current backend. Used to set a custom backend. - def backend=(backend) - @@backend = backend - end - - # Returns the current default locale. Defaults to :'en' - def default_locale - @@default_locale ||= :en - end - - # Sets the current default locale. Used to set a custom default locale. - def default_locale=(locale) - I18n.enforce_available_locales!(locale) - @@default_locale = locale && locale.to_sym - end - - # Returns an array of locales for which translations are available. - # Unless you explicitly set these through I18n.available_locales= - # the call will be delegated to the backend. - def available_locales - @@available_locales ||= nil - @@available_locales || backend.available_locales - end - - # Caches the available locales list as both strings and symbols in a Set, so - # that we can have faster lookups to do the available locales enforce check. - def available_locales_set #:nodoc: - @@available_locales_set ||= available_locales.inject(Set.new) do |set, locale| - set << locale.to_s << locale.to_sym - end - end - - # Sets the available locales. - def available_locales=(locales) - @@available_locales = Array(locales).map { |locale| locale.to_sym } - @@available_locales = nil if @@available_locales.empty? - @@available_locales_set = nil - end - - # Returns true if the available_locales have been initialized - def available_locales_initialized? - ( !!defined?(@@available_locales) && !!@@available_locales ) - end - - # Clears the available locales set so it can be recomputed again after I18n - # gets reloaded. - def clear_available_locales_set #:nodoc: - @@available_locales_set = nil - end - - # Returns the current default scope separator. Defaults to '.' - def default_separator - @@default_separator ||= '.' - end - - # Sets the current default scope separator. - def default_separator=(separator) - @@default_separator = separator - end - - # Returns the current exception handler. Defaults to an instance of - # I18n::ExceptionHandler. - def exception_handler - @@exception_handler ||= ExceptionHandler.new - end - - # Sets the exception handler. - def exception_handler=(exception_handler) - @@exception_handler = exception_handler - end - - # Returns the current handler for situations when interpolation argument - # is missing. MissingInterpolationArgument will be raised by default. - def missing_interpolation_argument_handler - @@missing_interpolation_argument_handler ||= lambda do |missing_key, provided_hash, string| - raise MissingInterpolationArgument.new(missing_key, provided_hash, string) - end - end - - # Sets the missing interpolation argument handler. It can be any - # object that responds to #call. The arguments that will be passed to #call - # are the same as for MissingInterpolationArgument initializer. Use +Proc.new+ - # if you don't care about arity. - # - # == Example: - # You can suppress raising an exception and return string instead: - # - # I18n.config.missing_interpolation_argument_handler = Proc.new do |key| - # "#{key} is missing" - # end - def missing_interpolation_argument_handler=(exception_handler) - @@missing_interpolation_argument_handler = exception_handler - end - - # Allow clients to register paths providing translation data sources. The - # backend defines acceptable sources. - # - # E.g. 
the provided SimpleBackend accepts a list of paths to translation - # files which are either named *.rb and contain plain Ruby Hashes or are - # named *.yml and contain YAML data. So for the SimpleBackend clients may - # register translation files like this: - # I18n.load_path << 'path/to/locale/en.yml' - def load_path - @@load_path ||= [] - end - - # Sets the load path instance. Custom implementations are expected to - # behave like a Ruby Array. - def load_path=(load_path) - @@load_path = load_path - @@available_locales_set = nil - backend.reload! - end - - # Whether or not to verify if locales are in the list of available locales. - # Defaults to true. - @@enforce_available_locales = true - def enforce_available_locales - @@enforce_available_locales - end - - def enforce_available_locales=(enforce_available_locales) - @@enforce_available_locales = enforce_available_locales - end - - # Returns the current interpolation patterns. Defaults to - # I18n::DEFAULT_INTERPOLATION_PATTERNS. - def interpolation_patterns - @@interpolation_patterns ||= I18n::DEFAULT_INTERPOLATION_PATTERNS.dup - end - - # Sets the current interpolation patterns. Used to set a interpolation - # patterns. - # - # E.g. using {{}} as a placeholder like "{{hello}}, world!": - # - # I18n.config.interpolation_patterns << /\{\{(\w+)\}\}/ - def interpolation_patterns=(interpolation_patterns) - @@interpolation_patterns = interpolation_patterns - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/exceptions.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/exceptions.rb deleted file mode 100644 index 23ca46ecbd461..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/exceptions.rb +++ /dev/null @@ -1,157 +0,0 @@ -# frozen_string_literal: true - -require 'cgi' - -module I18n - class ExceptionHandler - def call(exception, _locale, _key, _options) - if exception.is_a?(MissingTranslation) - exception.message - else - raise exception - end - end - end - - class ArgumentError < ::ArgumentError; end - - class Disabled < ArgumentError - def initialize(method) - super(<<~MESSAGE) - I18n.#{method} is currently disabled, likely because your application is still in its loading phase. - - This method is meant to display text in the user locale, so calling it before the user locale has - been set is likely to display text from the wrong locale to some users. - - If you have a legitimate reason to access i18n data outside of the user flow, you can do so by passing - the desired locale explicitly with the `locale` argument, e.g. 
`I18n.#{method}(..., locale: :en)` - MESSAGE - end - end - - class InvalidLocale < ArgumentError - attr_reader :locale - def initialize(locale) - @locale = locale - super "#{locale.inspect} is not a valid locale" - end - end - - class InvalidLocaleData < ArgumentError - attr_reader :filename - def initialize(filename, exception_message) - @filename, @exception_message = filename, exception_message - super "can not load translations from #{filename}: #{exception_message}" - end - end - - class MissingTranslation < ArgumentError - module Base - PERMITTED_KEYS = [:scope, :default].freeze - - attr_reader :locale, :key, :options - - def initialize(locale, key, options = EMPTY_HASH) - @key, @locale, @options = key, locale, options.slice(*PERMITTED_KEYS) - options.each { |k, v| self.options[k] = v.inspect if v.is_a?(Proc) } - end - - def keys - @keys ||= I18n.normalize_keys(locale, key, options[:scope]).tap do |keys| - keys << 'no key' if keys.size < 2 - end - end - - def message - if (default = options[:default]).is_a?(Array) && default.any? - other_options = ([key, *default]).map { |k| normalized_option(k).prepend('- ') }.join("\n") - "Translation missing. Options considered were:\n#{other_options}" - else - "Translation missing: #{keys.join('.')}" - end - end - - def normalized_option(key) - I18n.normalize_keys(locale, key, options[:scope]).join('.') - end - - alias :to_s :message - - def to_exception - MissingTranslationData.new(locale, key, options) - end - end - - include Base - end - - class MissingTranslationData < ArgumentError - include MissingTranslation::Base - end - - class InvalidPluralizationData < ArgumentError - attr_reader :entry, :count, :key - def initialize(entry, count, key) - @entry, @count, @key = entry, count, key - super "translation data #{entry.inspect} can not be used with :count => #{count}. key '#{key}' is missing." - end - end - - class MissingInterpolationArgument < ArgumentError - attr_reader :key, :values, :string - def initialize(key, values, string) - @key, @values, @string = key, values, string - super "missing interpolation argument #{key.inspect} in #{string.inspect} (#{values.inspect} given)" - end - end - - class ReservedInterpolationKey < ArgumentError - attr_reader :key, :string - def initialize(key, string) - @key, @string = key, string - super "reserved key #{key.inspect} used in #{string.inspect}" - end - end - - class UnknownFileType < ArgumentError - attr_reader :type, :filename - def initialize(type, filename) - @type, @filename = type, filename - super "can not load translations from #{filename}, the file type #{type} is not known" - end - end - - class UnsupportedMethod < ArgumentError - attr_reader :method, :backend_klass, :msg - def initialize(method, backend_klass, msg) - @method = method - @backend_klass = backend_klass - @msg = msg - super "#{backend_klass} does not support the ##{method} method. #{msg}" - end - end - - class InvalidFilenames < ArgumentError - NUMBER_OF_ERRORS_SHOWN = 20 - def initialize(file_errors) - super <<~MSG - Found #{file_errors.count} error(s). - The first #{[file_errors.count, NUMBER_OF_ERRORS_SHOWN].min} error(s): - #{file_errors.map(&:message).first(NUMBER_OF_ERRORS_SHOWN).join("\n")} - - To use the LazyLoadable backend: - 1. Filenames must start with the locale. - 2. An underscore must separate the locale with any optional text that follows. - 3. The file must only contain translation data for the single locale. 
- - Example: - "/config/locales/fr.yml" which contains: - ```yml - fr: - dog: - chien - ``` - MSG - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext.rb deleted file mode 100644 index 858daff44a871..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext.rb +++ /dev/null @@ -1,28 +0,0 @@ -# frozen_string_literal: true - -module I18n - module Gettext - PLURAL_SEPARATOR = "\001" - CONTEXT_SEPARATOR = "\004" - - autoload :Helpers, 'i18n/gettext/helpers' - - @@plural_keys = { :en => [:one, :other] } - - class << self - # returns an array of plural keys for the given locale or the whole hash - # of locale mappings to plural keys so that we can convert from gettext's - # integer-index based style - # TODO move this information to the pluralization module - def plural_keys(*args) - args.empty? ? @@plural_keys : @@plural_keys[args.first] || @@plural_keys[:en] - end - - def extract_scope(msgid, separator) - scope = msgid.to_s.split(separator) - msgid = scope.pop - [scope, msgid] - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext/helpers.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext/helpers.rb deleted file mode 100644 index d077619ff212f..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext/helpers.rb +++ /dev/null @@ -1,75 +0,0 @@ -# frozen_string_literal: true - -require 'i18n/gettext' - -module I18n - module Gettext - # Implements classical Gettext style accessors. To use this include the - # module to the global namespace or wherever you want to use it. - # - # include I18n::Gettext::Helpers - module Helpers - # Makes dynamic translation messages readable for the gettext parser. - # _(fruit) cannot be understood by the gettext parser. To help the parser find all your translations, - # you can add fruit = N_("Apple") which does not translate, but tells the parser: "Apple" needs translation. - # * msgid: the message id. - # * Returns: msgid. 
- def N_(msgsid) - msgsid - end - - def gettext(msgid, options = EMPTY_HASH) - I18n.t(msgid, **{:default => msgid, :separator => '|'}.merge(options)) - end - alias _ gettext - - def sgettext(msgid, separator = '|') - scope, msgid = I18n::Gettext.extract_scope(msgid, separator) - I18n.t(msgid, :scope => scope, :default => msgid, :separator => separator) - end - alias s_ sgettext - - def pgettext(msgctxt, msgid) - separator = I18n::Gettext::CONTEXT_SEPARATOR - sgettext([msgctxt, msgid].join(separator), separator) - end - alias p_ pgettext - - def ngettext(msgid, msgid_plural, n = 1) - nsgettext(msgid, msgid_plural, n) - end - alias n_ ngettext - - # Method signatures: - # nsgettext('Fruits|apple', 'apples', 2) - # nsgettext(['Fruits|apple', 'apples'], 2) - def nsgettext(msgid, msgid_plural, n = 1, separator = '|') - if msgid.is_a?(Array) - msgid, msgid_plural, n, separator = msgid[0], msgid[1], msgid_plural, n - separator = '|' unless separator.is_a?(::String) - end - - scope, msgid = I18n::Gettext.extract_scope(msgid, separator) - default = { :one => msgid, :other => msgid_plural } - I18n.t(msgid, :default => default, :count => n, :scope => scope, :separator => separator) - end - alias ns_ nsgettext - - # Method signatures: - # npgettext('Fruits', 'apple', 'apples', 2) - # npgettext('Fruits', ['apple', 'apples'], 2) - def npgettext(msgctxt, msgid, msgid_plural, n = 1) - separator = I18n::Gettext::CONTEXT_SEPARATOR - - if msgid.is_a?(Array) - msgid_plural, msgid, n = msgid[1], [msgctxt, msgid[0]].join(separator), msgid_plural - else - msgid = [msgctxt, msgid].join(separator) - end - - nsgettext(msgid, msgid_plural, n, separator) - end - alias np_ npgettext - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext/po_parser.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext/po_parser.rb deleted file mode 100644 index a07fdc5874911..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/gettext/po_parser.rb +++ /dev/null @@ -1,329 +0,0 @@ -=begin - poparser.rb - Generate a .mo - - Copyright (C) 2003-2009 Masao Mutoh - - You may redistribute it and/or modify it under the same - license terms as Ruby. -=end - -#MODIFIED -# removed include GetText etc -# added stub translation method _(x) -require 'racc/parser' - -module GetText - - class PoParser < Racc::Parser - - def _(x) - x - end - -module_eval <<'..end src/poparser.ry modeval..id7a99570e05', 'src/poparser.ry', 108 - def unescape(orig) - ret = orig.gsub(/\\n/, "\n") - ret.gsub!(/\\t/, "\t") - ret.gsub!(/\\r/, "\r") - ret.gsub!(/\\"/, "\"") - ret - end - - def parse(str, data, ignore_fuzzy = true) - @comments = [] - @data = data - @fuzzy = false - @msgctxt = "" - $ignore_fuzzy = ignore_fuzzy - - str.strip! - @q = [] - until str.empty? 
do - case str - when /\A\s+/ - str = $' - when /\Amsgctxt/ - @q.push [:MSGCTXT, $&] - str = $' - when /\Amsgid_plural/ - @q.push [:MSGID_PLURAL, $&] - str = $' - when /\Amsgid/ - @q.push [:MSGID, $&] - str = $' - when /\Amsgstr/ - @q.push [:MSGSTR, $&] - str = $' - when /\A\[(\d+)\]/ - @q.push [:PLURAL_NUM, $1] - str = $' - when /\A\#~(.*)/ - $stderr.print _("Warning: obsolete msgid exists.\n") - $stderr.print " #{$&}\n" - @q.push [:COMMENT, $&] - str = $' - when /\A\#(.*)/ - @q.push [:COMMENT, $&] - str = $' - when /\A\"(.*)\"/ - @q.push [:STRING, $1] - str = $' - else - #c = str[0,1] - #@q.push [:STRING, c] - str = str[1..-1] - end - end - @q.push [false, '$end'] - if $DEBUG - @q.each do |a,b| - puts "[#{a}, #{b}]" - end - end - @yydebug = true if $DEBUG - do_parse - - if @comments.size > 0 - @data.set_comment(:last, @comments.join("\n")) - end - @data - end - - def next_token - @q.shift - end - - def on_message(msgid, msgstr) - if msgstr.size > 0 - @data[msgid] = msgstr - @data.set_comment(msgid, @comments.join("\n")) - end - @comments.clear - @msgctxt = "" - end - - def on_comment(comment) - @fuzzy = true if (/fuzzy/ =~ comment) - @comments << comment - end - - -..end src/poparser.ry modeval..id7a99570e05 - -##### racc 1.4.5 generates ### - -racc_reduce_table = [ - 0, 0, :racc_error, - 0, 10, :_reduce_none, - 2, 10, :_reduce_none, - 2, 10, :_reduce_none, - 2, 10, :_reduce_none, - 2, 12, :_reduce_5, - 1, 13, :_reduce_none, - 1, 13, :_reduce_none, - 4, 15, :_reduce_8, - 5, 16, :_reduce_9, - 2, 17, :_reduce_10, - 1, 17, :_reduce_none, - 3, 18, :_reduce_12, - 1, 11, :_reduce_13, - 2, 14, :_reduce_14, - 1, 14, :_reduce_15 ] - -racc_reduce_n = 16 - -racc_shift_n = 26 - -racc_action_table = [ - 3, 13, 5, 7, 9, 15, 16, 17, 20, 17, - 13, 17, 13, 13, 11, 17, 23, 20, 13, 17 ] - -racc_action_check = [ - 1, 16, 1, 1, 1, 12, 12, 12, 18, 18, - 7, 14, 15, 9, 3, 19, 20, 21, 23, 25 ] - -racc_action_pointer = [ - nil, 0, nil, 14, nil, nil, nil, 3, nil, 6, - nil, nil, 0, nil, 4, 5, -6, nil, 2, 8, - 8, 11, nil, 11, nil, 12 ] - -racc_action_default = [ - -1, -16, -2, -16, -3, -13, -4, -16, -6, -16, - -7, 26, -16, -15, -5, -16, -16, -14, -16, -8, - -16, -9, -11, -16, -10, -12 ] - -racc_goto_table = [ - 12, 22, 14, 4, 24, 6, 2, 8, 18, 19, - 10, 21, 1, nil, nil, nil, 25 ] - -racc_goto_check = [ - 5, 9, 5, 3, 9, 4, 2, 6, 5, 5, - 7, 8, 1, nil, nil, nil, 5 ] - -racc_goto_pointer = [ - nil, 12, 5, 2, 4, -7, 6, 9, -7, -17 ] - -racc_goto_default = [ - nil, nil, nil, nil, nil, nil, nil, nil, nil, nil ] - -racc_token_table = { - false => 0, - Object.new => 1, - :COMMENT => 2, - :MSGID => 3, - :MSGCTXT => 4, - :MSGID_PLURAL => 5, - :MSGSTR => 6, - :STRING => 7, - :PLURAL_NUM => 8 } - -racc_use_result_var = true - -racc_nt_base = 9 - -Racc_arg = [ - racc_action_table, - racc_action_check, - racc_action_default, - racc_action_pointer, - racc_goto_table, - racc_goto_check, - racc_goto_default, - racc_goto_pointer, - racc_nt_base, - racc_reduce_table, - racc_token_table, - racc_shift_n, - racc_reduce_n, - racc_use_result_var ] - -Racc_token_to_s_table = [ -'$end', -'error', -'COMMENT', -'MSGID', -'MSGCTXT', -'MSGID_PLURAL', -'MSGSTR', -'STRING', -'PLURAL_NUM', -'$start', -'msgfmt', -'comment', -'msgctxt', -'message', -'string_list', -'single_message', -'plural_message', -'msgstr_plural', -'msgstr_plural_line'] - -Racc_debug_parser = true - -##### racc system variables end ##### - - # reduce 0 omitted - - # reduce 1 omitted - - # reduce 2 omitted - - # reduce 3 omitted - - # reduce 4 omitted - -module_eval <<'.,.,', 
'src/poparser.ry', 25 - def _reduce_5( val, _values, result ) - @msgctxt = unescape(val[1]) + "\004" - result - end -.,., - - # reduce 6 omitted - - # reduce 7 omitted - -module_eval <<'.,.,', 'src/poparser.ry', 48 - def _reduce_8( val, _values, result ) - if @fuzzy and $ignore_fuzzy - if val[1] != "" - $stderr.print _("Warning: fuzzy message was ignored.\n") - $stderr.print " msgid '#{val[1]}'\n" - else - on_message('', unescape(val[3])) - end - @fuzzy = false - else - on_message(@msgctxt + unescape(val[1]), unescape(val[3])) - end - result = "" - result - end -.,., - -module_eval <<'.,.,', 'src/poparser.ry', 65 - def _reduce_9( val, _values, result ) - if @fuzzy and $ignore_fuzzy - if val[1] != "" - $stderr.print _("Warning: fuzzy message was ignored.\n") - $stderr.print "msgid = '#{val[1]}\n" - else - on_message('', unescape(val[3])) - end - @fuzzy = false - else - on_message(@msgctxt + unescape(val[1]) + "\000" + unescape(val[3]), unescape(val[4])) - end - result = "" - result - end -.,., - -module_eval <<'.,.,', 'src/poparser.ry', 76 - def _reduce_10( val, _values, result ) - if val[0].size > 0 - result = val[0] + "\000" + val[1] - else - result = "" - end - result - end -.,., - - # reduce 11 omitted - -module_eval <<'.,.,', 'src/poparser.ry', 84 - def _reduce_12( val, _values, result ) - result = val[2] - result - end -.,., - -module_eval <<'.,.,', 'src/poparser.ry', 91 - def _reduce_13( val, _values, result ) - on_comment(val[0]) - result - end -.,., - -module_eval <<'.,.,', 'src/poparser.ry', 99 - def _reduce_14( val, _values, result ) - result = val.delete_if{|item| item == ""}.join - result - end -.,., - -module_eval <<'.,.,', 'src/poparser.ry', 103 - def _reduce_15( val, _values, result ) - result = val[0] - result - end -.,., - - def _reduce_none( val, _values, result ) - result - end - - end # class PoParser - -end # module GetText diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/interpolate/ruby.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/interpolate/ruby.rb deleted file mode 100644 index 5b50593fec7a1..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/interpolate/ruby.rb +++ /dev/null @@ -1,53 +0,0 @@ -# frozen_string_literal: true - -# heavily based on Masao Mutoh's gettext String interpolation extension -# http://github.com/mutoh/gettext/blob/f6566738b981fe0952548c421042ad1e0cdfb31e/lib/gettext/core_ext/string.rb - -module I18n - DEFAULT_INTERPOLATION_PATTERNS = [ - /%%/, - /%\{([\w|]+)\}/, # matches placeholders like "%{foo} or %{foo|word}" - /%<(\w+)>([^\d]*?\d*\.?\d*[bBdiouxXeEfgGcps])/ # matches placeholders like "%.d" - ].freeze - INTERPOLATION_PATTERN = Regexp.union(DEFAULT_INTERPOLATION_PATTERNS) - deprecate_constant :INTERPOLATION_PATTERN - - INTERPOLATION_PATTERNS_CACHE = Hash.new do |hash, patterns| - hash[patterns] = Regexp.union(patterns) - end - private_constant :INTERPOLATION_PATTERNS_CACHE - - class << self - # Return String or raises MissingInterpolationArgument exception. - # Missing argument's logic is handled by I18n.config.missing_interpolation_argument_handler. 
- def interpolate(string, values) - raise ReservedInterpolationKey.new($1.to_sym, string) if string =~ I18n.reserved_keys_pattern - raise ArgumentError.new('Interpolation values must be a Hash.') unless values.kind_of?(Hash) - interpolate_hash(string, values) - end - - def interpolate_hash(string, values) - pattern = INTERPOLATION_PATTERNS_CACHE[config.interpolation_patterns] - interpolated = false - - interpolated_string = string.gsub(pattern) do |match| - interpolated = true - - if match == '%%' - '%' - else - key = ($1 || $2 || match.tr("%{}", "")).to_sym - value = if values.key?(key) - values[key] - else - config.missing_interpolation_argument_handler.call(key, values, string) - end - value = value.call(values) if value.respond_to?(:call) - $3 ? sprintf("%#{$3}", value) : value - end - end - - interpolated ? interpolated_string : string - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale.rb deleted file mode 100644 index c4078e614b7a2..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale.rb +++ /dev/null @@ -1,8 +0,0 @@ -# frozen_string_literal: true - -module I18n - module Locale - autoload :Fallbacks, 'i18n/locale/fallbacks' - autoload :Tag, 'i18n/locale/tag' - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/fallbacks.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/fallbacks.rb deleted file mode 100644 index e30acc4340ac7..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/fallbacks.rb +++ /dev/null @@ -1,97 +0,0 @@ -# Locale Fallbacks -# -# Extends the I18n module to hold a fallbacks instance which is set to an -# instance of I18n::Locale::Fallbacks by default but can be swapped with a -# different implementation. -# -# Locale fallbacks will compute a number of fallback locales for a given locale. -# For example: -# -#
-# I18n.fallbacks[:"es-MX"] # => [:"es-MX", :es, :en] 
-# -# Locale fallbacks always fall back to -# -# * all parent locales of a given locale (e.g. :es for :"es-MX") first, -# * the current default locales and all of their parents second -# -# The default locales are set to [] by default but can be set to something else. -# -# One can additionally add any number of additional fallback locales manually. -# These will be added before the default locales to the fallback chain. For -# example: -# -# # using a custom locale as default fallback locale -# -# I18n.fallbacks = I18n::Locale::Fallbacks.new(:"en-GB", :"de-AT" => :de, :"de-CH" => :de) -# I18n.fallbacks[:"de-AT"] # => [:"de-AT", :de, :"en-GB", :en] -# I18n.fallbacks[:"de-CH"] # => [:"de-CH", :de, :"en-GB", :en] -# -# # mapping fallbacks to an existing instance -# -# # people speaking Catalan also speak Spanish as spoken in Spain -# fallbacks = I18n.fallbacks -# fallbacks.map(:ca => :"es-ES") -# fallbacks[:ca] # => [:ca, :"es-ES", :es, :"en-US", :en] -# -# # people speaking Arabian as spoken in Palestine also speak Hebrew as spoken in Israel -# fallbacks.map(:"ar-PS" => :"he-IL") -# fallbacks[:"ar-PS"] # => [:"ar-PS", :ar, :"he-IL", :he, :"en-US", :en] -# fallbacks[:"ar-EG"] # => [:"ar-EG", :ar, :"en-US", :en] -# -# # people speaking Sami as spoken in Finland also speak Swedish and Finnish as spoken in Finland -# fallbacks.map(:sms => [:"se-FI", :"fi-FI"]) -# fallbacks[:sms] # => [:sms, :"se-FI", :se, :"fi-FI", :fi, :"en-US", :en] - -module I18n - module Locale - class Fallbacks < Hash - def initialize(*mappings) - @map = {} - map(mappings.pop) if mappings.last.is_a?(Hash) - self.defaults = mappings.empty? ? [] : mappings - end - - def defaults=(defaults) - @defaults = defaults.flat_map { |default| compute(default, false) } - end - attr_reader :defaults - - def [](locale) - raise InvalidLocale.new(locale) if locale.nil? - raise Disabled.new('fallback#[]') if locale == false - locale = locale.to_sym - super || store(locale, compute(locale)) - end - - def map(*args, &block) - if args.count == 1 && !block_given? - mappings = args.first - mappings.each do |from, to| - from, to = from.to_sym, Array(to) - to.each do |_to| - @map[from] ||= [] - @map[from] << _to.to_sym - end - end - else - @map.map(*args, &block) - end - end - - protected - - def compute(tags, include_defaults = true, exclude = []) - result = Array(tags).flat_map do |tag| - tags = I18n::Locale::Tag.tag(tag).self_and_parents.map! { |t| t.to_sym } - exclude - tags.each { |_tag| tags += compute(@map[_tag], false, exclude + tags) if @map[_tag] } - tags - end - result.push(*defaults) if include_defaults - result.uniq! - result.compact! - result - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag.rb deleted file mode 100644 index a640b4465f6c9..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag.rb +++ /dev/null @@ -1,28 +0,0 @@ -# encoding: utf-8 - -module I18n - module Locale - module Tag - autoload :Parents, 'i18n/locale/tag/parents' - autoload :Rfc4646, 'i18n/locale/tag/rfc4646' - autoload :Simple, 'i18n/locale/tag/simple' - - class << self - # Returns the current locale tag implementation. Defaults to +I18n::Locale::Tag::Simple+. - def implementation - @@implementation ||= Simple - end - - # Sets the current locale tag implementation. Use this to set a different locale tag implementation. 
- def implementation=(implementation) - @@implementation = implementation - end - - # Factory method for locale tags. Delegates to the current locale tag implementation. - def tag(tag) - implementation.tag(tag) - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/parents.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/parents.rb deleted file mode 100644 index 6283e667ff00f..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/parents.rb +++ /dev/null @@ -1,24 +0,0 @@ -module I18n - module Locale - module Tag - module Parents - def parent - @parent ||= - begin - segs = to_a - segs.compact! - segs.length > 1 ? self.class.tag(*segs[0..(segs.length - 2)].join('-')) : nil - end - end - - def self_and_parents - @self_and_parents ||= [self].concat parents - end - - def parents - @parents ||= parent ? [parent].concat(parent.parents) : [] - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/rfc4646.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/rfc4646.rb deleted file mode 100644 index 4ce4c751ae11f..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/rfc4646.rb +++ /dev/null @@ -1,74 +0,0 @@ -# RFC 4646/47 compliant Locale tag implementation that parses locale tags to -# subtags such as language, script, region, variant etc. -# -# For more information see by http://en.wikipedia.org/wiki/IETF_language_tag -# -# Rfc4646::Parser does not implement grandfathered tags. - -module I18n - module Locale - module Tag - RFC4646_SUBTAGS = [ :language, :script, :region, :variant, :extension, :privateuse, :grandfathered ] - RFC4646_FORMATS = { :language => :downcase, :script => :capitalize, :region => :upcase, :variant => :downcase } - - class Rfc4646 < Struct.new(*RFC4646_SUBTAGS) - class << self - # Parses the given tag and returns a Tag instance if it is valid. - # Returns false if the given tag is not valid according to RFC 4646. - def tag(tag) - matches = parser.match(tag) - new(*matches) if matches - end - - def parser - @@parser ||= Rfc4646::Parser - end - - def parser=(parser) - @@parser = parser - end - end - - include Parents - - RFC4646_FORMATS.each do |name, format| - define_method(name) { self[name].send(format) unless self[name].nil? } - end - - def to_sym - to_s.to_sym - end - - def to_s - @tag ||= to_a.compact.join("-") - end - - def to_a - members.collect { |attr| self.send(attr) } - end - - module Parser - PATTERN = %r{\A(?: - ([a-z]{2,3}(?:(?:-[a-z]{3}){0,3})?|[a-z]{4}|[a-z]{5,8}) # language - (?:-([a-z]{4}))? # script - (?:-([a-z]{2}|\d{3}))? # region - (?:-([0-9a-z]{5,8}|\d[0-9a-z]{3}))* # variant - (?:-([0-9a-wyz](?:-[0-9a-z]{2,8})+))* # extension - (?:-(x(?:-[0-9a-z]{1,8})+))?| # privateuse subtag - (x(?:-[0-9a-z]{1,8})+)| # privateuse tag - /* ([a-z]{1,3}(?:-[0-9a-z]{2,8}){1,2}) */ # grandfathered - )\z}xi - - class << self - def match(tag) - c = PATTERN.match(tag.to_s).captures - c[0..4] << (c[5].nil? ? c[6] : c[5]) << c[7] # TODO c[7] is grandfathered, throw a NotImplemented exception here? 
- rescue - false - end - end - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/simple.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/simple.rb deleted file mode 100644 index 18d55c2861dda..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/locale/tag/simple.rb +++ /dev/null @@ -1,39 +0,0 @@ -# Simple Locale tag implementation that computes subtags by simply splitting -# the locale tag at '-' occurrences. -module I18n - module Locale - module Tag - class Simple - class << self - def tag(tag) - new(tag) - end - end - - include Parents - - attr_reader :tag - - def initialize(*tag) - @tag = tag.join('-').to_sym - end - - def subtags - @subtags = tag.to_s.split('-').map!(&:to_s) - end - - def to_sym - tag - end - - def to_s - tag.to_s - end - - def to_a - subtags - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/middleware.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/middleware.rb deleted file mode 100644 index 59b377e280cda..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/middleware.rb +++ /dev/null @@ -1,17 +0,0 @@ -# frozen_string_literal: true - -module I18n - class Middleware - - def initialize(app) - @app = app - end - - def call(env) - @app.call(env) - ensure - Thread.current[:i18n_config] = I18n::Config.new - end - - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/utils.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/utils.rb deleted file mode 100644 index 88415615f2de5..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/utils.rb +++ /dev/null @@ -1,55 +0,0 @@ -# frozen_string_literal: true - -module I18n - module Utils - class << self - if Hash.method_defined?(:except) - def except(hash, *keys) - hash.except(*keys) - end - else - def except(hash, *keys) - hash = hash.dup - keys.each { |k| hash.delete(k) } - hash - end - end - - def deep_merge(hash, other_hash, &block) - deep_merge!(hash.dup, other_hash, &block) - end - - def deep_merge!(hash, other_hash, &block) - hash.merge!(other_hash) do |key, this_val, other_val| - if this_val.is_a?(Hash) && other_val.is_a?(Hash) - deep_merge(this_val, other_val, &block) - elsif block_given? - yield key, this_val, other_val - else - other_val - end - end - end - - def deep_symbolize_keys(hash) - hash.each_with_object({}) do |(key, value), result| - result[key.respond_to?(:to_sym) ? 
key.to_sym : key] = deep_symbolize_keys_in_object(value) - result - end - end - - private - - def deep_symbolize_keys_in_object(value) - case value - when Hash - deep_symbolize_keys(value) - when Array - value.map { |e| deep_symbolize_keys_in_object(e) } - else - value - end - end - end - end -end diff --git a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/version.rb b/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/version.rb deleted file mode 100644 index 965f5dd8d6941..0000000000000 --- a/Library/Homebrew/vendor/bundle/ruby/2.6.0/gems/i18n-1.14.1/lib/i18n/version.rb +++ /dev/null @@ -1,5 +0,0 @@ -# frozen_string_literal: true - -module I18n - VERSION = "1.14.1" -end From 410fc64b7b4e2d633b67268d40d1c773961a198a Mon Sep 17 00:00:00 2001 From: Douglas Eichelberger Date: Sun, 26 Nov 2023 15:36:02 -0800 Subject: [PATCH 7/8] Use encoding cache --- Library/Homebrew/extend/blank.rb | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Library/Homebrew/extend/blank.rb b/Library/Homebrew/extend/blank.rb index 735af262765de..5aa09c033af57 100644 --- a/Library/Homebrew/extend/blank.rb +++ b/Library/Homebrew/extend/blank.rb @@ -133,6 +133,12 @@ def present? # :nodoc: class String BLANK_RE = /\A[[:space:]]*\z/.freeze + # This is a cache that is intentionally mutable + # rubocop:disable Style/MutableConstant + ENCODED_BLANKS_ = T.let(Hash.new do |h, enc| + h[enc] = Regexp.new(BLANK_RE.source.encode(enc), BLANK_RE.options | Regexp::FIXEDENCODING) + end, T::Hash[Encoding, Regexp]) + # rubocop:enable Style/MutableConstant # A string is blank if it's empty or contains whitespaces only: # @@ -153,7 +159,7 @@ def blank? begin BLANK_RE.match?(self) rescue Encoding::CompatibilityError - Regexp.new(BLANK_RE.source.encode(encoding), BLANK_RE.options | Regexp::FIXEDENCODING).match?(self) + T.must(ENCODED_BLANKS_[encoding]).match?(self) end end From cfb8ec81937f64f239ee0c98c9f149f995452696 Mon Sep 17 00:00:00 2001 From: Douglas Eichelberger Date: Sun, 26 Nov 2023 15:36:27 -0800 Subject: [PATCH 8/8] Add tests --- Library/Homebrew/test/extend/blank_spec.rb | 50 ++++++++++++++++++++++ 1 file changed, 50 insertions(+) create mode 100644 Library/Homebrew/test/extend/blank_spec.rb diff --git a/Library/Homebrew/test/extend/blank_spec.rb b/Library/Homebrew/test/extend/blank_spec.rb new file mode 100644 index 0000000000000..4c9689020ffae --- /dev/null +++ b/Library/Homebrew/test/extend/blank_spec.rb @@ -0,0 +1,50 @@ +# frozen_string_literal: true + +require "extend/blank" + +describe Object do + let(:empty_true) do + Class.new(described_class) do + def empty? + 0 + end + end + end + let(:empty_false) do + Class.new(described_class) do + def empty? + nil + end + end + end + let(:blank) { [empty_true.new, nil, false, "", " ", " \n\t \r ", " ", "\u00a0", [], {}] } + let(:present) { [empty_false.new, described_class.new, true, 0, 1, "a", [nil], { nil => 0 }, Time.now] } + + describe ".blank?" do + it "checks if an object is blank" do + blank.each { |v| expect(v.blank?).to be true } + present.each { |v| expect(v.blank?).to be false } + end + + it "checks if an object is blank with bundled string encodings" do + Encoding.list.reject(&:dummy?).each do |encoding| + expect(" ".encode(encoding).blank?).to be true + expect("a".encode(encoding).blank?).to be false + end + end + end + + describe ".present?" 
do
+    it "checks if an object is present" do
+      blank.each { |v| expect(v.present?).to be false }
+      present.each { |v| expect(v.present?).to be true }
+    end
+  end
+
+  describe ".presence" do
+    it "returns the object if present, or nil" do
+      blank.each { |v| expect(v.presence).to be_nil }
+      present.each { |v| expect(v.presence).to be v }
+    end
+  end
+end
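
The Encoding::CompatibilityError branch that PATCH 7/8 starts caching is easy to miss. The sketch below is illustrative only — it is not part of the patch series and leaves out the Sorbet annotations — and shows why a per-encoding copy of BLANK_RE is kept at all: the ASCII-compatible pattern cannot be matched against strings in incompatible encodings such as UTF-16LE, so the pattern source is re-encoded once per encoding and reused.

    # Standalone sketch of the fallback cached in Library/Homebrew/extend/blank.rb (PATCH 7/8).
    # Plain Ruby, no external dependencies.
    BLANK_RE = /\A[[:space:]]*\z/

    # Memoize a fixed-encoding copy of the pattern per string encoding.
    encoded_blanks = Hash.new do |cache, enc|
      cache[enc] = Regexp.new(BLANK_RE.source.encode(enc), BLANK_RE.options | Regexp::FIXEDENCODING)
    end

    utf16_blank = " \t ".encode(Encoding::UTF_16LE)

    begin
      BLANK_RE.match?(utf16_blank)
    rescue Encoding::CompatibilityError
      # UTF-16LE is not ASCII-compatible, so the original pattern raises;
      # the re-encoded copy matches and stays cached for later calls.
      puts encoded_blanks[utf16_blank.encoding].match?(utf16_blank) # => true
    end

A plain Hash with a default block is presumably sufficient here because the worst case under concurrent access is building the same Regexp twice, which is harmless; that is what allows the cache to exist without the previously vendored concurrent-ruby dependency.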