All Files: 81.76% covered at 22.57 hits/line (72 files, 2539 relevant lines: 2076 covered, 463 missed).
# typed: strict
# frozen_string_literal: true

# Core library files
require "log_struct/sorbet"
require "log_struct/version"
require "log_struct/enums" # All enums are now in the enums directory
require "log_struct/configuration"
require "log_struct/formatter"
require "log_struct/railtie"
require "log_struct/concerns/error_handling"
require "log_struct/concerns/configuration"
require "log_struct/concerns/logging"

# Monkey-patch ActiveSupport::TaggedLogging::Formatter to support hash input/output
require "log_struct/monkey_patches/active_support/tagged_logging/formatter"

# Require integrations
require "log_struct/integrations"

# SemanticLogger integration - core feature for high-performance logging
require "log_struct/semantic_logger/formatter"
require "log_struct/semantic_logger/color_formatter"
require "log_struct/semantic_logger/logger"
require "log_struct/semantic_logger/setup"

module LogStruct
  class Error < StandardError; end

  extend Concerns::ErrorHandling::ClassMethods
  extend Concerns::Configuration::ClassMethods
  extend Concerns::Logging::ClassMethods

  # Set enabled at require time based on the current Rails environment.
  # (Users can disable or enable LogStruct later in an initializer.)
  set_enabled_from_rails_env!
end

# typed: strict
# frozen_string_literal: true

require_relative "../configuration"

module LogStruct
  module Concerns
    # Concern for managing LogStruct's configuration
    module Configuration
      module ClassMethods
        extend T::Sig

        sig { params(block: T.proc.params(config: LogStruct::Configuration).void).void }
        def configure(&block)
          yield(config)
        end

        sig { returns(LogStruct::Configuration) }
        def config
          LogStruct::Configuration.instance
        end

        # (Can't use alias_method since this module is extended into LogStruct)
        sig { returns(LogStruct::Configuration) }
        def configuration
          config
        end

        # Setter method to replace the configuration (for testing purposes)
        sig { params(config: LogStruct::Configuration).void }
        def configuration=(config)
          LogStruct::Configuration.set_instance(config)
        end

        sig { returns(T::Boolean) }
        def enabled?
          config.enabled
        end

        sig { void }
        def set_enabled_from_rails_env!
          # Set enabled based on the current Rails environment and the LOGSTRUCT_ENABLED env var.
          # Precedence:
          #   1. If the LOGSTRUCT_ENABLED env var is defined, enabled is true only
          #      when its value is "true" (any other value means false).
          #   2. Otherwise, enabled is true when the current Rails environment is
          #      included in enabled_environments.
          config.enabled = if ENV["LOGSTRUCT_ENABLED"]
            # Override to true only if env var is "true"
            ENV["LOGSTRUCT_ENABLED"] == "true"
          else
            config.enabled_environments.include?(::Rails.env.to_sym)
          end
        end

        sig { returns(T::Boolean) }
        def is_local?
          config.local_environments.include?(::Rails.env.to_sym)
        end

        sig { returns(T::Boolean) }
        def is_production?
          !is_local?
        end

        sig { void }
        def merge_rails_filter_parameters!
          return unless ::Rails.application.config.respond_to?(:filter_parameters)

          rails_filter_params = ::Rails.application.config.filter_parameters
          return unless rails_filter_params.is_a?(Array)

          # Convert all Rails filter parameters to symbols and merge with our filter keys
          converted_params = rails_filter_params.map do |param|
            param.respond_to?(:to_sym) ? param.to_sym : param
          end
          # Add Rails filter parameters to our filter keys
          config.filters.filter_keys += converted_params
          # Ensure no duplicates
          config.filters.filter_keys.uniq!
          # Clear Rails filter parameters since we've incorporated them
          ::Rails.application.config.filter_parameters.clear
        end
      end
    end
  end
end

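# Example (illustrative, not part of the library source): a typical Rails
# initializer built on the configure/config API above. The initializer path and
# the chosen settings are assumptions.
# ```ruby
# # config/initializers/log_struct.rb
# LogStruct.configure do |config|
#   config.enabled_environments = [:production, :staging]
#   config.filters.filter_keys += [:api_key]
#   config.integrations.enable_sql_logging = true
# end
# ```
# Setting LOGSTRUCT_ENABLED=true (or =false) in the environment takes precedence
# over enabled_environments, as implemented in set_enabled_from_rails_env! above.
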
# typed: strict
# frozen_string_literal: true

module LogStruct
  module Concerns
    # Concern for handling errors according to configured modes
    module ErrorHandling
      module ClassMethods
        extend T::Sig
        extend T::Helpers

        # Needed for raise
        requires_ancestor { Module }

        # Get the error handling mode for a given source
        sig { params(source: Source).returns(ErrorHandlingMode) }
        def error_handling_mode_for(source)
          config = LogStruct.config
          # Use a case statement for type-safety
          case source
          when Source::TypeChecking
            config.error_handling_modes.type_checking_errors
          when Source::LogStruct
            config.error_handling_modes.logstruct_errors
          when Source::Security
            config.error_handling_modes.security_errors
          when Source::Rails, Source::App, Source::Job, Source::Storage, Source::Mailer,
            Source::Shrine, Source::CarrierWave, Source::Sidekiq
            config.error_handling_modes.standard_errors
          else
            # Ensures the case statement is exhaustive
            T.absurd(source)
          end
        end

        # Log an error with structured data
        sig { params(error: StandardError, source: Source, context: T.nilable(T::Hash[Symbol, T.untyped])).void }
        def log_error(error, source:, context: nil)
          # Create structured log entry
          error_log = Log::Error.from_exception(
            source,
            error,
            context || {}
          )
          LogStruct.error(error_log)
        end

        # Report an error using the configured handler or MultiErrorReporter
        sig { params(error: StandardError, source: Source, context: T.nilable(T::Hash[Symbol, T.untyped])).void }
        def log_and_report_error(error, source:, context: nil)
          log_error(error, source: source, context: context)
          error_handler = LogStruct.config.error_reporting_handler
          if error_handler
            # Use the configured handler
            error_handler.call(error, context, source)
          else
            # Fall back to MultiErrorReporter (detects Sentry, Bugsnag, etc.)
            LogStruct::MultiErrorReporter.report_error(error, context || {})
          end
        end

        # Handle an error according to the configured error handling mode (log, report, raise, etc.)
        sig { params(error: StandardError, source: Source, context: T.nilable(T::Hash[Symbol, T.untyped])).void }
        def handle_exception(error, source:, context: nil)
          mode = error_handling_mode_for(source)
          # Log / report in production, raise locally (dev/test)
          if mode == ErrorHandlingMode::LogProduction || mode == ErrorHandlingMode::ReportProduction
            raise(error) if !LogStruct.is_production?
          end
          case mode
          when ErrorHandlingMode::Ignore
            # Do nothing
          when ErrorHandlingMode::Raise
            raise(error)
          when ErrorHandlingMode::Log, ErrorHandlingMode::LogProduction
            log_error(error, source: source, context: context)
          when ErrorHandlingMode::Report, ErrorHandlingMode::ReportProduction
            log_and_report_error(error, source: source, context: context)
          else
            # Ensures the case statement is exhaustive
            T.absurd(mode)
          end
        end
      end
    end
  end
end

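# Example (illustrative, not part of the library source): routing a rescued
# error through the configured mode. Source::App resolves to the
# standard_errors mode, which defaults to Raise; the begin/rescue wrapper and
# the context keys are assumptions.
# ```ruby
# begin
#   risky_operation!
# rescue => e
#   LogStruct.handle_exception(e, source: LogStruct::Source::App, context: {order_id: 42})
# end
# ```
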
# typed: strict
# frozen_string_literal: true

require_relative "../log"

module LogStruct
  module Concerns
    # Concern providing level-specific helpers for logging structured log entries
    module Logging
      module ClassMethods
        extend T::Sig

        # Log a log struct at debug level
        sig { params(log: Log::Interfaces::CommonFields).void }
        def debug(log)
          Rails.logger.debug(log)
        end

        # Log a log struct at info level
        sig { params(log: Log::Interfaces::CommonFields).void }
        def info(log)
          Rails.logger.info(log)
        end

        # Log a log struct at warn level
        sig { params(log: Log::Interfaces::CommonFields).void }
        def warn(log)
          Rails.logger.warn(log)
        end

        # Log a log struct at error level
        sig { params(log: Log::Interfaces::CommonFields).void }
        def error(log)
          Rails.logger.error(log)
        end

        # Log a log struct at fatal level
        sig { params(log: Log::Interfaces::CommonFields).void }
        def fatal(log)
          Rails.logger.fatal(log)
        end
      end
    end
  end
end

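# Example (illustrative): each helper accepts any log struct implementing
# Log::Interfaces::CommonFields, such as the Log::Plain struct used by the
# formatter later in this report.
# ```ruby
# LogStruct.info(LogStruct::Log::Plain.new(message: "Cache warmed", timestamp: Time.now))
# ```
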
# typed: strict
# frozen_string_literal: true

module LogStruct
  module ConfigStruct
    class ErrorHandlingModes < T::Struct
      include Sorbet::SerializeSymbolKeys

      # How to handle different types of errors
      # Modes:
      # - Ignore           - Ignore the error
      # - Log              - Log the error
      # - Report           - Log and report to error tracking service (but don't crash)
      # - LogProduction    - Log error in production, raise locally (dev/test)
      # - ReportProduction - Report error in production, raise locally (dev/test)
      # - Raise            - Always raise the error

      # Configurable error handling categories
      prop :type_checking_errors, ErrorHandlingMode, default: ErrorHandlingMode::LogProduction
      prop :logstruct_errors, ErrorHandlingMode, default: ErrorHandlingMode::LogProduction
      prop :security_errors, ErrorHandlingMode, default: ErrorHandlingMode::Report
      prop :standard_errors, ErrorHandlingMode, default: ErrorHandlingMode::Raise
    end
  end
end

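# Example (illustrative): tightening the error handling categories above from
# an initializer; both assignments are assumptions.
# ```ruby
# LogStruct.configure do |config|
#   config.error_handling_modes.standard_errors = LogStruct::ErrorHandlingMode::ReportProduction
#   config.error_handling_modes.security_errors = LogStruct::ErrorHandlingMode::Raise
# end
# ```
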
# typed: strict
# frozen_string_literal: true

module LogStruct
  module ConfigStruct
    class Filters < T::Struct
      include Sorbet::SerializeSymbolKeys

      # Keys that should be filtered in nested structures such as request params and job arguments.
      # Filtered data includes information about Hashes and Arrays.
      #
      #   { _filtered: {
      #     _class: "Hash",               # Class of the filtered value
      #     _bytes: 1234,                 # Length of JSON string in bytes
      #     _keys_count: 3,               # Number of keys in the hash
      #     _keys: [:key1, :key2, :key3], # First 10 keys in the hash
      #   } }
      #
      # Default: [:password, :password_confirmation, :pass, :pw, :token, :secret,
      #           :credentials, :auth, :authentication, :authorization,
      #           :credit_card, :ssn, :social_security]
      prop :filter_keys,
        T::Array[Symbol],
        factory: -> {
          %i[
            password password_confirmation pass pw token secret
            credentials auth authentication authorization
            credit_card ssn social_security
          ]
        }

      # Keys where string values should include an SHA256 hash.
      # Useful for tracing emails across requests (e.g. sign in, sign up) while protecting privacy.
      # Default: [:email, :email_address]
      prop :filter_keys_with_hashes,
        T::Array[Symbol],
        factory: -> { %i[email email_address] }

      # Hash salt for SHA256 hashing (typically used for email addresses)
      # Used for both param filters and string scrubbing
      # Default: "l0g5t0p"
      prop :hash_salt, String, default: "l0g5t0p"

      # Hash length for SHA256 hashing (typically used for email addresses)
      # Used for both param filters and string scrubbing
      # Default: 12
      prop :hash_length, Integer, default: 12

      # Filter email addresses. Also controls email filtering for the ActionMailer integration
      # (to, from, recipient fields, etc.)
      # Default: true
      prop :email_addresses, T::Boolean, default: true

      # Filter URL passwords
      # Default: true
      prop :url_passwords, T::Boolean, default: true

      # Filter credit card numbers
      # Default: true
      prop :credit_card_numbers, T::Boolean, default: true

      # Filter phone numbers
      # Default: true
      prop :phone_numbers, T::Boolean, default: true

      # Filter social security numbers
      # Default: true
      prop :ssns, T::Boolean, default: true

      # Filter IP addresses
      # Default: false
      prop :ip_addresses, T::Boolean, default: false

      # Filter MAC addresses
      # Default: false
      prop :mac_addresses, T::Boolean, default: false
    end
  end
end

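# Example (illustrative): extending the filters above; :session_token is an
# assumed application-specific key.
# ```ruby
# LogStruct.configure do |config|
#   config.filters.filter_keys += [:session_token]
#   config.filters.hash_length = 16
# end
# ```
# With the defaults, a params hash like {password: "hunter2", email: "a@b.com"}
# is logged with the password value replaced by a _filtered summary, while the
# email value is annotated with a salted SHA256 digest for cross-request tracing.
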
# typed: strict
# frozen_string_literal: true

require "active_support/notifications"

module LogStruct
  module ConfigStruct
    class Integrations < T::Struct
      include Sorbet::SerializeSymbolKeys

      # Enable or disable Sorbet error handler integration
      # Default: true
      prop :enable_sorbet_error_handlers, T::Boolean, default: true

      # Enable or disable Lograge integration
      # Default: true
      prop :enable_lograge, T::Boolean, default: true

      # Custom options for Lograge
      # Default: nil
      prop :lograge_custom_options, T.nilable(Handlers::LogrageCustomOptions), default: nil

      # Enable or disable ActionMailer integration
      # Default: true
      prop :enable_actionmailer, T::Boolean, default: true

      # Enable or disable host authorization logging
      # Default: true
      prop :enable_host_authorization, T::Boolean, default: true

      # Enable or disable ActiveJob integration
      # Default: true
      prop :enable_activejob, T::Boolean, default: true

      # Enable or disable Rack middleware
      # Default: true
      prop :enable_rack_error_handler, T::Boolean, default: true

      # Enable or disable Sidekiq integration
      # Default: true
      prop :enable_sidekiq, T::Boolean, default: true

      # Enable or disable Shrine integration
      # Default: true
      prop :enable_shrine, T::Boolean, default: true

      # Enable or disable ActiveStorage integration
      # Default: true
      prop :enable_activestorage, T::Boolean, default: true

      # Enable or disable CarrierWave integration
      # Default: true
      prop :enable_carrierwave, T::Boolean, default: true

      # Enable or disable GoodJob integration
      # Default: true
      prop :enable_goodjob, T::Boolean, default: true

      # Enable SemanticLogger integration for high-performance logging
      # Default: true
      prop :enable_semantic_logger, T::Boolean, default: true

      # Enable colored JSON output in development
      # Default: true
      prop :enable_color_output, T::Boolean, default: true

      # Color configuration for JSON output
      # Default: nil (uses SemanticLogger defaults)
      prop :color_map, T.nilable(T::Hash[Symbol, Symbol]), default: nil

      # Filter noisy loggers (ActionView, etc.)
      # Default: false
      prop :filter_noisy_loggers, T::Boolean, default: false

      # Enable SQL query logging through ActiveRecord instrumentation
      # Default: false (can be resource intensive)
      prop :enable_sql_logging, T::Boolean, default: false

      # Only log SQL queries slower than this threshold (in milliseconds)
      # Set to 0 or nil to log all queries
      # Default: 100.0 (log queries taking >100ms)
      prop :sql_slow_query_threshold, T.nilable(Float), default: 100.0

      # Include bind parameters in SQL logs (disable in production for security)
      # Default: true in development/test, false in production
      prop :sql_log_bind_params, T::Boolean, factory: -> { !defined?(::Rails) || !::Rails.respond_to?(:env) || !::Rails.env.production? }
    end
  end
end

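# Example (illustrative): toggling individual integrations from an initializer.
# ```ruby
# LogStruct.configure do |config|
#   config.integrations.enable_sidekiq = false
#   config.integrations.sql_slow_query_threshold = nil # log every query
# end
# ```
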
# typed: strict
# frozen_string_literal: true

require_relative "handlers"
require_relative "config_struct/error_handling_modes"
require_relative "config_struct/integrations"
require_relative "config_struct/filters"

module LogStruct
  # Core configuration class that provides a type-safe API
  class Configuration < T::Struct
    extend T::Sig
    include Sorbet::SerializeSymbolKeys

    # -------------------------------------------------------------------------------------
    # Props
    # -------------------------------------------------------------------------------------

    prop :enabled, T::Boolean, default: true
    prop :enabled_environments, T::Array[Symbol], factory: -> { [:test, :production] }
    prop :local_environments, T::Array[Symbol], factory: -> { [:development, :test] }
    const :integrations, ConfigStruct::Integrations, factory: -> { ConfigStruct::Integrations.new }
    const :filters, ConfigStruct::Filters, factory: -> { ConfigStruct::Filters.new }

    # Custom log scrubbing handler for any additional string scrubbing
    # Default: nil
    prop :string_scrubbing_handler, T.nilable(Handlers::StringScrubber)

    # Custom handler for error reporting
    # Default: Errors are handled by MultiErrorReporter
    # (auto-detects Sentry, Bugsnag, Rollbar, Honeybadger, etc.)
    prop :error_reporting_handler, T.nilable(Handlers::ErrorReporter), default: nil

    # How to handle errors from various sources
    const :error_handling_modes,
      ConfigStruct::ErrorHandlingModes,
      factory: -> { ConfigStruct::ErrorHandlingModes.new }

    # -------------------------------------------------------------------------------------
    # Class Methods
    # -------------------------------------------------------------------------------------

    # Class-instance variable
    @instance = T.let(nil, T.nilable(Configuration))

    sig { returns(Configuration) }
    def self.instance
      @instance ||= T.let(Configuration.new, T.nilable(Configuration))
    end

    sig { params(config: Configuration).void }
    def self.set_instance(config)
      @instance = config
    end
  end
end

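# Example (illustrative): isolating configuration in tests by swapping the
# singleton via the configuration= setter above (RSpec's around hook is an
# assumption about the test framework).
# ```ruby
# around do |example|
#   original = LogStruct.configuration
#   LogStruct.configuration = LogStruct::Configuration.new
#   example.run
# ensure
#   LogStruct.configuration = original
# end
# ```
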
# typed: strict
# frozen_string_literal: true

# Require all enums in this directory
require_relative "enums/error_handling_mode"
require_relative "enums/error_reporter"
require_relative "enums/event"
require_relative "enums/level"
require_relative "enums/source"

# typed: strict
# frozen_string_literal: true

module LogStruct
  # Enum for error handling modes
  class ErrorHandlingMode < T::Enum
    enums do
      # Always ignore the error
      Ignore = new(:ignore)
      # Always log the error
      Log = new(:log)
      # Always report to tracking service and continue
      Report = new(:report)
      # Log in production, raise locally (dev/test)
      LogProduction = new(:log_production)
      # Report in production, raise locally (dev/test)
      ReportProduction = new(:report_production)
      # Always raise regardless of environment
      Raise = new(:raise)
    end
  end
end

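# Example (illustrative): like any T::Enum, modes round-trip through their
# underlying symbols, which is convenient when reading modes from app config.
# ```ruby
# LogStruct::ErrorHandlingMode::ReportProduction.serialize
# # => :report_production
# LogStruct::ErrorHandlingMode.deserialize(:report_production)
# # => LogStruct::ErrorHandlingMode::ReportProduction
# ```
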
# typed: strict
# frozen_string_literal: true

module LogStruct
  class ErrorReporter < T::Enum
    enums do
      RailsLogger = new(:rails_logger)
      Sentry = new(:sentry)
      Bugsnag = new(:bugsnag)
      Rollbar = new(:rollbar)
      Honeybadger = new(:honeybadger)
    end
  end
end

# typed: strict
# frozen_string_literal: true

module LogStruct
  # Define log event types as an enum
  class Event < T::Enum
    enums do
      # Plain log messages
      Log = new(:log)

      # Request events
      Request = new(:request)

      # Job events
      Enqueue = new(:enqueue)
      Schedule = new(:schedule)
      Start = new(:start)
      Finish = new(:finish)

      # File storage events (ActiveStorage, Shrine, CarrierWave, etc.)
      Upload = new(:upload)
      Download = new(:download)
      Delete = new(:delete)
      Metadata = new(:metadata)
      Exist = new(:exist)
      Stream = new(:stream)
      Url = new(:url)

      # Email events
      Delivery = new(:delivery)
      Delivered = new(:delivered)

      # Security events
      IPSpoof = new(:ip_spoof)
      CSRFViolation = new(:csrf_violation)
      BlockedHost = new(:blocked_host)

      # Database events
      Database = new(:database)

      # Error events
      Error = new(:error)

      # Fallback
      Unknown = new(:unknown)
    end
  end
end

# typed: strict
# frozen_string_literal: true

require "logger"

module LogStruct
  # Define log levels as an enum
  class Level < T::Enum
    extend T::Sig

    enums do
      # Standard log levels
      Debug = new(:debug)
      Info = new(:info)
      Warn = new(:warn)
      Error = new(:error)
      Fatal = new(:fatal)
      Unknown = new(:unknown)
    end

    # Convert a Level to the corresponding Logger integer constant
    sig { returns(Integer) }
    def to_severity_int
      case serialize
      when :debug then ::Logger::DEBUG
      when :info then ::Logger::INFO
      when :warn then ::Logger::WARN
      when :error then ::Logger::ERROR
      when :fatal then ::Logger::FATAL
      else ::Logger::UNKNOWN
      end
    end

    # Convert a string, symbol, or integer severity to a Level
    sig { params(severity: T.any(String, Symbol, Integer, NilClass)).returns(Level) }
    def self.from_severity(severity)
      return Unknown if severity.nil?
      return from_severity_int(severity) if severity.is_a?(Integer)

      from_severity_sym(severity.downcase.to_sym)
    end

    sig { params(severity: Symbol).returns(Level) }
    def self.from_severity_sym(severity)
      case severity.to_s.downcase.to_sym
      when :debug then Debug
      when :info then Info
      when :warn then Warn
      when :error then Error
      when :fatal then Fatal
      else Unknown
      end
    end

    sig { params(severity: Integer).returns(Level) }
    def self.from_severity_int(severity)
      case severity
      when ::Logger::DEBUG then Debug
      when ::Logger::INFO then Info
      when ::Logger::WARN then Warn
      when ::Logger::ERROR then Error
      when ::Logger::FATAL then Fatal
      else Unknown
      end
    end
  end
end

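# Example (illustrative): severities of every supported input type normalize to
# the same Level.
# ```ruby
# LogStruct::Level.from_severity("INFO")         # => Level::Info
# LogStruct::Level.from_severity(:info)          # => Level::Info
# LogStruct::Level.from_severity(::Logger::INFO) # => Level::Info
# LogStruct::Level::Info.to_severity_int         # => 1 (::Logger::INFO)
# ```
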
# typed: strict
# frozen_string_literal: true

module LogStruct
  # Combined Source class that unifies log and error sources
  class Source < T::Enum
    enums do
      # Error sources
      TypeChecking = new(:type_checking) # For type checking errors (Sorbet)
      LogStruct = new(:logstruct)        # Errors from LogStruct itself
      Security = new(:security)          # Security-related events

      # Application sources
      Rails = new(:rails)     # For request-related logs/errors
      Job = new(:job)         # ActiveJob logs/errors
      Storage = new(:storage) # ActiveStorage logs/errors
      Mailer = new(:mailer)   # ActionMailer logs/errors
      App = new(:app)         # General application logs/errors

      # Third-party gem sources
      Shrine = new(:shrine)
      CarrierWave = new(:carrierwave)
      Sidekiq = new(:sidekiq)
    end
  end
end

# typed: strict
# frozen_string_literal: true

require "logger"
require "active_support/core_ext/object/blank"
require "json"
require "globalid"
require_relative "enums/source"
require_relative "enums/event"
require_relative "string_scrubber"
require_relative "log"
require_relative "param_filters"
require_relative "multi_error_reporter"

module LogStruct
  class Formatter < ::Logger::Formatter
    extend T::Sig

    # Add current_tags method to support ActiveSupport::TaggedLogging
    sig { returns(T::Array[String]) }
    def current_tags
      Thread.current[:activesupport_tagged_logging_tags] ||= []
    end

    # Add tagged method to support ActiveSupport::TaggedLogging
    sig { params(tags: T::Array[String], blk: T.proc.params(formatter: Formatter).void).returns(T.untyped) }
    def tagged(*tags, &blk)
      new_tags = tags.flatten
      current_tags.concat(new_tags) if new_tags.any?
      yield self
    ensure
      current_tags.pop(new_tags.size) if new_tags&.any?
    end

    # Add clear_tags! method to support ActiveSupport::TaggedLogging
    sig { void }
    def clear_tags!
      Thread.current[:activesupport_tagged_logging_tags] = []
    end

    sig { params(tags: T::Array[String]).returns(T.untyped) }
    def push_tags(*tags)
      current_tags.concat(tags)
    end

    sig { params(string: String).returns(String) }
    def scrub_string(string)
      # Use StringScrubber module to scrub sensitive information from strings
      StringScrubber.scrub(string)
    end

    sig { params(arg: T.untyped, recursion_depth: Integer).returns(T.untyped) }
    def process_values(arg, recursion_depth: 0)
      # Prevent infinite recursion in case any args have circular references
      # or are too deeply nested. Just return args.
      return arg if recursion_depth > 20

      case arg
      when Hash
        result = {}
        # Process each key-value pair
        arg.each do |key, value|
          # Check if this key should be filtered at any depth
          result[key] = if ParamFilters.should_filter_key?(key)
            # Filter the value
            {_filtered: ParamFilters.summarize_json_attribute(key, value)}
          else
            # Process the value normally
            process_values(value, recursion_depth: recursion_depth + 1)
          end
        end
        result
      when Array
        result = arg.map { |value| process_values(value, recursion_depth: recursion_depth + 1) }
        # Truncate large arrays, but don't truncate backtraces (arrays of strings that look like file:line)
        if result.size > 10 && !looks_like_backtrace?(result)
          result = result.take(10) + ["... and #{result.size - 10} more items"]
        end
        result
      when GlobalID::Identification
        begin
          arg.to_global_id
        rescue
          begin
            case arg
            when ActiveRecord::Base
              "#{arg.class}(##{arg.id})"
            else
              # For non-ActiveRecord objects that failed to_global_id, try to get a string representation.
              # If this also fails, we want to catch it and return the error placeholder.
              T.unsafe(arg).to_s
            end
          rescue => e
            LogStruct.handle_exception(e, source: Source::LogStruct)
            "[GLOBALID_ERROR]"
          end
        end
      when Source, Event
        arg.serialize
      when String
        scrub_string(arg)
      when Time
        arg.iso8601(3)
      else
        # Any other type (e.g. Symbol, Integer, Float, Boolean, etc.)
        arg
      end
    rescue => e
      # Report error through LogStruct's framework
      context = {
        processor_method: "process_values",
        value_type: arg.class.name,
        recursion_depth: recursion_depth
      }
      LogStruct.handle_exception(e, source: Source::LogStruct, context: context)
      arg
    end

    sig { params(log_value: T.untyped, time: Time).returns(T::Hash[Symbol, T.untyped]) }
    def log_value_to_hash(log_value, time:)
      case log_value
      when Log::Interfaces::CommonFields
        # Our log classes all implement a custom #serialize method that uses symbol keys
        log_value.serialize
      when T::Struct
        # The default T::Struct#serialize method returns a hash with string keys, so convert them to symbols
        log_value.serialize.deep_symbolize_keys
      when Hash
        # Use hash as-is and convert string keys to symbols
        log_value.dup.deep_symbolize_keys
      else
        # Create a Plain log with the message as a string and serialize it with symbol keys.
        # log_value can be literally anything: Integer, Float, Boolean, NilClass, etc.
        log_message = case log_value
        # Handle all the basic types without any further processing
        when String, Symbol, TrueClass, FalseClass, NilClass, Array, Hash, Time, Numeric
          log_value
        else
          # Handle the serialization of complex objects in a useful way:
          #
          # 1. For ActiveRecord models: Use as_json, which includes attributes
          # 2. For objects with custom as_json implementations: Use their implementation
          # 3. For basic objects that only have ActiveSupport's as_json: Use to_s
          begin
            method_owner = log_value.method(:as_json).owner
            # If it's ActiveRecord, ActiveModel, or a custom implementation, use as_json
            if method_owner.to_s.include?("ActiveRecord") ||
                method_owner.to_s.include?("ActiveModel") ||
                (method_owner.to_s.exclude?("ActiveSupport::CoreExtensions") &&
                  method_owner.to_s.exclude?("Object"))
              log_value.as_json
            else
              # For plain objects with only the default ActiveSupport as_json
              log_value.to_s
            end
          rescue => e
            # Handle serialization errors
            context = {
              object_class: log_value.class.name,
              object_inspect: log_value.inspect.truncate(100)
            }
            LogStruct.handle_exception(e, source: Source::LogStruct, context: context)
            # Fall back to the string representation to ensure we continue processing
            log_value.to_s
          end
        end
        Log::Plain.new(
          message: log_message,
          timestamp: time
        ).serialize
      end
    end

    # Serializes a Log (or string) into JSON
    sig { params(severity: T.any(String, Symbol, Integer), time: Time, progname: T.nilable(String), log_value: T.untyped).returns(String) }
    def call(severity, time, progname, log_value)
      level_enum = Level.from_severity(severity)
      data = log_value_to_hash(log_value, time: time)
      # Filter params, scrub sensitive values, format ActiveJob GlobalID arguments
      data = process_values(data)
      # Add standard fields if not already present
      data[:src] ||= Source::App
      data[:evt] ||= Event::Log
      data[:ts] ||= time.iso8601(3)
      data[:lvl] = level_enum # Set level from severity parameter
      data[:prog] = progname if progname.present?
      generate_json(data)
    end

    # Output as JSON with a newline. We mock this method in tests so we can
    # inspect the data right before it gets turned into a JSON string.
    sig { params(data: T::Hash[T.untyped, T.untyped]).returns(String) }
    def generate_json(data)
      "#{data.to_json}\n"
    end

    # Check if an array looks like a backtrace (array of strings with file:line pattern)
    sig { params(array: T::Array[T.untyped]).returns(T::Boolean) }
    def looks_like_backtrace?(array)
      return false if array.empty?

      # Check if most elements look like backtrace lines (file.rb:123 or similar patterns)
      backtrace_like_count = array.first(5).count do |element|
        element.is_a?(String) && element.match?(/\A[^:\s]+:\d+/)
      end
      # If at least 3 out of the first 5 elements look like backtrace lines, treat as backtrace
      backtrace_like_count >= 3
    end
  end
end

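# Example (illustrative): exercising the formatter directly. Each call returns
# a single JSON line; the exact keys depend on the log value, but src, evt, ts,
# and lvl are added when missing.
# ```ruby
# formatter = LogStruct::Formatter.new
# formatter.call(:info, Time.now, nil, {message: "Hello", password: "hunter2"})
# # => one JSON string ending in "\n", with the password value filtered
# ```
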
# typed: strict
# frozen_string_literal: true

module LogStruct
  # Module for custom handlers used throughout the library
  module Handlers
    # Type for Lograge custom options
    LogrageCustomOptions = T.type_alias {
      T.proc.params(
        event: ActiveSupport::Notifications::Event,
        options: T::Hash[Symbol, T.untyped]
      ).returns(T.untyped)
    }

    # Type for error reporting handlers
    ErrorReporter = T.type_alias {
      T.proc.params(
        error: StandardError,
        context: T.nilable(T::Hash[Symbol, T.untyped]),
        source: Source
      ).void
    }

    # Type for string scrubbing handlers
    StringScrubber = T.type_alias { T.proc.params(string: String).returns(String) }
  end
end

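# Example (illustrative): a custom reporter satisfying the ErrorReporter alias;
# LogStruct invokes it as handler.call(error, context, source). MyErrorTracker
# is a hypothetical reporting client.
# ```ruby
# LogStruct.configure do |config|
#   config.error_reporting_handler = lambda do |error, context, source|
#     MyErrorTracker.notify(error, metadata: context, tag: source.serialize)
#   end
# end
# ```
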
# typed: strict
# frozen_string_literal: true

require "digest"

module LogStruct
  # Utility module for hashing sensitive data
  module HashUtils
    class << self
      extend T::Sig

      # Create a hash of a string value for tracing while preserving privacy
      sig { params(value: String).returns(String) }
      def hash_value(value)
        salt = LogStruct.config.filters.hash_salt
        length = LogStruct.config.filters.hash_length
        Digest::SHA256.hexdigest("#{salt}#{value}")[0...length] || "error"
      end
    end
  end
end

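# Example (illustrative): the digest is deterministic for a given salt, so the
# same input yields the same token across log lines while the raw value stays
# out of the logs.
# ```ruby
# LogStruct::HashUtils.hash_value("user@example.com")
# # => a 12-character hex prefix of SHA256(salt + value), using the defaults
# ```
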
# typed: strict
# frozen_string_literal: true

require_relative "integrations/integration_interface"
require_relative "integrations/active_job"
require_relative "integrations/active_record"
require_relative "integrations/rack_error_handler"
require_relative "integrations/host_authorization"
require_relative "integrations/action_mailer"
require_relative "integrations/lograge"
require_relative "integrations/shrine"
require_relative "integrations/sidekiq"
require_relative "integrations/good_job"
require_relative "integrations/active_storage"
require_relative "integrations/carrierwave"
require_relative "integrations/sorbet"

module LogStruct
  module Integrations
    extend T::Sig

    sig { void }
    def self.setup_integrations
      config = LogStruct.config
      # Set up each integration with a consistent configuration pattern
      Integrations::Lograge.setup(config) if config.integrations.enable_lograge
      Integrations::ActionMailer.setup(config) if config.integrations.enable_actionmailer
      Integrations::ActiveJob.setup(config) if config.integrations.enable_activejob
      Integrations::ActiveRecord.setup(config) if config.integrations.enable_sql_logging
      Integrations::Sidekiq.setup(config) if config.integrations.enable_sidekiq
      Integrations::GoodJob.setup(config) if config.integrations.enable_goodjob
      Integrations::HostAuthorization.setup(config) if config.integrations.enable_host_authorization
      Integrations::RackErrorHandler.setup(config) if config.integrations.enable_rack_error_handler
      Integrations::Shrine.setup(config) if config.integrations.enable_shrine
      Integrations::ActiveStorage.setup(config) if config.integrations.enable_activestorage
      Integrations::CarrierWave.setup(config) if config.integrations.enable_carrierwave
      Integrations::Sorbet.setup(config) if config.integrations.enable_sorbet_error_handlers
    end
  end
end

# typed: strict
# frozen_string_literal: true

begin
  require "action_mailer"
rescue LoadError
  # actionmailer gem is not available, integration will be skipped
end

if defined?(::ActionMailer)
  require "logger"
  require_relative "action_mailer/metadata_collection"
  require_relative "action_mailer/event_logging"
  require_relative "action_mailer/error_handling"
  require_relative "action_mailer/callbacks"
end

module LogStruct
  module Integrations
    # ActionMailer integration for structured logging
    module ActionMailer
      extend T::Sig
      extend IntegrationInterface

      # Set up ActionMailer structured logging
      sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
      def self.setup(config)
        return nil unless defined?(::ActionMailer)
        return nil unless config.enabled
        return nil unless config.integrations.enable_actionmailer

        # Silence default ActionMailer logs (we use our own structured logging).
        # This is required because we replace the logging using our own callbacks.
        if defined?(::ActionMailer::Base)
          ::ActionMailer::Base.logger = ::Logger.new(File::NULL)
        end

        # Register our custom observers and handlers.
        # Registering these at the class level means all mailers will use them.
        ActiveSupport.on_load(:action_mailer) { prepend LogStruct::Integrations::ActionMailer::MetadataCollection }
        ActiveSupport.on_load(:action_mailer) { prepend LogStruct::Integrations::ActionMailer::EventLogging }
        ActiveSupport.on_load(:action_mailer) { prepend LogStruct::Integrations::ActionMailer::ErrorHandling }
        ActiveSupport.on_load(:action_mailer) { prepend LogStruct::Integrations::ActionMailer::Callbacks }
        ActiveSupport.on_load(:action_mailer) { LogStruct::Integrations::ActionMailer::Callbacks.patch_message_delivery }
        true
      end
    end
  end
end

# typed: strict
# frozen_string_literal: true

module LogStruct
  module Integrations
    module ActionMailer
      # Backport of the *_deliver callbacks from Rails 7.1
      module Callbacks
        extend T::Sig
        extend ::ActiveSupport::Concern

        # Track if we've already patched MessageDelivery
        @patched_message_delivery = T.let(false, T::Boolean)

        # The contents of this block can't be statically typed;
        # it is evaluated by ActiveSupport::Concern at runtime.
        included do
          include ::ActiveSupport::Callbacks
          if defined?(::ActiveSupport) && ::ActiveSupport.gem_version >= Gem::Version.new("7.1.0")
            define_callbacks :deliver, skip_after_callbacks_if_terminated: true
          else
            define_callbacks :deliver
          end
        end

        # When this module is prepended (our integration uses prepend), ensure callbacks are defined
        if respond_to?(:prepended)
          prepended do
            include ::ActiveSupport::Callbacks
            if defined?(::ActiveSupport) && ::ActiveSupport.gem_version >= Gem::Version.new("7.1.0")
              define_callbacks :deliver, skip_after_callbacks_if_terminated: true
            else
              define_callbacks :deliver
            end
          end
        end

        # Define class methods in a separate module
        module ClassMethods
          extend T::Sig

          # Defines a callback that will get called right before the
          # message is sent to the delivery method.
          sig { params(filters: T.untyped, blk: T.nilable(T.proc.bind(T.untyped).void)).void }
          def before_deliver(*filters, &blk)
            # Use T.unsafe for splat arguments due to Sorbet limitation
            T.unsafe(self).set_callback(:deliver, :before, *filters, &blk)
          end

          # Defines a callback that will get called right after the
          # message's delivery method is finished.
          sig { params(filters: T.untyped, blk: T.nilable(T.proc.bind(T.untyped).void)).void }
          def after_deliver(*filters, &blk)
            # Use T.unsafe for splat arguments due to Sorbet limitation
            T.unsafe(self).set_callback(:deliver, :after, *filters, &blk)
          end

          # Defines a callback that will get called around the message's deliver method.
          sig { params(filters: T.untyped, blk: T.nilable(T.proc.bind(T.untyped).params(arg0: T.untyped).void)).void }
          def around_deliver(*filters, &blk)
            # Use T.unsafe for splat arguments due to Sorbet limitation
            T.unsafe(self).set_callback(:deliver, :around, *filters, &blk)
          end
        end

        # Module to patch ActionMailer::MessageDelivery with callback support
        module MessageDeliveryCallbacks
          extend T::Sig

          sig { returns(T.untyped) }
          def deliver_now
            processed_mailer.run_callbacks(:deliver) do
              message.deliver
            end
          end

          sig { returns(T.untyped) }
          def deliver_now!
            processed_mailer.run_callbacks(:deliver) do
              message.deliver!
            end
          end
        end

        sig { returns(T::Boolean) }
        def self.patch_message_delivery
          # Return early if we've already patched
          return true if @patched_message_delivery

          # Prepend our module to add callback support to MessageDelivery
          ::ActionMailer::MessageDelivery.prepend(MessageDeliveryCallbacks)
          # Mark as patched so we don't do it again
          @patched_message_delivery = true
          true
        end
      end
    end
  end
end

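# Example (illustrative): once Callbacks is prepended into a mailer, deliver-time
# hooks can be declared like Rails 7.1's native callbacks. UserMailer and
# track_delivery are hypothetical.
# ```ruby
# class UserMailer < ApplicationMailer
#   before_deliver { Rails.logger.debug("about to deliver") }
#   after_deliver :track_delivery
# end
# ```
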
# typed: strict
# frozen_string_literal: true

module LogStruct
  module Integrations
    module ActionMailer
      # Handles error handling for ActionMailer
      #
      # IMPORTANT LIMITATIONS:
      # 1. This module must be included BEFORE users define rescue_from handlers
      #    to ensure proper handler precedence (user handlers are checked first)
      # 2. Rails rescue_from handlers don't bubble to parent class handlers after reraise
      # 3. Handler order matters: Rails checks rescue_from handlers in reverse declaration order
      module ErrorHandling
        extend T::Sig
        extend ActiveSupport::Concern

        # NOTE: rescue_from handlers are checked in reverse order of declaration.
        # We want LogStruct handlers to be checked AFTER user handlers (lower priority),
        # so we need to add them BEFORE user handlers are declared.

        # This will be called when the module is included/prepended
        sig { params(base: T.untyped).void }
        def self.install_handler(base)
          # Only add the handler once per class
          return if base.instance_variable_get(:@_logstruct_handler_installed)

          # Add our handler FIRST so it has lower priority than user handlers
          base.rescue_from StandardError, with: :log_and_reraise_error
          # Mark as installed to prevent duplicates
          base.instance_variable_set(:@_logstruct_handler_installed, true)
        end

        included do
          LogStruct::Integrations::ActionMailer::ErrorHandling.install_handler(self)
        end

        # Also support prepended (used by tests and manual setup)
        sig { params(base: T.untyped).void }
        def self.prepended(base)
          install_handler(base)
        end

        protected

        # Just log the error without reporting or retrying
        sig { params(ex: StandardError).void }
        def log_and_ignore_error(ex)
          log_email_delivery_error(ex, notify: false, report: false, reraise: false)
        end

        # Log and report to error service, but don't reraise
        sig { params(ex: StandardError).void }
        def log_and_report_error(ex)
          log_email_delivery_error(ex, notify: false, report: true, reraise: false)
        end

        # Log, report to error service, and reraise for retry
        sig { params(ex: StandardError).void }
        def log_and_reraise_error(ex)
          log_email_delivery_error(ex, notify: false, report: true, reraise: true)
        end

        private

        # Handle an error from a mailer
        sig { params(mailer: T.untyped, error: StandardError, message: String).void }
        def log_structured_error(mailer, error, message)
          # Create a structured exception log with context
          context = {
            mailer_class: mailer.class.to_s,
            mailer_action: mailer.respond_to?(:action_name) ? mailer.action_name : nil,
            message: message
          }
          # Create the structured exception log
          exception_data = Log::Error.from_exception(
            Source::Mailer,
            error,
            context
          )
          # Log the structured error
          LogStruct.error(exception_data)
        end

        # Log when email delivery fails
        sig { params(error: StandardError, notify: T::Boolean, report: T::Boolean, reraise: T::Boolean).void }
        def log_email_delivery_error(error, notify: false, report: true, reraise: true)
          # Generate an appropriate error message
          message = error_message_for(error, reraise)
          # Use structured error logging
          log_structured_error(self, error, message)
          # Handle notifications and reporting
          handle_error_notifications(error, notify, report, reraise)
        end

        # Generate an appropriate error message based on the error handling strategy
        sig { params(error: StandardError, reraise: T::Boolean).returns(String) }
        def error_message_for(error, reraise)
          if reraise
            "#{error.class}: Email delivery error, will retry. Recipients: #{recipients(error)}"
          else
            "#{error.class}: Cannot send email to #{recipients(error)}"
          end
        end

        # Handle error notifications, reporting, and reraising
        sig { params(error: StandardError, notify: T::Boolean, report: T::Boolean, reraise: T::Boolean).void }
        def handle_error_notifications(error, notify, report, reraise)
          # Log a notification event if requested
          log_notification_event(error) if notify
          # Report to the error reporting service if requested
          if report
            context = {
              mailer_class: self.class.to_s,
              mailer_action: respond_to?(:action_name) ? action_name : nil,
              recipients: recipients(error)
            }
            # Create an exception log for structured logging
            exception_data = Log::Error.from_exception(
              Source::Mailer,
              error,
              context
            )
            # Log the exception with structured data
            LogStruct.error(exception_data)
            # Call the error handler
            LogStruct.handle_exception(error, source: Source::Mailer, context: context)
          end
          # Re-raise the error if requested
          Kernel.raise error if reraise
        end

        # Log a notification event that can be picked up by external systems
        sig { params(error: StandardError).void }
        def log_notification_event(error)
          # Create an error log data object
          exception_data = Log::Error.from_exception(
            Source::Mailer,
            error,
            {
              mailer: self.class,
              action: action_name,
              recipients: recipients(error)
            }
          )
          # Log the error at info level since it's not a critical error
          LogStruct.info(exception_data)
        end

        sig { params(error: StandardError).returns(String) }
        def recipients(error)
          # Extract recipient info if available
          if error.respond_to?(:recipients) && T.unsafe(error).recipients.present?
            T.unsafe(error).recipients.join(", ")
          else
            "unknown"
          end
        end
      end
    end
  end
end

# typed: strict
# frozen_string_literal: true

module LogStruct
  module Integrations
    module ActionMailer
      # Handles logging of email delivery events
      module EventLogging
        extend ActiveSupport::Concern
        extend T::Sig
        extend T::Helpers

        requires_ancestor { ::ActionMailer::Base }

        included do
          T.bind(self, ActionMailer::Callbacks::ClassMethods)
          # Add callbacks for delivery events
          before_deliver :log_email_delivery
          after_deliver :log_email_delivered
        end

        protected

        # Log when an email is about to be delivered
        sig { void }
        def log_email_delivery
          log_mailer_event(Event::Delivery)
        end

        # Log when an email is delivered
        sig { void }
        def log_email_delivered
          log_mailer_event(Event::Delivered)
        end

        private

        # Log a mailer event with the given event type
        sig do
          params(event_type: Log::ActionMailer::ActionMailerEvent,
            level: Symbol,
            additional_data: T::Hash[Symbol, T.untyped]).returns(T.untyped)
        end
        def log_mailer_event(event_type, level = :info, additional_data = {})
          # Get message (self refers to the mailer instance)
          mailer_message = message if respond_to?(:message)
          # Prepare data for the log entry
          data = {
            message_id: extract_message_id,
            mailer_class: self.class.to_s,
            mailer_action: action_name.to_s
          }.compact
          # Add any additional metadata
          MetadataCollection.add_message_metadata(self, data)
          MetadataCollection.add_context_metadata(self, data)
          data.merge!(additional_data) if additional_data.present?
          # Extract email fields (these will be filtered if email_addresses=true)
          to = mailer_message&.to
          from = mailer_message&.from&.first
          subject = mailer_message&.subject
          # Create a structured log entry
          log_data = Log::ActionMailer.new(
            event: event_type,
            to: to,
            from: from,
            subject: subject,
            additional_data: data
          )
          LogStruct.info(log_data)
          log_data
        end

        # Extract the message ID from the mailer
        sig { returns(T.nilable(String)) }
        def extract_message_id
          return nil unless respond_to?(:message)

          mail_message = message
          return nil unless mail_message.respond_to?(:message_id)

          mail_message.message_id
        end
      end
    end
  end
end

# typed: strict
# frozen_string_literal: true

module LogStruct
  module Integrations
    module ActionMailer
      # Handles collection of metadata for email logging
      module MetadataCollection
        extend T::Sig

        # Add message-specific metadata to log data
        sig { params(mailer: T.untyped, log_data: T::Hash[Symbol, T.untyped]).void }
        def self.add_message_metadata(mailer, log_data)
          message = mailer.respond_to?(:message) ? mailer.message : nil
          # Add recipient count if message is available
          if message
            # Don't log actual email addresses
            log_data[:recipient_count] = [message.to, message.cc, message.bcc].flatten.compact.count
            # Handle case when attachments might be nil
            log_data[:has_attachments] = message.attachments&.any? || false
            log_data[:attachment_count] = message.attachments&.count || 0
          else
            log_data[:recipient_count] = 0
            log_data[:has_attachments] = false
            log_data[:attachment_count] = 0
          end
        end

        # Add context metadata to log data
        sig { params(mailer: T.untyped, log_data: T::Hash[Symbol, T.untyped]).void }
        def self.add_context_metadata(mailer, log_data)
          # Add account ID information if available (but not user email)
          extract_ids_to_log_data(mailer, log_data)
          # Add any current tags from ActiveJob or ActionMailer
          add_current_tags_to_log_data(log_data)
        end

        sig { params(mailer: T.untyped, log_data: T::Hash[Symbol, T.untyped]).void }
        def self.extract_ids_to_log_data(mailer, log_data)
          # Extract account ID if available
          if mailer.instance_variable_defined?(:@account)
            account = mailer.instance_variable_get(:@account)
            log_data[:account_id] = account.id if account.respond_to?(:id)
          end
          # Extract user ID if available
          return unless mailer.instance_variable_defined?(:@user)

          user = mailer.instance_variable_get(:@user)
          log_data[:user_id] = user.id if user.respond_to?(:id)
        end

        sig { params(log_data: T::Hash[Symbol, T.untyped]).void }
        def self.add_current_tags_to_log_data(log_data)
          # Get current tags from ActiveSupport::TaggedLogging if available
          if ::ActiveSupport::TaggedLogging.respond_to?(:current_tags)
            tags = T.unsafe(::ActiveSupport::TaggedLogging).current_tags
            log_data[:tags] = tags if tags.present?
          end
          # Get request_id from ActionDispatch if available
          if ::ActionDispatch::Request.respond_to?(:current_request_id) &&
              T.unsafe(::ActionDispatch::Request).current_request_id.present?
            log_data[:request_id] = T.unsafe(::ActionDispatch::Request).current_request_id
          end
          # Get job_id from ActiveJob if available
          if defined?(::ActiveJob::Logging) && ::ActiveJob::Logging.respond_to?(:job_id) &&
              T.unsafe(::ActiveJob::Logging).job_id.present?
            log_data[:job_id] = T.unsafe(::ActiveJob::Logging).job_id
          end
        end
      end
    end
  end
end

# typed: strict
# frozen_string_literal: true

begin
  require "active_job"
  require "active_job/log_subscriber"
rescue LoadError
  # ActiveJob gem is not available, integration will be skipped
end

require_relative "active_job/log_subscriber" if defined?(::ActiveJob::LogSubscriber)

module LogStruct
  module Integrations
    # ActiveJob integration for structured logging
    module ActiveJob
      extend T::Sig
      extend IntegrationInterface

      # Set up ActiveJob structured logging
      sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
      def self.setup(config)
        return nil unless defined?(::ActiveJob::LogSubscriber)
        return nil unless config.enabled
        return nil unless config.integrations.enable_activejob

        ::ActiveSupport.on_load(:active_job) do
          # Detach the default text formatter
          ::ActiveJob::LogSubscriber.detach_from :active_job
          # Attach our structured formatter
          Integrations::ActiveJob::LogSubscriber.attach_to :active_job
        end
        true
      end
    end
  end
end

# typed: strict
# frozen_string_literal: true

require_relative "../../enums/source"
require_relative "../../enums/event"
require_relative "../../log/active_job"
require_relative "../../log/error"

module LogStruct
  module Integrations
    module ActiveJob
      # Structured logging for ActiveJob
      class LogSubscriber < ::ActiveJob::LogSubscriber
        extend T::Sig

        sig { params(event: T.untyped).void }
        def enqueue(event)
          job = event.payload[:job]
          log_job_event(Event::Enqueue, job, event)
        end

        sig { params(event: T.untyped).void }
        def enqueue_at(event)
          job = event.payload[:job]
          log_job_event(Event::Schedule, job, event, scheduled_at: job.scheduled_at)
        end

        sig { params(event: T.untyped).void }
        def perform(event)
          job = event.payload[:job]
          exception = event.payload[:exception_object]
          if exception
            # Log the exception with the job context
            log_exception(exception, job, event)
          else
            log_job_event(Event::Finish, job, event, duration: event.duration.round(2))
          end
        end

        sig { params(event: T.untyped).void }
        def perform_start(event)
          job = event.payload[:job]
          log_job_event(Event::Start, job, event)
        end

        private

        sig { params(event_type: T.any(Event::Enqueue, Event::Schedule, Event::Start, Event::Finish), job: T.untyped, _event: T.untyped, additional_data: T::Hash[Symbol, T.untyped]).void }
        def log_job_event(event_type, job, _event, additional_data = {})
          # Create structured log data
          log_data = Log::ActiveJob.new(
            event: event_type,
            job_id: job.job_id,
            job_class: job.class.to_s,
            queue_name: job.queue_name,
            duration: additional_data[:duration],
            # Add arguments if the job class allows it
            arguments: job.class.log_arguments? ? job.arguments : nil,
            # Store additional data in the data hash
            additional_data: {
              executions: job.executions,
              scheduled_at: additional_data[:scheduled_at],
              provider_job_id: job.provider_job_id
            }.compact
          )
          # Use Rails logger with our structured formatter
          logger.info(log_data)
        end

        sig { params(exception: StandardError, job: T.untyped, _event: T.untyped).void }
        def log_exception(exception, job, _event)
          # Create job context data for the exception
          job_context = {
            job_id: job.job_id,
            job_class: job.class.to_s,
            queue_name: job.queue_name,
            executions: job.executions,
            provider_job_id: job.provider_job_id
          }
          # Add arguments if the job class allows it
          job_context[:arguments] = job.arguments if job.class.log_arguments?
          # Create exception log with job source and context
          log_data = Log::Error.from_exception(
            Source::Job,
            exception,
            job_context
          )
          # Use Rails logger with our structured formatter
          logger.error(log_data)
        end

        sig { returns(::ActiveSupport::Logger) }
        def logger
          ::ActiveJob::Base.logger
        end
      end
    end
  end
end

# typed: strict
# frozen_string_literal: true
- 1
require "active_support/notifications"
- 1
module LogStruct
- 1
module Integrations
# ActiveRecord Integration for SQL Query Logging
#
# This integration captures and structures all SQL queries executed through ActiveRecord,
# providing detailed performance and debugging information in a structured format.
#
# ## Features:
# - Captures all SQL queries with execution time
# - Safely filters sensitive data from bind parameters
# - Extracts database operation metadata
# - Provides connection pool monitoring information
# - Identifies query types and table names
#
# ## Performance Considerations:
# - Minimal overhead on query execution
# - Async logging prevents I/O blocking
# - Configurable to disable in production if needed
# - Smart filtering reduces log volume for repetitive queries
#
# ## Security:
# - SQL queries are always parameterized (safe)
# - Bind parameters filtered through LogStruct's param filters
# - Sensitive patterns automatically scrubbed
#
# ## Configuration:
# ```ruby
# LogStruct.configure do |config|
# config.integrations.enable_sql_logging = true
# config.integrations.sql_slow_query_threshold = 100.0 # ms
# config.integrations.sql_log_bind_params = false # disable in production
# end
# ```
- 1
module ActiveRecord
- 1
extend T::Sig
- 1
extend IntegrationInterface
# Set up SQL query logging integration
- 2
sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def self.setup(config)
- 16
return nil unless config.integrations.enable_sql_logging
- 15
return nil unless defined?(::ActiveRecord::Base)
- 14
subscribe_to_sql_notifications
- 14
true
end
- 1
private_class_method
# Subscribe to ActiveRecord's sql.active_record notifications
- 2
sig { void }
- 1
def self.subscribe_to_sql_notifications
- 14
::ActiveSupport::Notifications.subscribe("sql.active_record") do |name, start, finish, id, payload|
- 9
handle_sql_event(name, start, finish, id, payload)
rescue => error
- 1
LogStruct.handle_exception(error, source: LogStruct::Source::LogStruct)
end
end
# Process SQL notification event and create structured log
- 2
sig { params(name: String, start: T.untyped, finish: T.untyped, id: String, payload: T::Hash[Symbol, T.untyped]).void }
- 1
def self.handle_sql_event(name, start, finish, id, payload)
# Skip schema queries and Rails internal queries
- 31
return if skip_query?(payload)
- 24
duration = ((finish - start) * 1000.0).round(2)
# Skip fast queries if threshold is configured
- 24
config = LogStruct.config
- 24
if config.integrations.sql_slow_query_threshold&.positive?
- 2
return if duration < config.integrations.sql_slow_query_threshold
end
- 23
sql_log = Log::SQL.new(
message: format_sql_message(payload),
source: Source::App,
event: Event::Database,
sql: payload[:sql]&.strip || "",
name: payload[:name] || "SQL Query",
duration: duration,
row_count: extract_row_count(payload),
connection_adapter: extract_adapter_name(payload),
bind_params: extract_and_filter_binds(payload),
database_name: extract_database_name(payload),
connection_pool_size: extract_pool_size(payload),
active_connections: extract_active_connections(payload),
operation_type: extract_operation_type(payload),
table_names: extract_table_names(payload)
)
- 22
LogStruct.info(sql_log)
end
# Determine if query should be skipped from logging
- 2
sig { params(payload: T::Hash[Symbol, T.untyped]).returns(T::Boolean) }
- 1
def self.skip_query?(payload)
- 31
query_name = payload[:name]
- 31
sql = payload[:sql]
# Skip Rails schema queries
- 31
return true if query_name&.include?("SCHEMA")
- 30
return true if query_name&.include?("CACHE")
# Skip common Rails internal queries
- 29
return true if sql&.include?("schema_migrations")
- 28
return true if sql&.include?("ar_internal_metadata")
# Skip SHOW/DESCRIBE queries
- 27
return true if sql&.match?(/\A\s*(SHOW|DESCRIBE|EXPLAIN)\s/i)
- 24
false
end
# Format a readable message for the SQL log
- 2
sig { params(payload: T::Hash[Symbol, T.untyped]).returns(String) }
- 1
def self.format_sql_message(payload)
- 23
operation_name = payload[:name] || "SQL Query"
- 23
"#{operation_name} executed"
end
# Extract row count from payload
- 2
sig { params(payload: T::Hash[Symbol, T.untyped]).returns(T.nilable(Integer)) }
- 1
def self.extract_row_count(payload)
- 23
row_count = payload[:row_count]
- 23
row_count.is_a?(Integer) ? row_count : nil
end
# Extract database adapter name
- 2
sig { params(payload: T::Hash[Symbol, T.untyped]).returns(T.nilable(String)) }
- 1
def self.extract_adapter_name(payload)
- 23
connection = payload[:connection]
- 23
return nil unless connection
- 22
adapter_name = connection.class.name
- 22
adapter_name&.split("::")&.last
end
# Extract and filter bind parameters
- 2
sig { params(payload: T::Hash[Symbol, T.untyped]).returns(T.nilable(T::Array[T.untyped])) }
- 1
def self.extract_and_filter_binds(payload)
- 23
return nil unless LogStruct.config.integrations.sql_log_bind_params
# Prefer type_casted_binds as they're more readable
- 22
binds = payload[:type_casted_binds] || payload[:binds]
- 22
return nil unless binds
# Filter sensitive data from bind parameters
- 2
binds.map do |bind|
- 4
filter_bind_parameter(bind)
end
end
# Extract database name from connection
- 2
sig { params(payload: T::Hash[Symbol, T.untyped]).returns(T.nilable(String)) }
- 1
def self.extract_database_name(payload)
- 23
connection = payload[:connection]
- 23
return nil unless connection
- 22
if connection.respond_to?(:current_database)
- 22
connection.current_database
elsif connection.respond_to?(:database)
connection.database
end
rescue
nil
end
# Extract connection pool size
- 2
sig { params(payload: T::Hash[Symbol, T.untyped]).returns(T.nilable(Integer)) }
- 1
def self.extract_pool_size(payload)
- 23
connection = payload[:connection]
- 23
return nil unless connection
- 22
pool = connection.pool if connection.respond_to?(:pool)
- 22
pool&.size
rescue
nil
end
# Extract active connection count
- 2
sig { params(payload: T::Hash[Symbol, T.untyped]).returns(T.nilable(Integer)) }
- 1
def self.extract_active_connections(payload)
- 23
connection = payload[:connection]
- 23
return nil unless connection
- 22
pool = connection.pool if connection.respond_to?(:pool)
- 22
pool&.stat&.[](:busy)
rescue
nil
end
# Extract SQL operation type (SELECT, INSERT, etc.)
- 2
sig { params(payload: T::Hash[Symbol, T.untyped]).returns(T.nilable(String)) }
- 1
def self.extract_operation_type(payload)
- 23
sql = payload[:sql]
- 23
return nil unless sql
# Extract first word of SQL query
- 23
match = sql.strip.match(/\A\s*(\w+)/i)
- 23
match&.captures&.first&.upcase
end
# Extract table names from SQL query
- 2
sig { params(payload: T::Hash[Symbol, T.untyped]).returns(T.nilable(T::Array[String])) }
- 1
def self.extract_table_names(payload)
- 23
sql = payload[:sql]
- 23
return nil unless sql
# Simple regex to extract table names (basic implementation)
# This covers most common cases but could be enhanced
- 23
tables = []
# Match FROM, JOIN, UPDATE, INSERT INTO, DELETE FROM patterns
- 23
sql.scan(/(?:FROM|JOIN|UPDATE|INTO|DELETE\s+FROM)\s+["`]?(\w+)["`]?/i) do |match|
- 23
table_name = match[0]
- 23
tables << table_name unless tables.include?(table_name)
end
- 23
tables.empty? ? nil : tables
end
# Filter individual bind parameter values to remove sensitive data
- 2
sig { params(value: T.untyped).returns(T.untyped) }
- 1
def self.filter_bind_parameter(value)
- 4
case value
when String
# Filter strings that look like passwords, tokens, secrets, etc.
- 2
if looks_sensitive?(value)
- 1
"[FILTERED]"
else
- 1
value
end
else
- 2
value
end
end
# Check if a string value looks sensitive and should be filtered
- 2
sig { params(value: String).returns(T::Boolean) }
- 1
def self.looks_sensitive?(value)
# Filter very long strings that might be tokens
- 2
return true if value.length > 50
# Filter strings that look like hashed passwords, API keys, tokens
- 2
return true if value.match?(/\A[a-f0-9]{32,}\z/i) # MD5, SHA, etc.
- 2
return true if value.match?(/\A[A-Za-z0-9+\/]{20,}={0,2}\z/) # Base64
- 2
return true if value.match?(/(password|secret|token|key|auth)/i)
- 1
false
end
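# A quick sketch of how these heuristics classify values (illustrative inputs):
#
# ```ruby
# looks_sensitive?("hello")                            # => false (short, no keywords)
# looks_sensitive?("5f4dcc3b5aa765d61d8327deb882cf99") # => true  (32-char hex digest)
# looks_sensitive?("my_api_token")                     # => true  (matches /token/i)
# filter_bind_parameter("password123")                 # => "[FILTERED]"
# filter_bind_parameter(42)                            # => 42 (non-strings pass through)
# ```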
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../log/active_storage"
- 1
module LogStruct
- 1
module Integrations
# Integration for ActiveStorage structured logging
- 1
module ActiveStorage
- 1
extend T::Sig
- 1
extend IntegrationInterface
# Set up ActiveStorage structured logging
- 1
sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def self.setup(config)
return nil unless defined?(::ActiveStorage)
return nil unless config.enabled
return nil unless config.integrations.enable_activestorage
# Subscribe to all ActiveStorage service events
::ActiveSupport::Notifications.subscribe(/service_.*\.active_storage/) do |*args|
process_active_storage_event(::ActiveSupport::Notifications::Event.new(*args), config)
end
true
end
# Process ActiveStorage events and create structured logs
- 1
sig { params(event: ActiveSupport::Notifications::Event, config: LogStruct::Configuration).void }
- 1
def self.process_active_storage_event(event, config)
return unless config.enabled
return unless config.integrations.enable_activestorage
# Extract key information from the event
event_name = event.name.sub(/\.active_storage$/, "")
service_name = event.payload[:service]
duration = event.duration
# Map service events to log event types
event_type = case event_name
when "service_upload"
Event::Upload
when "service_download"
Event::Download
when "service_delete"
Event::Delete
when "service_delete_prefixed"
Event::Delete
when "service_exist"
Event::Exist
when "service_url"
Event::Url
when "service_download_chunk"
Event::Download
when "service_stream"
Event::Stream
when "service_update_metadata"
Event::Metadata
else
Event::Unknown
end
# Map the event name to an operation
operation = event_name.sub(/^service_/, "").to_sym
# Create structured log event specific to ActiveStorage
log_data = Log::ActiveStorage.new(
event: event_type,
operation: operation,
storage: service_name.to_s,
file_id: event.payload[:key].to_s,
checksum: event.payload[:checksum].to_s,
duration: duration,
# Add other fields where available
metadata: event.payload[:metadata],
exist: event.payload[:exist],
url: event.payload[:url],
filename: event.payload[:filename],
mime_type: event.payload[:content_type],
size: event.payload[:byte_size],
prefix: event.payload[:prefix],
range: event.payload[:range]
)
# Log structured data
LogStruct.info(log_data)
end
- 1
private_class_method :process_active_storage_event
end
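# For reference, a completed service_upload notification produces an info-level
# Log::ActiveStorage entry, serialized to JSON by the formatter roughly as
# (values illustrative):
#
# ```ruby
# # { src: "storage", evt: "upload", file_id: "xyz123", storage: "Disk", duration: 12.3, ... }
# ```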
end
end
# typed: strict
# frozen_string_literal: true
begin
- 1
require "carrierwave"
rescue LoadError
# CarrierWave gem is not available, integration will be skipped
end
- 1
module LogStruct
- 1
module Integrations
# CarrierWave integration for structured logging
- 1
module CarrierWave
- 1
extend T::Sig
- 1
extend IntegrationInterface
# Set up CarrierWave structured logging
- 1
sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def self.setup(config)
return nil unless defined?(::CarrierWave)
return nil unless config.enabled
return nil unless config.integrations.enable_carrierwave
# Patch CarrierWave to add logging
::CarrierWave::Uploader::Base.prepend(LoggingMethods)
true
end
# Methods to add logging to CarrierWave operations
- 1
module LoggingMethods
- 1
extend T::Sig
- 1
extend T::Helpers
- 1
requires_ancestor { ::CarrierWave::Uploader::Base }
# Log file storage operations
- 1
sig { params(args: T.untyped).returns(T.untyped) }
- 1
def store!(*args)
start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
result = super
duration = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time
# Extract file information
file_size = file.size if file.respond_to?(:size)
# Log the store operation with structured data
log_data = Log::CarrierWave.new(
source: Source::CarrierWave,
event: Event::Upload,
duration: duration * 1000.0, # Convert to ms
model: model.class.name,
uploader: self.class.name,
storage: storage.class.name,
mount_point: mounted_as.to_s,
filename: file.filename,
mime_type: file.content_type,
size: file_size,
file_id: identifier,
additional_data: {
version: version_name.to_s,
store_path: store_path,
extension: file.extension
}
)
::Rails.logger.info(log_data)
result
end
# Log file retrieve operations
- 1
sig { params(identifier: T.untyped, args: T.untyped).returns(T.untyped) }
- 1
def retrieve_from_store!(identifier, *args)
start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
result = super
duration = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start_time
# Extract file information if available
file_size = file.size if file&.respond_to?(:size)
# Log the retrieve operation with structured data
log_data = Log::CarrierWave.new(
source: Source::CarrierWave,
event: Event::Download,
duration: duration * 1000.0, # Convert to ms
uploader: self.class.name,
storage: storage.class.name,
mount_point: mounted_as.to_s,
file_id: identifier,
filename: file&.filename,
mime_type: file&.content_type,
size: file_size,
additional_data: {
version: version_name.to_s
}
)
::Rails.logger.info(log_data)
result
end
end
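# With LoggingMethods prepended, ordinary uploader calls emit structured logs
# with no call-site changes (AvatarUploader is a hypothetical uploader):
#
# ```ruby
# uploader = AvatarUploader.new(user, :avatar)
# uploader.store!(File.open("avatar.png"))    # logs an Event::Upload entry
# uploader.retrieve_from_store!("avatar.png") # logs an Event::Download entry
# ```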
end
end
end
# typed: strict
# frozen_string_literal: true
begin
- 1
require "good_job"
rescue LoadError
# GoodJob gem is not available, integration will be skipped
end
- 1
require_relative "good_job/logger" if defined?(::GoodJob)
- 1
require_relative "good_job/log_subscriber" if defined?(::GoodJob)
- 1
module LogStruct
- 1
module Integrations
# GoodJob integration for structured logging
#
# GoodJob is a PostgreSQL-based ActiveJob backend that provides reliable,
# scalable job processing for Rails applications. This integration provides
# structured logging for all GoodJob operations.
#
# ## Features:
# - Structured logging for job execution lifecycle
# - Error tracking and retry logging
# - Performance metrics and timing data
# - Database operation logging
# - Thread and process tracking
# - Custom GoodJob logger with LogStruct formatting
#
# ## Integration Points:
# - Replaces GoodJob.logger with LogStruct-compatible logger
# - Subscribes to GoodJob's ActiveSupport notifications
# - Captures job execution events, errors, and performance metrics
# - Logs database operations and connection information
#
# ## Configuration:
# The integration is automatically enabled when GoodJob is detected and
# LogStruct configuration allows it. It can be disabled by setting:
#
# ```ruby
# config.integrations.enable_goodjob = false
# ```
- 1
module GoodJob
- 1
extend T::Sig
- 1
extend IntegrationInterface
# Set up GoodJob structured logging
#
# This method configures GoodJob to use LogStruct's structured logging
# by replacing the default logger and subscribing to job events.
#
# @param config [LogStruct::Configuration] The LogStruct configuration
# @return [Boolean, nil] Returns true if setup was successful, nil if skipped
- 2
sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def self.setup(config)
- 3
return nil unless defined?(::GoodJob)
return nil unless config.enabled
return nil unless config.integrations.enable_goodjob
# Replace GoodJob's logger with our structured logger
configure_logger
# Subscribe to GoodJob's ActiveSupport notifications
subscribe_to_notifications
true
end
# Configure GoodJob to use LogStruct's structured logger
- 1
sig { void }
- 1
def self.configure_logger
return unless defined?(::GoodJob)
# Use direct reference to avoid const_get - GoodJob is guaranteed to be defined here
goodjob_module = T.unsafe(GoodJob)
# Replace GoodJob.logger with our structured logger if GoodJob is available
if goodjob_module.respond_to?(:logger=)
goodjob_module.logger = LogStruct::Integrations::GoodJob::Logger.new("GoodJob")
end
# Configure error handling for thread errors if GoodJob supports it
if goodjob_module.respond_to?(:on_thread_error=)
goodjob_module.on_thread_error = ->(exception) do
# Log the error using our structured format
log_entry = LogStruct::Log::GoodJob.new(
event: Event::Error,
level: Level::Error,
error_class: exception.class.name,
error_message: exception.message,
error_backtrace: exception.backtrace
)
goodjob_module.logger.error(log_entry)
end
end
end
# Subscribe to GoodJob's ActiveSupport notifications
- 1
sig { void }
- 1
def self.subscribe_to_notifications
return unless defined?(::GoodJob)
# Subscribe to our custom log subscriber for GoodJob events
LogStruct::Integrations::GoodJob::LogSubscriber.attach_to :good_job
end
- 1
private_class_method :configure_logger
- 1
private_class_method :subscribe_to_notifications
end
end
end
# typed: strict
# frozen_string_literal: true
begin
- 1
require "active_support/log_subscriber"
rescue LoadError
# ActiveSupport is not available, log subscriber will be skipped
end
- 1
require_relative "../../log/good_job"
- 1
require_relative "../../enums/event"
- 1
require_relative "../../enums/level"
- 1
module LogStruct
- 1
module Integrations
- 1
module GoodJob
# LogSubscriber for GoodJob ActiveSupport notifications
#
# This subscriber captures GoodJob's ActiveSupport notifications and converts
# them into structured LogStruct::Log::GoodJob entries. It provides detailed
# logging for job lifecycle events, performance metrics, and error tracking.
#
# ## Supported Events:
# - enqueue.good_job - Job queued for execution
# - start.good_job - Job execution started
# - finish.good_job - Job completed successfully
# - error.good_job - Job failed with error
# - schedule.good_job - Job scheduled for future execution
#
# ## Event Data Captured:
# - Job identification (ID, class, queue)
# - Execution context (arguments, priority, scheduled time)
# - Performance metrics (execution time, wait time)
# - Error information (class, message, backtrace)
# - Process and thread information
- 1
class LogSubscriber < ::ActiveSupport::LogSubscriber
- 1
extend T::Sig
# Job enqueued event
- 2
sig { params(event: T.untyped).void }
- 1
def enqueue(event)
- 2
job_data = extract_job_data(event)
- 2
log_entry = LogStruct::Log::GoodJob.new(
event: Event::Enqueue,
level: Level::Info,
job_id: job_data[:job_id],
job_class: job_data[:job_class],
queue_name: job_data[:queue_name],
arguments: job_data[:arguments],
scheduled_at: job_data[:scheduled_at],
priority: job_data[:priority],
execution_time: event.duration,
additional_data: {
enqueue_caller: job_data[:caller_location]
}
)
- 2
logger.info(log_entry)
end
# Job execution started event
- 2
sig { params(event: T.untyped).void }
- 1
def start(event)
- 1
job_data = extract_job_data(event)
- 1
log_entry = LogStruct::Log::GoodJob.new(
event: Event::Start,
level: Level::Info,
job_id: job_data[:job_id],
job_class: job_data[:job_class],
queue_name: job_data[:queue_name],
arguments: job_data[:arguments],
executions: job_data[:executions],
wait_time: job_data[:wait_time],
scheduled_at: job_data[:scheduled_at],
process_id: ::Process.pid,
thread_id: Thread.current.object_id.to_s(36)
)
- 1
logger.info(log_entry)
end
# Job completed successfully event
- 2
sig { params(event: T.untyped).void }
- 1
def finish(event)
- 1
job_data = extract_job_data(event)
- 1
log_entry = LogStruct::Log::GoodJob.new(
event: Event::Finish,
level: Level::Info,
job_id: job_data[:job_id],
job_class: job_data[:job_class],
queue_name: job_data[:queue_name],
executions: job_data[:executions],
run_time: event.duration,
finished_at: Time.now,
process_id: ::Process.pid,
thread_id: Thread.current.object_id.to_s(36),
additional_data: {
result: job_data[:result]
}
)
- 1
logger.info(log_entry)
end
# Job failed with error event
- 2
sig { params(event: T.untyped).void }
- 1
def error(event)
- 2
job_data = extract_job_data(event)
- 2
log_entry = LogStruct::Log::GoodJob.new(
event: Event::Error,
level: Level::Error,
job_id: job_data[:job_id],
job_class: job_data[:job_class],
queue_name: job_data[:queue_name],
executions: job_data[:executions],
exception_executions: job_data[:exception_executions],
error_class: job_data[:error_class],
error_message: job_data[:error_message],
error_backtrace: job_data[:error_backtrace],
run_time: event.duration,
process_id: ::Process.pid,
thread_id: Thread.current.object_id.to_s(36)
)
- 2
logger.error(log_entry)
end
# Job scheduled for future execution event
- 2
sig { params(event: T.untyped).void }
- 1
def schedule(event)
- 1
job_data = extract_job_data(event)
- 1
log_entry = LogStruct::Log::GoodJob.new(
event: Event::Schedule,
level: Level::Info,
job_id: job_data[:job_id],
job_class: job_data[:job_class],
queue_name: job_data[:queue_name],
arguments: job_data[:arguments],
scheduled_at: job_data[:scheduled_at],
priority: job_data[:priority],
cron_key: job_data[:cron_key],
execution_time: event.duration
)
- 1
logger.info(log_entry)
end
- 1
private
# Extract job data from ActiveSupport event payload
- 2
sig { params(event: T.untyped).returns(T::Hash[Symbol, T.untyped]) }
- 1
def extract_job_data(event)
- 7
payload = event.payload || {}
- 7
job = payload[:job]
- 7
execution = payload[:execution] || payload[:good_job_execution]
- 7
exception = payload[:exception] || payload[:error]
- 7
data = {}
# Basic job information
- 7
if job
- 6
data[:job_id] = job.job_id if job.respond_to?(:job_id)
- 6
data[:job_class] = job.job_class if job.respond_to?(:job_class)
- 6
data[:queue_name] = job.queue_name if job.respond_to?(:queue_name)
- 6
data[:arguments] = job.arguments if job.respond_to?(:arguments)
- 6
data[:priority] = job.priority if job.respond_to?(:priority)
- 6
data[:scheduled_at] = job.scheduled_at if job.respond_to?(:scheduled_at)
- 6
data[:cron_key] = job.cron_key if job.respond_to?(:cron_key)
- 6
data[:caller_location] = job.enqueue_caller_location if job.respond_to?(:enqueue_caller_location)
end
# Execution-specific information
- 7
if execution
- 3
data[:executions] = execution.executions if execution.respond_to?(:executions)
- 3
data[:exception_executions] = execution.exception_executions if execution.respond_to?(:exception_executions)
# Use existing wait_time if available, otherwise calculate it
- 3
if execution.respond_to?(:wait_time) && execution.wait_time
- 1
data[:wait_time] = execution.wait_time
- 2
elsif execution.respond_to?(:created_at)
- 2
data[:wait_time] = calculate_wait_time(execution)
end
- 3
data[:batch_id] = execution.batch_id if execution.respond_to?(:batch_id)
- 3
data[:cron_key] ||= execution.cron_key if execution.respond_to?(:cron_key)
end
# Error information
- 7
if exception
- 2
data[:error_class] = exception.class.name
- 2
data[:error_message] = exception.message
- 2
data[:error_backtrace] = exception.backtrace&.first(20) # Limit backtrace size
end
# Result information
- 7
data[:result] = payload[:result] if payload.key?(:result)
- 7
data
end
# Calculate wait time from job creation to execution start
- 2
sig { params(execution: T.untyped).returns(T.nilable(Float)) }
- 1
def calculate_wait_time(execution)
- 4
return nil unless execution.respond_to?(:created_at)
- 4
return nil unless execution.respond_to?(:performed_at)
- 4
return nil unless execution.created_at && execution.performed_at
- 3
(execution.performed_at - execution.created_at).to_f
rescue
# Return nil if calculation fails
nil
end
# Get the appropriate logger for GoodJob events
- 2
sig { returns(T.untyped) }
- 1
def logger
# Always use Rails.logger - in production it will be configured by the integration setup,
# in tests it will be set up by the test harness
- 7
Rails.logger
end
end
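# Because the subscriber attaches to the :good_job namespace, each handler maps
# to a correspondingly named notification. A hand-rolled example (my_job is
# hypothetical):
#
# ```ruby
# ActiveSupport::Notifications.instrument("enqueue.good_job", job: my_job) do
#   # enqueue work here; the enqueue handler above logs an Event::Enqueue entry
# end
# ```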
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "../../semantic_logger/logger"
- 1
require_relative "../../log/good_job"
- 1
require_relative "../../enums/source"
- 1
module LogStruct
- 1
module Integrations
- 1
module GoodJob
# Custom Logger for GoodJob that creates LogStruct::Log::GoodJob entries
#
# This logger extends LogStruct's SemanticLogger to provide optimal logging
# performance while creating structured log entries specifically for GoodJob
# operations and events.
#
# ## Benefits:
# - High-performance logging with SemanticLogger backend
# - Structured GoodJob-specific log entries
# - Automatic job context capture
# - Thread and process information
# - Performance metrics and timing data
#
# ## Usage:
# This logger is automatically configured when the GoodJob integration
# is enabled. It replaces GoodJob.logger to provide structured logging
# for all GoodJob operations.
- 1
class Logger < LogStruct::SemanticLogger::Logger
- 1
extend T::Sig
# Override log methods to create GoodJob-specific log structs
- 1
%i[debug info warn error fatal].each do |level|
- 5
define_method(level) do |message = nil, payload = nil, &block|
# Extract basic job context from thread-local variables
- 12
job_context = {}
- 12
if Thread.current[:good_job_execution]
- 2
execution = Thread.current[:good_job_execution]
- 2
if execution.respond_to?(:job_id)
- 2
job_context[:job_id] = execution.job_id
- 2
job_context[:job_class] = execution.job_class if execution.respond_to?(:job_class)
- 2
job_context[:queue_name] = execution.queue_name if execution.respond_to?(:queue_name)
- 2
job_context[:executions] = execution.executions if execution.respond_to?(:executions)
- 2
job_context[:scheduled_at] = execution.scheduled_at if execution.respond_to?(:scheduled_at)
- 2
job_context[:priority] = execution.priority if execution.respond_to?(:priority)
end
end
# Create a GoodJob log struct with the context
- 12
log_struct = Log::GoodJob.new(
event: Event::Log,
level: LogStruct::Level.from_severity(level.to_s.upcase),
process_id: ::Process.pid,
thread_id: Thread.current.object_id.to_s(36),
job_id: job_context[:job_id],
job_class: job_context[:job_class],
queue_name: job_context[:queue_name],
executions: job_context[:executions],
scheduled_at: job_context[:scheduled_at],
priority: job_context[:priority],
additional_data: {
- 1
message: message || (block ? block.call : "")
}
)
# Pass the struct to SemanticLogger
- 12
super(log_struct, payload, &nil)
end
end
end
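# Any message logged during a GoodJob execution is enriched from
# Thread.current[:good_job_execution]. A minimal sketch:
#
# ```ruby
# logger = LogStruct::Integrations::GoodJob::Logger.new("GoodJob")
# logger.info("Processing payment")
# # => emits a Log::GoodJob struct carrying job_id, queue_name, etc. when available
# ```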
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require "action_dispatch/middleware/host_authorization"
- 1
require_relative "../enums/event"
- 1
module LogStruct
- 1
module Integrations
# Host Authorization integration for structured logging of blocked hosts
- 1
module HostAuthorization
- 1
extend T::Sig
- 1
extend IntegrationInterface
- 1
RESPONSE_HTML = T.let(
"<html><head><title>Blocked Host</title></head><body>" \
"<h1>Blocked Host</h1>" \
"<p>This host is not permitted to access this application.</p>" \
"<p>If you are the administrator, check your configuration.</p>" \
"</body></html>",
String
)
- 1
RESPONSE_HEADERS = T.let(
{
"Content-Type" => "text/html",
"Content-Length" => RESPONSE_HTML.bytesize.to_s
}.freeze,
T::Hash[String, String]
)
- 1
FORBIDDEN_STATUS = T.let(403, Integer)
# Set up host authorization logging
- 1
sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def self.setup(config)
return nil unless config.enabled
return nil unless config.integrations.enable_host_authorization
# Define the response app as a standalone lambda to keep the handler readable
response_app = lambda do |env|
request = ::ActionDispatch::Request.new(env)
# Include the blocked hosts app configuration in the log entry
# This can be helpful later when reviewing logs.
blocked_hosts = env["action_dispatch.blocked_hosts"]
# Create a security error to be handled
blocked_host_error = ::ActionController::BadRequest.new(
"Blocked host detected: #{request.host}"
)
# Create request context hash
context = {
blocked_host: request.host,
client_ip: request.ip,
x_forwarded_for: request.x_forwarded_for,
http_method: request.method,
path: request.path,
user_agent: request.user_agent,
allowed_hosts: blocked_hosts&.allowed_hosts,
allow_ip_hosts: blocked_hosts&.allow_ip_hosts
}
# Handle error according to configured mode (log, report, raise)
LogStruct.handle_exception(
blocked_host_error,
source: Source::Security,
context: context
)
# Use pre-defined headers and response if we are only logging or reporting
[FORBIDDEN_STATUS, RESPONSE_HEADERS, [RESPONSE_HTML]]
end
# Replace the default HostAuthorization app with our custom app for logging
Rails.application.config.host_authorization = {
response_app: response_app
}
true
end
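# A request whose Host header is not in the allow list now receives the static
# 403 page above while a structured security log is emitted. Hosts are still
# allowed through the standard Rails setting:
#
# ```ruby
# Rails.application.config.hosts << "app.example.com"
# ```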
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
module LogStruct
- 1
module Integrations
# Interface that all integrations must implement
# This ensures consistent behavior across all integration modules
- 1
module IntegrationInterface
- 1
extend T::Sig
- 1
extend T::Helpers
# This is an interface that should be implemented by all integration modules
- 1
interface!
# All integrations must implement this method to set up their functionality
# @return [Boolean, nil] Returns true if setup was successful, nil if skipped
- 2
sig { abstract.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def setup(config); end
end
end
end
# typed: strict
# frozen_string_literal: true
begin
- 1
require "lograge"
rescue LoadError
# Lograge gem is not available, integration will be skipped
end
- 1
module LogStruct
- 1
module Integrations
# Lograge integration for structured request logging
- 1
module Lograge
- 1
extend IntegrationInterface
- 1
class << self
- 1
extend T::Sig
# Set up lograge for structured request logging
- 1
sig { override.params(logstruct_config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def setup(logstruct_config)
return nil unless defined?(::Lograge)
return nil unless logstruct_config.enabled
return nil unless logstruct_config.integrations.enable_lograge
configure_lograge(logstruct_config)
true
end
- 1
sig { params(logstruct_config: LogStruct::Configuration).void }
- 1
def configure_lograge(logstruct_config)
::Rails.application.configure do
config.lograge.enabled = true
# Use a raw formatter that just returns the log struct.
# The struct is converted to JSON by our Formatter (after filtering, etc.)
config.lograge.formatter = T.let(
lambda do |data|
# Convert the data hash to a Log::Request struct
Log::Request.new(
source: Source::Rails,
event: Event::Request,
timestamp: Time.now,
http_method: data[:method],
path: data[:path],
format: data[:format],
controller: data[:controller],
action: data[:action],
status: data[:status],
duration: data[:duration],
view: data[:view],
db: data[:db],
params: data[:params]
)
end,
T.proc.params(hash: T::Hash[Symbol, T.untyped]).returns(Log::Request)
)
# Add custom options to lograge
config.lograge.custom_options = lambda do |event|
Integrations::Lograge.lograge_default_options(event)
end
end
end
- 1
sig { params(event: ActiveSupport::Notifications::Event).returns(T::Hash[Symbol, T.untyped]) }
- 1
def lograge_default_options(event)
# Extract essential fields from the payload
options = event.payload.slice(
:request_id,
:host,
:source_ip
).compact
if event.payload[:params].present?
options[:params] = event.payload[:params].except("controller", "action")
end
# Process headers if available
process_headers(event, options)
# Apply custom options from application if provided
apply_custom_options(event, options)
options
end
# Process headers from the event payload
- 1
sig { params(event: ActiveSupport::Notifications::Event, options: T::Hash[Symbol, T.untyped]).void }
- 1
def process_headers(event, options)
headers = event.payload[:headers]
return if headers.blank?
options[:user_agent] = headers["HTTP_USER_AGENT"]
options[:content_type] = headers["CONTENT_TYPE"]
options[:accept] = headers["HTTP_ACCEPT"]
end
# Apply custom options from the application's configuration
- 1
sig { params(event: ActiveSupport::Notifications::Event, options: T::Hash[Symbol, T.untyped]).void }
- 1
def apply_custom_options(event, options)
custom_options_proc = LogStruct.config.integrations.lograge_custom_options
return unless custom_options_proc&.respond_to?(:call)
# Call the proc with the event and options
# The proc can modify the options hash directly
custom_options_proc.call(event, options)
end
- 1
private :configure_lograge, :process_headers, :apply_custom_options
end
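# Applications can merge extra request fields through the configured proc
# (a sketch; :tenant_id is a hypothetical payload key):
#
# ```ruby
# LogStruct.configure do |config|
#   config.integrations.lograge_custom_options = ->(event, options) do
#     options[:tenant_id] = event.payload[:tenant_id]
#   end
# end
# ```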
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require "rack"
- 1
require "action_dispatch/middleware/show_exceptions"
- 1
require_relative "rack_error_handler/middleware"
- 1
module LogStruct
- 1
module Integrations
# Rack middleware integration for structured logging
- 1
module RackErrorHandler
- 1
extend T::Sig
- 1
extend IntegrationInterface
# Set up Rack middleware for structured error logging
- 1
sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def self.setup(config)
return nil unless config.enabled
return nil unless config.integrations.enable_rack_error_handler
# Add structured logging middleware for security violations and errors
# Need to insert after ShowExceptions to catch IP spoofing errors
::Rails.application.middleware.insert_after(
::ActionDispatch::ShowExceptions,
Integrations::RackErrorHandler::Middleware
)
true
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
module LogStruct
- 1
module Integrations
- 1
module RackErrorHandler
# Custom middleware to enhance Rails error logging with JSON format and request details
- 1
class Middleware
- 1
extend T::Sig
# IP Spoofing error response
- 1
IP_SPOOF_HTML = T.let(
"<html><head><title>IP Spoofing Detected</title></head><body>" \
"<h1>Forbidden</h1>" \
"<p>IP spoofing detected. This request has been blocked for security reasons.</p>" \
"</body></html>",
String
)
# CSRF error response
- 1
CSRF_HTML = T.let(
"<html><head><title>CSRF Error</title></head><body>" \
"<h1>Forbidden</h1>" \
"<p>Invalid authenticity token. This request has been blocked to prevent cross-site request forgery.</p>" \
"</body></html>",
String
)
# Response headers calculated at load time
- 1
IP_SPOOF_HEADERS = T.let(
{
"Content-Type" => "text/html",
"Content-Length" => IP_SPOOF_HTML.bytesize.to_s
}.freeze,
T::Hash[String, String]
)
- 1
CSRF_HEADERS = T.let(
{
"Content-Type" => "text/html",
"Content-Length" => CSRF_HTML.bytesize.to_s
}.freeze,
T::Hash[String, String]
)
# HTTP status code for forbidden responses
- 1
FORBIDDEN_STATUS = T.let(403, Integer)
- 1
sig { params(app: T.untyped).void }
- 1
def initialize(app)
@app = app
end
- 1
sig { params(env: T.untyped).returns(T.untyped) }
- 1
def call(env)
return @app.call(env) unless LogStruct.enabled?
# Try to process the request
begin
@app.call(env)
rescue ::ActionDispatch::RemoteIp::IpSpoofAttackError => ip_spoof_error
# Create a security log for IP spoofing
security_log = Log::Security.new(
event: Event::IPSpoof,
message: ip_spoof_error.message,
# Can't call .remote_ip on the request because that's what raises the error.
# Have to pass the client_ip and x_forwarded_for headers.
client_ip: env["HTTP_CLIENT_IP"],
x_forwarded_for: env["HTTP_X_FORWARDED_FOR"],
path: env["PATH_INFO"],
http_method: env["REQUEST_METHOD"],
user_agent: env["HTTP_USER_AGENT"],
referer: env["HTTP_REFERER"],
request_id: env["action_dispatch.request_id"]
)
# Log the structured data
::Rails.logger.warn(security_log)
# Report the error
context = extract_request_context(env)
LogStruct.handle_exception(ip_spoof_error, source: Source::Security, context: context)
# If handle_exception raised an exception then Rails will deal with it (e.g. config.exceptions_app)
# If we are only logging or reporting these security errors, then return a default response
[FORBIDDEN_STATUS, IP_SPOOF_HEADERS, [IP_SPOOF_HTML]]
rescue ::ActionController::InvalidAuthenticityToken => invalid_auth_token_error
# Create a security log for CSRF error
request = ::ActionDispatch::Request.new(env)
security_log = Log::Security.new(
event: Event::CSRFViolation,
message: invalid_auth_token_error.message,
path: request.path,
http_method: request.method,
source_ip: request.remote_ip,
user_agent: request.user_agent,
referer: request.referer,
request_id: request.request_id
)
LogStruct.error(security_log)
# Report to error reporting service and/or re-raise
context = extract_request_context(env)
LogStruct.handle_exception(invalid_auth_token_error, source: Source::Security, context: context)
# If handle_exception raised an exception then Rails will deal with it (e.g. config.exceptions_app)
# If we are only logging or reporting these security errors, then return a default response
[FORBIDDEN_STATUS, CSRF_HEADERS, [CSRF_HTML]]
rescue => error
# Extract request context for error reporting
context = extract_request_context(env)
# Create and log a structured exception with request context
exception_log = Log::Error.from_exception(
Source::Rails,
error,
context
)
LogStruct.error(exception_log)
# Re-raise any standard errors to let Rails or error reporter handle it.
# Rails will also log the request details separately
raise error
end
end
- 1
private
- 1
sig { params(env: T::Hash[String, T.untyped]).returns(T::Hash[Symbol, T.untyped]) }
- 1
def extract_request_context(env)
request = ::ActionDispatch::Request.new(env)
{
request_id: request.request_id,
path: request.path,
method: request.method,
user_agent: request.user_agent,
referer: request.referer
}
rescue => error
# If we can't extract request context, return minimal info
{error_extracting_context: error.message}
end
end
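# End to end: an IP-spoofed or CSRF-failing request is answered with the static
# 403 page and logged, while any other exception is logged with request context
# and re-raised for Rails to handle. Installed manually it would look like this
# (the integration above inserts it automatically):
#
# ```ruby
# Rails.application.middleware.insert_after(
#   ActionDispatch::ShowExceptions,
#   LogStruct::Integrations::RackErrorHandler::Middleware
# )
# ```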
end
end
end
# typed: strict
# frozen_string_literal: true
begin
- 1
require "shrine"
rescue LoadError
# Shrine gem is not available, integration will be skipped
end
- 1
module LogStruct
- 1
module Integrations
# Shrine integration for structured logging
- 1
module Shrine
- 1
extend T::Sig
- 1
extend IntegrationInterface
# Set up Shrine structured logging
- 1
sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def self.setup(config)
return nil unless defined?(::Shrine)
return nil unless config.enabled
return nil unless config.integrations.enable_shrine
# Create a structured log subscriber for Shrine
# ActiveSupport::Notifications::Event has name, time, end, transaction_id, payload, and duration
shrine_log_subscriber = T.unsafe(lambda do |event|
payload = event.payload.except(:io, :metadata, :name).dup
# Map event name to Event type
event_type = case event.name
when :upload then Event::Upload
when :download then Event::Download
when :delete then Event::Delete
when :metadata then Event::Metadata
when :exists then Event::Exist # Shrine emits :exists; map it to Event::Exist to match ActiveStorage naming
else Event::Unknown
end
# Create structured log data
log_data = Log::Shrine.new(
source: Source::Shrine,
event: event_type,
duration: event.duration,
storage: payload[:storage],
location: payload[:location],
uploader: payload[:uploader],
upload_options: payload[:upload_options],
download_options: payload[:download_options],
options: payload[:options],
# Data is flattened by the JSON formatter
additional_data: payload.except(
:storage,
:location,
:uploader,
:upload_options,
:download_options,
:options
)
)
# Pass the structured hash to the logger
# If Rails.logger has our Formatter, it will handle JSON conversion
::Shrine.logger.info log_data
end)
# Configure Shrine to use our structured log subscriber
::Shrine.plugin :instrumentation,
events: %i[upload exists download delete],
log_subscriber: shrine_log_subscriber
true
end
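# Once the instrumentation plugin is loaded, normal Shrine operations drive the
# subscriber (uploader here is any configured Shrine uploader instance):
#
# ```ruby
# uploaded_file = uploader.upload(File.open("photo.jpg"))
# # => emits an Event::Upload log entry with storage, location, and duration
# ```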
end
end
end
# typed: strict
# frozen_string_literal: true
begin
- 1
require "sidekiq"
rescue LoadError
# Sidekiq gem is not available, integration will be skipped
end
- 1
require_relative "sidekiq/logger" if defined?(::Sidekiq)
- 1
module LogStruct
- 1
module Integrations
# Sidekiq integration for structured logging
- 1
module Sidekiq
- 1
extend T::Sig
- 1
extend IntegrationInterface
# Set up Sidekiq structured logging
- 1
sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def self.setup(config)
return nil unless defined?(::Sidekiq)
return nil unless config.enabled
return nil unless config.integrations.enable_sidekiq
# Configure Sidekiq server (worker) to use our logger
::Sidekiq.configure_server do |sidekiq_config|
sidekiq_config.logger = LogStruct::Integrations::Sidekiq::Logger.new("Sidekiq-Server")
end
# Configure Sidekiq client (Rails app) to use our logger
::Sidekiq.configure_client do |sidekiq_config|
sidekiq_config.logger = LogStruct::Integrations::Sidekiq::Logger.new("Sidekiq-Client")
end
true
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require "sorbet-runtime"
- 1
module LogStruct
- 1
module Integrations
# Integration for Sorbet runtime type checking error handlers
# This module installs error handlers that report type errors through LogStruct
# These handlers can be enabled/disabled using configuration
- 1
module Sorbet
- 1
extend T::Sig
- 1
extend IntegrationInterface
# Set up Sorbet error handlers to report errors through LogStruct
- 1
sig { override.params(config: LogStruct::Configuration).returns(T.nilable(T::Boolean)) }
- 1
def self.setup(config)
return nil unless config.integrations.enable_sorbet_error_handlers
# Install inline type error handler
# Called when T.let, T.cast, T.must, etc. fail
T::Configuration.inline_type_error_handler = lambda do |error, _opts|
LogStruct.handle_exception(error, source: LogStruct::Source::TypeChecking)
end
# Install call validation error handler
# Called when method signature validation fails
T::Configuration.call_validation_error_handler = lambda do |_signature, opts|
error = TypeError.new(opts[:pretty_message])
LogStruct.handle_exception(error, source: LogStruct::Source::TypeChecking)
end
# Install sig builder error handler
# Called when there's a problem with a signature definition
T::Configuration.sig_builder_error_handler = lambda do |error, _location|
LogStruct.handle_exception(error, source: LogStruct::Source::TypeChecking)
end
# Install sig validation error handler
# Called when there's a problem with a signature validation
T::Configuration.sig_validation_error_handler = lambda do |error, _opts|
LogStruct.handle_exception(error, source: LogStruct::Source::TypeChecking)
end
true
end
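# With the handlers installed, runtime type failures are routed through
# LogStruct instead of Sorbet's defaults, e.g. a failing cast:
#
# ```ruby
# T.let("not a number", Integer)
# # => TypeError reported via LogStruct.handle_exception with Source::TypeChecking
# ```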
end
end
end
# typed: strict
# frozen_string_literal: true
# Common Enums
- 1
require_relative "enums/source"
- 1
require_relative "enums/event"
- 1
require_relative "enums/level"
# Log Structs
- 1
require_relative "log/carrierwave"
- 1
require_relative "log/action_mailer"
- 1
require_relative "log/active_storage"
- 1
require_relative "log/active_job"
- 1
require_relative "log/error"
- 1
require_relative "log/good_job"
- 1
require_relative "log/plain"
- 1
require_relative "log/request"
- 1
require_relative "log/security"
- 1
require_relative "log/shrine"
- 1
require_relative "log/sidekiq"
- 1
require_relative "log/sql"
- 1
module LogStruct
# Type aliases for all possible log types
# This should be updated whenever a new log type is added
# (Can't use sealed! unless we want to put everything in one giant file.)
- 1
LogClassType = T.type_alias do
T.any(
T.class_of(LogStruct::Log::CarrierWave),
T.class_of(LogStruct::Log::ActionMailer),
T.class_of(LogStruct::Log::ActiveStorage),
T.class_of(LogStruct::Log::ActiveJob),
T.class_of(LogStruct::Log::Error),
T.class_of(LogStruct::Log::GoodJob),
T.class_of(LogStruct::Log::Plain),
T.class_of(LogStruct::Log::Request),
T.class_of(LogStruct::Log::Security),
T.class_of(LogStruct::Log::Shrine),
T.class_of(LogStruct::Log::Sidekiq),
T.class_of(LogStruct::Log::SQL)
)
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "interfaces/additional_data_field"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "shared/merge_additional_data_fields"
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
require_relative "../log_keys"
- 1
module LogStruct
- 1
module Log
# Email log entry for structured logging
- 1
class ActionMailer < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include Interfaces::AdditionalDataField
- 1
include SerializeCommon
- 1
include MergeAdditionalDataFields
- 1
ActionMailerEvent = T.type_alias {
- 1
T.any(Event::Delivery, Event::Delivered)
}
# Common fields
- 1
const :source, Source::Mailer, default: T.let(Source::Mailer, Source::Mailer)
- 1
const :event, ActionMailerEvent
- 3
const :timestamp, Time, factory: -> { Time.now }
- 1
const :level, Level, default: T.let(Level::Info, Level)
# Email-specific fields
- 1
const :to, T.nilable(T.any(String, T::Array[String])), default: nil
- 1
const :from, T.nilable(String), default: nil
- 1
const :subject, T.nilable(String), default: nil
- 1
const :additional_data, T::Hash[Symbol, T.untyped], default: {}
# Convert the log entry to a hash for serialization
- 2
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
- 2
hash = serialize_common(strict)
- 2
merge_additional_data_fields(hash)
# Add email-specific fields if they're present
- 2
hash[LOG_KEYS.fetch(:to)] = to if to
- 2
hash[LOG_KEYS.fetch(:from)] = from if from
- 2
hash[LOG_KEYS.fetch(:subject)] = subject if subject
- 2
hash
end
end
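# A minimal delivery log (values illustrative):
#
# ```ruby
# LogStruct::Log::ActionMailer.new(
#   event: Event::Delivered,
#   to: ["user@example.com"],
#   subject: "Welcome"
# )
# ```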
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "interfaces/additional_data_field"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "shared/merge_additional_data_fields"
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
require_relative "../log_keys"
- 1
module LogStruct
- 1
module Log
# ActiveJob log entry for structured logging
- 1
class ActiveJob < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include Interfaces::AdditionalDataField
- 1
include SerializeCommon
- 1
include MergeAdditionalDataFields
- 1
ActiveJobEvent = T.type_alias {
- 1
T.any(
Event::Enqueue,
Event::Schedule,
Event::Start,
Event::Finish
)
}
# Common fields
- 1
const :source, Source::Job, default: T.let(Source::Job, Source::Job)
- 1
const :event, ActiveJobEvent
- 1
const :timestamp, Time, factory: -> { Time.now }
- 1
const :level, Level, default: T.let(Level::Info, Level)
# Job-specific fields
- 1
const :job_id, T.nilable(String), default: nil
- 1
const :job_class, T.nilable(String), default: nil
- 1
const :queue_name, T.nilable(String), default: nil
- 1
const :arguments, T.nilable(T::Array[T.untyped]), default: nil
- 1
const :duration, T.nilable(Float), default: nil
- 1
const :additional_data, T::Hash[Symbol, T.untyped], default: {}
# Convert the log entry to a hash for serialization
- 1
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
hash = serialize_common(strict)
merge_additional_data_fields(hash)
# Add job-specific fields if they're present
hash[LOG_KEYS.fetch(:job_id)] = job_id if job_id
hash[LOG_KEYS.fetch(:job_class)] = job_class if job_class
hash[LOG_KEYS.fetch(:queue_name)] = queue_name if queue_name
hash[LOG_KEYS.fetch(:arguments)] = arguments if arguments
hash[LOG_KEYS.fetch(:duration)] = duration if duration
hash
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
module LogStruct
- 1
module Log
# ActiveStorage log entry for structured logging
- 1
class ActiveStorage < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include SerializeCommon
# Define valid event types for ActiveStorage
- 1
ActiveStorageEvent = T.type_alias {
- 1
T.any(
Event::Upload,
Event::Download,
Event::Delete,
Event::Metadata,
Event::Exist,
Event::Stream,
Event::Url,
Event::Unknown
)
}
# Common fields
- 1
const :source, Source::Storage, default: T.let(Source::Storage, Source::Storage)
- 1
const :event, ActiveStorageEvent
- 1
const :timestamp, Time, factory: -> { Time.now }
- 1
const :level, Level, default: T.let(Level::Info, Level)
# ActiveStorage-specific fields
- 1
const :operation, T.nilable(Symbol), default: nil
- 1
const :storage, T.nilable(String), default: nil
- 1
const :file_id, T.nilable(String), default: nil
- 1
const :filename, T.nilable(String), default: nil
- 1
const :mime_type, T.nilable(String), default: nil
- 1
const :size, T.nilable(Integer), default: nil
- 1
const :metadata, T.nilable(T::Hash[String, T.untyped]), default: nil
- 1
const :duration, T.nilable(Float), default: nil
- 1
const :checksum, T.nilable(String), default: nil
- 1
const :exist, T.nilable(T::Boolean), default: nil
- 1
const :url, T.nilable(String), default: nil
- 1
const :prefix, T.nilable(String), default: nil
- 1
const :range, T.nilable(String), default: nil
# Convert the log entry to a hash for serialization
- 1
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
hash = serialize_common(strict)
# Add ActiveStorage-specific fields - only include non-nil values
hash[LOG_KEYS.fetch(:operation)] = operation if operation
hash[LOG_KEYS.fetch(:storage)] = storage if storage
hash[LOG_KEYS.fetch(:file_id)] = file_id if file_id
hash[LOG_KEYS.fetch(:filename)] = filename if filename
hash[LOG_KEYS.fetch(:mime_type)] = mime_type if mime_type
hash[LOG_KEYS.fetch(:size)] = size if size
hash[LOG_KEYS.fetch(:metadata)] = metadata if metadata
hash[LOG_KEYS.fetch(:duration)] = duration if duration
hash[LOG_KEYS.fetch(:checksum)] = checksum if checksum
hash[LOG_KEYS.fetch(:exist)] = exist if !exist.nil?
hash[LOG_KEYS.fetch(:url)] = url if url
hash[LOG_KEYS.fetch(:prefix)] = prefix if prefix
hash[LOG_KEYS.fetch(:range)] = range if range
hash
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "interfaces/additional_data_field"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "shared/merge_additional_data_fields"
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
require_relative "../log_keys"
- 1
module LogStruct
- 1
module Log
# CarrierWave log entry for structured logging
- 1
class CarrierWave < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include Interfaces::AdditionalDataField
- 1
include SerializeCommon
- 1
include MergeAdditionalDataFields
- 1
CarrierWaveEvent = T.type_alias {
- 1
T.any(
Event::Upload,
Event::Download,
Event::Delete,
Event::Metadata,
Event::Exist,
Event::Unknown
)
}
# Common fields
- 1
const :source, Source::CarrierWave, default: T.let(Source::CarrierWave, Source::CarrierWave)
- 1
const :event, CarrierWaveEvent
- 1
const :timestamp, Time, factory: -> { Time.now }
- 1
const :level, Level, default: T.let(Level::Info, Level)
# File-specific fields
- 1
const :operation, T.nilable(Symbol), default: nil
- 1
const :storage, T.nilable(String), default: nil
- 1
const :file_id, T.nilable(String), default: nil
- 1
const :filename, T.nilable(String), default: nil
- 1
const :mime_type, T.nilable(String), default: nil
- 1
const :size, T.nilable(Integer), default: nil
- 1
const :metadata, T.nilable(T::Hash[String, T.untyped]), default: nil
- 1
const :duration, T.nilable(Float), default: nil
# CarrierWave-specific fields
- 1
const :uploader, T.nilable(String), default: nil
- 1
const :model, T.nilable(String), default: nil
- 1
const :mount_point, T.nilable(String), default: nil
- 1
const :additional_data, T::Hash[Symbol, T.untyped], default: {}
# Convert the log entry to a hash for serialization
- 1
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
hash = serialize_common(strict)
merge_additional_data_fields(hash)
# Add file-specific fields if they're present
hash[LOG_KEYS.fetch(:storage)] = storage if storage
hash[LOG_KEYS.fetch(:operation)] = operation if operation
hash[LOG_KEYS.fetch(:file_id)] = file_id if file_id
hash[LOG_KEYS.fetch(:filename)] = filename if filename
hash[LOG_KEYS.fetch(:mime_type)] = mime_type if mime_type
hash[LOG_KEYS.fetch(:size)] = size if size
hash[LOG_KEYS.fetch(:metadata)] = metadata if metadata
hash[LOG_KEYS.fetch(:duration)] = duration if duration
# Add CarrierWave-specific fields if they're present
hash[LOG_KEYS.fetch(:uploader)] = uploader if uploader
hash[LOG_KEYS.fetch(:model)] = model if model
hash[LOG_KEYS.fetch(:mount_point)] = mount_point if mount_point
hash
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "interfaces/additional_data_field"
- 1
require_relative "interfaces/message_field"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "shared/merge_additional_data_fields"
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
require_relative "../log_keys"
- 1
module LogStruct
- 1
module Log
# Exception log entry for Ruby exceptions with class, message, and backtrace
- 1
class Error < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include Interfaces::AdditionalDataField
- 1
include Interfaces::MessageField
- 1
include MergeAdditionalDataFields
- 1
ErrorEvent = T.type_alias {
- 1
Event::Error
}
# Common fields
- 1
const :source, Source # Used by all sources, should not have a default.
- 1
const :event, ErrorEvent, default: T.let(Event::Error, ErrorEvent)
- 6
const :timestamp, Time, factory: -> { Time.now }
- 1
const :level, Level, default: T.let(Level::Error, Level)
# Exception-specific fields
- 1
const :err_class, T.class_of(StandardError)
- 1
const :message, String
- 1
const :backtrace, T.nilable(T::Array[String]), default: nil
- 1
const :additional_data, T::Hash[Symbol, T.untyped], default: {}
# Convert the log entry to a hash for serialization
- 2
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
- 2
hash = serialize_common(strict)
- 2
merge_additional_data_fields(hash)
# Add exception-specific fields
- 2
hash[LOG_KEYS.fetch(:err_class)] = err_class.name
- 2
hash[LOG_KEYS.fetch(:message)] = message
- 2
if backtrace.is_a?(Array) && backtrace.any?
hash[LOG_KEYS.fetch(:backtrace)] = backtrace.first(10)
end
- 2
hash
end
# Create an Error log from a Ruby StandardError
- 1
sig {
- 1
params(
source: Source,
ex: StandardError,
additional_data: T::Hash[Symbol, T.untyped]
).returns(Log::Error)
}
- 1
def self.from_exception(source, ex, additional_data = {})
- 4
new(
source: source,
message: ex.message,
err_class: ex.class,
backtrace: ex.backtrace,
additional_data: additional_data
)
end
end
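# Typical usage when rescuing (do_risky_work and the context key are
# illustrative; Source::App is one of the Source enum values):
#
# ```ruby
# begin
#   do_risky_work
# rescue => e
#   Rails.logger.error(LogStruct::Log::Error.from_exception(Source::App, e, user_id: 42))
# end
# ```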
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "interfaces/additional_data_field"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "shared/merge_additional_data_fields"
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
require_relative "../log_keys"
- 1
module LogStruct
- 1
module Log
# GoodJob log entry for structured logging
#
# GoodJob is a PostgreSQL-based ActiveJob backend that provides reliable,
# scalable job processing for Rails applications. This log class captures
# GoodJob-specific events including job execution, database operations,
# error handling, and performance metrics.
#
# ## Key Features Logged:
# - Job execution lifecycle (enqueue, start, finish, retry)
# - Database-backed job persistence events
# - Error handling and retry logic
# - Job batching and bulk operations
# - Performance metrics and timing data
# - Thread and process information
#
# ## Usage Examples:
#
# ```ruby
# # Job execution logging
# LogStruct::Log::GoodJob.new(
# event: Event::Start,
# job_id: "job_123",
# job_class: "UserNotificationJob",
# queue_name: "default",
# execution_time: 1.5
# )
#
# # Error logging
# LogStruct::Log::GoodJob.new(
# event: Event::Error,
# job_id: "job_123",
# error_class: "StandardError",
# error_message: "Connection failed"
# )
# ```
- 1
class GoodJob < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include Interfaces::AdditionalDataField
- 1
include SerializeCommon
- 1
include MergeAdditionalDataFields
# Valid event types for GoodJob operations
- 1
GoodJobEvent = T.type_alias {
- 1
T.any(
Event::Log, # General logging
Event::Enqueue, # Job queued
Event::Start, # Job execution started
Event::Finish, # Job completed successfully
Event::Error, # Job failed with error
Event::Schedule # Job scheduled for future execution
)
}
# Common fields
- 1
const :source, Source::Job, default: T.let(Source::Job, Source::Job)
- 1
const :event, GoodJobEvent
- 36
const :timestamp, Time, factory: -> { Time.now }
- 1
const :level, Level, default: T.let(Level::Info, Level)
# Job identification fields
- 1
const :job_id, T.nilable(String), default: nil
- 1
const :job_class, T.nilable(String), default: nil
- 1
const :queue_name, T.nilable(String), default: nil
- 1
const :batch_id, T.nilable(String), default: nil
- 1
const :job_label, T.nilable(String), default: nil
# Job execution context
- 1
const :arguments, T.nilable(T::Array[T.untyped]), default: nil
- 1
const :executions, T.nilable(Integer), default: nil
- 1
const :exception_executions, T.nilable(Integer), default: nil
- 1
const :execution_time, T.nilable(Float), default: nil
- 1
const :scheduled_at, T.nilable(Time), default: nil
# Error information
- 1
const :error_class, T.nilable(String), default: nil
- 1
const :error_message, T.nilable(String), default: nil
- 1
const :error_backtrace, T.nilable(T::Array[String]), default: nil
# GoodJob-specific metadata
- 1
const :process_id, T.nilable(Integer), default: nil
- 1
const :thread_id, T.nilable(String), default: nil
- 1
const :priority, T.nilable(Integer), default: nil
- 1
const :cron_key, T.nilable(String), default: nil
- 1
const :database_connection_name, T.nilable(String), default: nil
# Performance and metrics
- 1
const :wait_time, T.nilable(Float), default: nil
- 1
const :run_time, T.nilable(Float), default: nil
- 1
const :finished_at, T.nilable(Time), default: nil
# Additional contextual data
- 1
const :additional_data, T::Hash[Symbol, T.untyped], default: {}
# Convert the log entry to a hash for serialization
- 2
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
- 22
hash = serialize_common(strict)
- 22
merge_additional_data_fields(hash)
# Add job identification fields
- 22
hash[LOG_KEYS.fetch(:job_id)] = job_id if job_id
- 22
hash[LOG_KEYS.fetch(:job_class)] = job_class if job_class
- 22
hash[LOG_KEYS.fetch(:queue_name)] = queue_name if queue_name
- 22
hash[:batch_id] = batch_id if batch_id
- 22
hash[:job_label] = job_label if job_label
# Add execution context
- 22
hash[LOG_KEYS.fetch(:arguments)] = arguments if arguments
- 22
hash[:executions] = executions if executions
- 22
hash[:exception_executions] = exception_executions if exception_executions
- 22
hash[:execution_time] = execution_time if execution_time
- 22
hash[:scheduled_at] = scheduled_at&.iso8601 if scheduled_at
# Add error information
- 22
hash[LOG_KEYS.fetch(:err_class)] = error_class if error_class
- 22
hash[:error_message] = error_message if error_message
- 22
hash[LOG_KEYS.fetch(:backtrace)] = error_backtrace if error_backtrace
# Add GoodJob-specific metadata
- 22
hash[LOG_KEYS.fetch(:process_id)] = process_id if process_id
- 22
hash[LOG_KEYS.fetch(:thread_id)] = thread_id if thread_id
- 22
hash[:priority] = priority if priority
- 22
hash[:cron_key] = cron_key if cron_key
- 22
hash[:database_connection_name] = database_connection_name if database_connection_name
# Add performance metrics
- 22
hash[:wait_time] = wait_time if wait_time
- 22
hash[:run_time] = run_time if run_time
- 22
hash[:finished_at] = finished_at&.iso8601 if finished_at
- 22
hash
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
module LogStruct
- 1
module Log
- 1
module Interfaces
# Common interface for logs that include an additional_data field
- 1
module AdditionalDataField
- 1
extend T::Sig
- 1
extend T::Helpers
- 1
interface!
# Additional data field for extra context
- 1
sig { abstract.returns(T::Hash[Symbol, T.untyped]) }
- 1
def additional_data; end
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "../../enums/source"
- 1
require_relative "../../enums/event"
- 1
require_relative "../../enums/level"
- 1
module LogStruct
- 1
module Log
- 1
module Interfaces
# Common interface that all log entry types must implement
- 1
module CommonFields
- 1
extend T::Sig
- 1
extend T::Helpers
- 1
interface!
# The source of the log entry (JSON property: src)
- 1
sig { abstract.returns(Source) }
- 1
def source; end
# The event type of the log entry (JSON property: evt)
- 1
sig { abstract.returns(Event) }
- 1
def event; end
# The log level (JSON property: lvl)
- 1
sig { abstract.returns(Level) }
- 1
def level; end
# The timestamp of the log entry (JSON property: ts)
- 1
sig { abstract.returns(Time) }
- 1
def timestamp; end
# All logs must define a custom serialize method
# Because every log is a T::Struct whose serialize returns a hash with symbol
# keys, callers never need to call #deep_symbolize_keys on the result.
- 2
sig { abstract.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true); end
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
module LogStruct
- 1
module Log
- 1
module Interfaces
# Common interface for logs that include a message field
- 1
module MessageField
- 1
extend T::Sig
- 1
extend T::Helpers
- 1
interface!
# Message field
- 1
sig { abstract.returns(T.nilable(String)) }
- 1
def message; end
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
module LogStruct
- 1
module Log
- 1
module Interfaces
# Common interface for request-related fields
# Used by both Request and Security logs
- 1
module RequestFields
- 1
extend T::Sig
- 1
extend T::Helpers
- 1
interface!
# Common request fields
- 1
sig { abstract.returns(T.nilable(String)) }
- 1
def path; end
- 1
sig { abstract.returns(T.nilable(String)) }
- 1
def http_method; end
- 1
sig { abstract.returns(T.nilable(String)) }
- 1
def source_ip; end
- 1
sig { abstract.returns(T.nilable(String)) }
- 1
def user_agent; end
- 1
sig { abstract.returns(T.nilable(String)) }
- 1
def referer; end
- 1
sig { abstract.returns(T.nilable(String)) }
- 1
def request_id; end
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "interfaces/additional_data_field"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "shared/merge_additional_data_fields"
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
require_relative "../log_keys"
- 1
module LogStruct
- 1
module Log
# Plain log entry for structured logging
- 1
class Plain < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include Interfaces::AdditionalDataField
- 1
include SerializeCommon
- 1
include MergeAdditionalDataFields
- 1
PlainEvent = T.type_alias {
- 1
Event::Log
}
# Common fields
- 1
const :source, Source, default: T.let(Source::App, Source)
- 1
const :event, PlainEvent, default: T.let(Event::Log, PlainEvent)
- 1
const :level, Level, default: T.let(Level::Info, Level)
- 6
const :timestamp, Time, factory: -> { Time.now }
# Plain log messages can be any type (String, Number, Array, Hash, etc.)
# Developers might do something like Rails.logger.info(123) or Rails.logger.info(@variable)
# when debugging, or gems might send all kinds of random stuff to the logger.
# We don't want to crash with a type error in any of these cases.
- 1
const :message, T.untyped # rubocop:disable Sorbet/ForbidUntypedStructProps
# Allow people to submit additional data
- 1
const :additional_data, T::Hash[Symbol, T.untyped], default: {}
# Convert the log entry to a hash for serialization
- 2
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
- 19
hash = serialize_common(strict)
- 19
merge_additional_data_fields(hash)
- 19
hash[LOG_KEYS.fetch(:message)] = message
- 19
hash
end
end
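# Whatever a caller passes to the logger is preserved as the message (a sketch;
# exact JSON key names come from the LOG_KEYS mapping):
#
# ```ruby
# LogStruct::Log::Plain.new(message: {user_id: 1}).serialize
# # => common fields (src/evt/lvl/ts) plus the untouched message payload
# ```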
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "interfaces/request_fields"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "shared/add_request_fields"
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
require_relative "../log_keys"
- 1
module LogStruct
- 1
module Log
# Request log entry for structured logging
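# A construction sketch (values are illustrative; note that `http_method`
# serializes under the `method` JSON key):
#
# ```ruby
# log = LogStruct::Log::Request.new(
#   http_method: "GET",
#   path: "/health",
#   status: 200,
#   duration: 1.25
# )
# log.serialize
# # => {src: "rails", evt: "request", lvl: "info", ts: "...", path: "/health", method: "GET", status: 200, duration: 1.25}
# ```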
- 1
class Request < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include Interfaces::RequestFields
- 1
include SerializeCommon
- 1
include AddRequestFields
- 1
RequestEvent = T.type_alias {
- 1
Event::Request
}
# Common fields
- 1
const :source, Source::Rails, default: T.let(Source::Rails, Source::Rails)
- 1
const :event, RequestEvent, default: T.let(Event::Request, RequestEvent)
- 2
const :timestamp, Time, factory: -> { Time.now }
- 1
const :level, Level, default: T.let(Level::Info, Level)
# Request-specific fields
# NOTE: `method` is a reserved word, so we use an `http_method`
# prop and emit `method` in the serialized output
- 1
const :http_method, T.nilable(String), default: nil
- 1
const :path, T.nilable(String), default: nil
- 1
const :format, T.nilable(String), default: nil
- 1
const :controller, T.nilable(String), default: nil
- 1
const :action, T.nilable(String), default: nil
- 1
const :status, T.nilable(Integer), default: nil
- 1
const :duration, T.nilable(Float), default: nil
- 1
const :view, T.nilable(Float), default: nil
- 1
const :db, T.nilable(Float), default: nil
- 1
const :params, T.nilable(T::Hash[Symbol, T.untyped]), default: nil
- 1
const :source_ip, T.nilable(String), default: nil
- 1
const :user_agent, T.nilable(String), default: nil
- 1
const :referer, T.nilable(String), default: nil
- 1
const :request_id, T.nilable(String), default: nil
# Convert the log entry to a hash for serialization
- 1
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
hash = serialize_common(strict)
add_request_fields(hash)
hash[LOG_KEYS.fetch(:http_method)] = http_method if http_method
hash[LOG_KEYS.fetch(:path)] = path if path
hash[LOG_KEYS.fetch(:format)] = format if format
hash[LOG_KEYS.fetch(:controller)] = controller if controller
hash[LOG_KEYS.fetch(:action)] = action if action
hash[LOG_KEYS.fetch(:status)] = status if status
hash[LOG_KEYS.fetch(:duration)] = duration if duration
hash[LOG_KEYS.fetch(:view)] = view if view
hash[LOG_KEYS.fetch(:db)] = db if db
hash[LOG_KEYS.fetch(:params)] = params if params
hash[LOG_KEYS.fetch(:source_ip)] = source_ip if source_ip
hash[LOG_KEYS.fetch(:user_agent)] = user_agent if user_agent
hash[LOG_KEYS.fetch(:referer)] = referer if referer
hash[LOG_KEYS.fetch(:request_id)] = request_id if request_id
hash
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "interfaces/additional_data_field"
- 1
require_relative "interfaces/message_field"
- 1
require_relative "interfaces/request_fields"
- 1
require_relative "shared/add_request_fields"
- 1
require_relative "shared/merge_additional_data_fields"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
require_relative "../enums/source"
- 1
require_relative "../log_keys"
- 1
module LogStruct
- 1
module Log
# Security log entry for structured logging of security-related events
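# A construction sketch (values are illustrative; `event` has no default and
# must be one of the SecurityEvent types below):
#
# ```ruby
# log = LogStruct::Log::Security.new(
#   event: LogStruct::Event::CSRFViolation,
#   message: "CSRF token mismatch",
#   path: "/login",
#   source_ip: "203.0.113.7"
# )
# ```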
- 1
class Security < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include Interfaces::AdditionalDataField
- 1
include Interfaces::MessageField
- 1
include Interfaces::RequestFields
- 1
include SerializeCommon
- 1
include AddRequestFields
- 1
include MergeAdditionalDataFields
- 1
SecurityEvent = T.type_alias {
- 1
T.any(
Event::IPSpoof,
Event::CSRFViolation,
Event::BlockedHost
)
}
# Common fields
- 1
const :source, Source::Security, default: T.let(Source::Security, Source::Security)
- 1
const :event, SecurityEvent
- 1
const :timestamp, Time, factory: -> { Time.now }
- 1
const :level, Level, default: T.let(Level::Error, Level)
# Security-specific fields
- 1
const :message, T.nilable(String), default: nil
- 1
const :blocked_host, T.nilable(String), default: nil
- 1
const :blocked_hosts, T.nilable(T::Array[String]), default: nil
- 1
const :client_ip, T.nilable(String), default: nil
- 1
const :x_forwarded_for, T.nilable(String), default: nil
# Additional data (merged into hash)
- 1
const :additional_data, T::Hash[Symbol, T.untyped], default: {}
# Common request fields
- 1
const :path, T.nilable(String), default: nil
- 1
const :http_method, T.nilable(String), default: nil, name: "method"
- 1
const :source_ip, T.nilable(String), default: nil
- 1
const :user_agent, T.nilable(String), default: nil
- 1
const :referer, T.nilable(String), default: nil
- 1
const :request_id, T.nilable(String), default: nil
# Convert the log entry to a hash for serialization
- 1
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
hash = serialize_common(strict)
add_request_fields(hash)
merge_additional_data_fields(hash)
# Add security-specific fields
hash[LOG_KEYS.fetch(:message)] = message if message
hash[LOG_KEYS.fetch(:blocked_host)] = blocked_host if blocked_host
hash[LOG_KEYS.fetch(:blocked_hosts)] = blocked_hosts if blocked_hosts
hash[LOG_KEYS.fetch(:client_ip)] = client_ip if client_ip
hash[LOG_KEYS.fetch(:x_forwarded_for)] = x_forwarded_for if x_forwarded_for
hash
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "../../log_keys"
- 1
require_relative "../interfaces/request_fields"
- 1
module LogStruct
- 1
module Log
# Shared helper for serializing request-related fields into a log hash
- 1
module AddRequestFields
- 1
extend T::Sig
- 1
extend T::Helpers
- 1
requires_ancestor { Interfaces::RequestFields }
# Helper method to serialize request fields
- 1
sig { params(hash: T::Hash[Symbol, T.untyped]).void }
- 1
def add_request_fields(hash)
# Add request-specific fields if they're present
hash[LOG_KEYS.fetch(:path)] = path if path
hash[LOG_KEYS.fetch(:http_method)] = http_method if http_method # Use `method` in JSON
hash[LOG_KEYS.fetch(:source_ip)] = source_ip if source_ip
hash[LOG_KEYS.fetch(:user_agent)] = user_agent if user_agent
hash[LOG_KEYS.fetch(:referer)] = referer if referer
hash[LOG_KEYS.fetch(:request_id)] = request_id if request_id
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "../../log_keys"
- 1
require_relative "../interfaces/additional_data_field"
- 1
require_relative "serialize_common"
- 1
module LogStruct
- 1
module Log
# Helper module for merging additional data into serialized logs
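# A sketch of the effect (values are illustrative):
#
# ```ruby
# LogStruct::Log::Plain.new(message: "hi", additional_data: {user_id: 1}).serialize
# # => {..., user_id: 1, msg: "hi"} # additional_data keys land at the top level
# ```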
- 1
module MergeAdditionalDataFields
- 1
extend T::Sig
- 1
extend T::Helpers
- 1
include SerializeCommon
- 1
requires_ancestor { T::Struct }
- 1
requires_ancestor { Interfaces::AdditionalDataField }
- 2
sig { params(hash: T::Hash[Symbol, T.untyped]).void }
- 1
def merge_additional_data_fields(hash)
- 55
additional_data.each do |key, value|
- 46
hash[key.to_sym] = value
end
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "../../log_keys"
- 1
require_relative "../interfaces/common_fields"
- 1
module LogStruct
- 1
module Log
# Common log serialization method
- 1
module SerializeCommon
- 1
extend T::Sig
- 1
extend T::Helpers
- 1
requires_ancestor { Interfaces::CommonFields }
# Convert the log entry to a hash for serialization.
# (the strict param is unused, but we need the same signature as the default T::Struct#serialize)
- 2
sig { params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize_common(strict = true)
{
- 55
LOG_KEYS.fetch(:source) => source.serialize.to_s,
LOG_KEYS.fetch(:event) => event.serialize.to_s,
LOG_KEYS.fetch(:level) => level.serialize.to_s,
LOG_KEYS.fetch(:timestamp) => timestamp.iso8601(3)
}
end
# Override as_json to use our custom serialize method instead of default T::Struct serialization
- 2
sig { params(options: T.untyped).returns(T::Hash[String, T.untyped]) }
- 1
def as_json(options = nil)
# Convert symbol keys to strings for JSON
- 5
serialize.transform_keys(&:to_s)
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "interfaces/additional_data_field"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "shared/merge_additional_data_fields"
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
require_relative "../log_keys"
- 1
module LogStruct
- 1
module Log
# Shrine log entry for structured logging
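# A construction sketch (values are illustrative; `event` must be one of the
# ShrineEvent types below):
#
# ```ruby
# log = LogStruct::Log::Shrine.new(
#   event: LogStruct::Event::Upload,
#   storage: "store",
#   location: "uploads/avatar.jpg",
#   uploader: "ImageUploader",
#   duration: 12.5
# )
# ```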
- 1
class Shrine < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include Interfaces::AdditionalDataField
- 1
include SerializeCommon
- 1
include MergeAdditionalDataFields
- 1
ShrineEvent = T.type_alias {
- 1
T.any(
Event::Upload,
Event::Download,
Event::Delete,
Event::Metadata,
Event::Exist,
Event::Unknown
)
}
# Common fields
- 1
const :source, Source::Shrine, default: T.let(Source::Shrine, Source::Shrine)
- 1
const :event, ShrineEvent
- 1
const :timestamp, Time, factory: -> { Time.now }
- 1
const :level, Level, default: T.let(Level::Info, Level)
# Shrine-specific fields
- 1
const :storage, T.nilable(String), default: nil
- 1
const :location, T.nilable(String), default: nil
- 1
const :upload_options, T.nilable(T::Hash[Symbol, T.untyped]), default: nil
- 1
const :download_options, T.nilable(T::Hash[Symbol, T.untyped]), default: nil
- 1
const :options, T.nilable(T::Hash[Symbol, T.untyped]), default: nil
- 1
const :uploader, T.nilable(String), default: nil
- 1
const :duration, T.nilable(Float), default: nil
- 1
const :additional_data, T::Hash[Symbol, T.untyped], default: {}
# Convert the log entry to a hash for serialization
- 1
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
hash = serialize_common(strict)
merge_additional_data_fields(hash)
# Add Shrine-specific fields if they're present
hash[LOG_KEYS.fetch(:storage)] = storage if storage
hash[LOG_KEYS.fetch(:location)] = location if location
hash[LOG_KEYS.fetch(:upload_options)] = upload_options if upload_options
hash[LOG_KEYS.fetch(:download_options)] = download_options if download_options
hash[LOG_KEYS.fetch(:options)] = options if options
hash[LOG_KEYS.fetch(:uploader)] = uploader if uploader
hash[LOG_KEYS.fetch(:duration)] = duration if duration
hash
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "../enums/source"
- 1
require_relative "../enums/event"
- 1
require_relative "../enums/level"
- 1
require_relative "../log_keys"
- 1
module LogStruct
- 1
module Log
# Sidekiq log entry for structured logging
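# A construction sketch (values are illustrative):
#
# ```ruby
# log = LogStruct::Log::Sidekiq.new(
#   message: "Sidekiq heartbeat",
#   process_id: Process.pid,
#   thread_id: Thread.current.object_id
# )
# log.serialize
# # => {src: "sidekiq", evt: "log", ..., msg: "Sidekiq heartbeat", pid: 1234, tid: 5678}
# ```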
- 1
class Sidekiq < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include SerializeCommon
# Define valid event types for Sidekiq (currently only Log is used)
- 2
SidekiqEvent = T.type_alias { Event::Log }
# Common fields
- 1
const :source, Source::Sidekiq, default: T.let(Source::Sidekiq, Source::Sidekiq)
- 1
const :event, SidekiqEvent, default: T.let(Event::Log, SidekiqEvent)
- 1
const :timestamp, Time, factory: -> { Time.now }
- 1
const :level, Level, default: T.let(Level::Info, Level)
# Sidekiq-specific fields
- 1
const :process_id, T.nilable(Integer), default: nil
- 1
const :thread_id, T.nilable(T.any(Integer, String)), default: nil
- 1
const :message, T.nilable(String), default: nil
- 1
const :context, T.nilable(T::Hash[Symbol, T.untyped]), default: nil
# Convert the log entry to a hash for serialization
- 1
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
hash = serialize_common(strict)
# Add Sidekiq-specific fields if they're present
hash[LOG_KEYS.fetch(:message)] = message if message
hash[LOG_KEYS.fetch(:context)] = context if context
hash[LOG_KEYS.fetch(:process_id)] = process_id if process_id
hash[LOG_KEYS.fetch(:thread_id)] = thread_id if thread_id
hash
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require_relative "interfaces/common_fields"
- 1
require_relative "interfaces/additional_data_field"
- 1
require_relative "shared/serialize_common"
- 1
require_relative "shared/merge_additional_data_fields"
- 1
module LogStruct
- 1
module Log
# SQL Query Log Structure
#
# Captures detailed information about SQL queries executed through ActiveRecord.
# This provides structured logging for database operations, including:
# - Query text and operation name
# - Execution timing and performance metrics
# - Row counts and connection information
# - Safely filtered bind parameters
#
# ## Use Cases:
# - Development debugging of N+1 queries
# - Production performance monitoring
# - Database query analysis and optimization
# - Audit trails for data access patterns
#
# ## Security:
# - SQL query text is safe to log (queries are parameterized with `?` placeholders)
# - Bind parameters are filtered through LogStruct's param filters
# - Sensitive data like passwords, tokens are automatically scrubbed
#
# ## Example Usage:
#
# ```ruby
# # Automatically captured when SQL query integration is enabled
# LogStruct.config.integrations.enable_sql_logging = true
#
# # Manual logging (rare)
# sql_log = LogStruct::Log::SQL.new(
# message: "User lookup query",
# sql: "SELECT * FROM users WHERE id = ?",
# name: "User Load",
# duration: 2.3,
# row_count: 1,
# bind_params: [123]
# )
# LogStruct.info(sql_log)
# ```
- 1
class SQL < T::Struct
- 1
extend T::Sig
- 1
include Interfaces::CommonFields
- 1
include Interfaces::AdditionalDataField
- 1
include SerializeCommon
- 1
include MergeAdditionalDataFields
- 1
SQLEvent = T.type_alias {
- 1
Event::Database
}
# Common fields
- 1
const :source, Source, default: T.let(Source::App, Source)
- 1
const :event, SQLEvent, default: T.let(Event::Database, SQLEvent)
- 1
const :level, Level, default: T.let(Level::Info, Level)
- 34
const :timestamp, Time, factory: -> { Time.now }
- 1
const :message, String
# The SQL query that was executed (parameterized, safe to log)
- 1
const :sql, String
# The name of the database operation (e.g., "User Load", "Post Create")
- 1
const :name, String
# Duration of the query execution in milliseconds
- 1
const :duration, Float
# Number of rows affected or returned by the query
- 1
const :row_count, T.nilable(Integer)
# Database connection information (adapter name)
- 1
const :connection_adapter, T.nilable(String)
# Filtered bind parameters (sensitive data removed)
- 1
const :bind_params, T.nilable(T::Array[T.untyped])
# Database name (if available)
- 1
const :database_name, T.nilable(String)
# Connection pool size information (for monitoring)
- 1
const :connection_pool_size, T.nilable(Integer)
# Active connection count (for monitoring)
- 1
const :active_connections, T.nilable(Integer)
# SQL operation type (SELECT, INSERT, UPDATE, DELETE, etc.)
- 1
const :operation_type, T.nilable(String)
# Table names involved in the query (extracted from SQL)
- 1
const :table_names, T.nilable(T::Array[String])
# Allow additional custom data
- 1
const :additional_data, T::Hash[Symbol, T.untyped], default: {}
# Convert the log entry to a hash for serialization
- 2
sig { override.params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
- 10
hash = serialize_common(strict)
- 10
merge_additional_data_fields(hash)
# Add SQL-specific fields using LOG_KEYS mapping for consistency
- 10
hash[LOG_KEYS.fetch(:message)] = message
- 10
hash[LOG_KEYS.fetch(:sql)] = sql
- 10
hash[LOG_KEYS.fetch(:name)] = name
- 10
hash[LOG_KEYS.fetch(:duration)] = duration
- 10
hash[LOG_KEYS.fetch(:row_count)] = row_count
- 10
hash[LOG_KEYS.fetch(:connection_adapter)] = connection_adapter
- 10
hash[LOG_KEYS.fetch(:bind_params)] = bind_params
- 10
hash[LOG_KEYS.fetch(:database_name)] = database_name
- 10
hash[LOG_KEYS.fetch(:connection_pool_size)] = connection_pool_size
- 10
hash[LOG_KEYS.fetch(:active_connections)] = active_connections
- 10
hash[LOG_KEYS.fetch(:operation_type)] = operation_type
- 10
hash[LOG_KEYS.fetch(:table_names)] = table_names
- 10
hash
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
module LogStruct
# Define a mapping of property names to JSON keys
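# For example, from the mapping below:
#
# ```ruby
# LogStruct::LOG_KEYS.fetch(:timestamp)   # => :ts
# LogStruct::LOG_KEYS.fetch(:http_method) # => :method
# LogStruct::LOG_KEYS.fetch(:process_id)  # => :pid
# ```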
- 1
LOG_KEYS = T.let({
# Ruby struct property name => JSON key name
# Shared fields
source: :src,
event: :evt,
timestamp: :ts,
level: :lvl,
# Common fields
message: :msg,
data: :data,
# Request-related fields
path: :path,
http_method: :method, # Use `http_method` because `method` is a reserved word
source_ip: :source_ip,
user_agent: :user_agent,
referer: :referer,
request_id: :request_id,
# HTTP-specific fields
format: :format,
controller: :controller,
action: :action,
status: :status,
duration: :duration,
view: :view,
db: :db,
params: :params,
# Security-specific fields
blocked_host: :blocked_host,
blocked_hosts: :blocked_hosts,
client_ip: :client_ip,
x_forwarded_for: :x_forwarded_for,
# Email-specific fields
to: :to,
from: :from,
subject: :subject,
# Error fields
err_class: :err_class,
backtrace: :backtrace,
# Job-specific fields
job_id: :job_id,
job_class: :job_class,
queue_name: :queue_name,
arguments: :arguments,
retry_count: :retry_count,
# Sidekiq-specific fields
process_id: :pid,
thread_id: :tid,
context: :ctx,
# Storage-specific fields (ActiveStorage)
checksum: :checksum,
exist: :exist,
url: :url,
prefix: :prefix,
range: :range,
# Storage-specific fields (Shrine)
storage: :storage,
operation: :op,
file_id: :file_id,
filename: :filename,
mime_type: :mime_type,
size: :size,
metadata: :metadata,
location: :location,
upload_options: :upload_opts,
download_options: :download_opts,
options: :opts,
uploader: :uploader,
# CarrierWave-specific fields
model: :model,
mount_point: :mount_point,
# SQL-specific fields
sql: :sql,
name: :name,
row_count: :row_count,
connection_adapter: :connection_adapter,
bind_params: :bind_params,
database_name: :database_name,
connection_pool_size: :connection_pool_size,
active_connections: :active_connections,
operation_type: :operation_type,
table_names: :table_names
}.freeze,
T::Hash[Symbol, Symbol])
end
# typed: strict
# frozen_string_literal: true
- 1
require "active_support/tagged_logging"
# Monkey-patch ActiveSupport::TaggedLogging::Formatter to support hash inputs
# This allows us to pass structured data to the logger and have tags incorporated
# directly into the hash instead of being prepended as strings
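# A sketch of the effect (assuming a TaggedLogging-wrapped logger):
#
# ```ruby
# Rails.logger.tagged("req-abc123") do
#   Rails.logger.info("Processing")
#   # The formatter receives {message: "Processing", tags: ["req-abc123"]}
# end
# ```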
- 1
module ActiveSupport
- 1
module TaggedLogging
- 1
module FormatterExtension
- 1
extend T::Sig
- 1
extend T::Helpers
- 1
requires_ancestor { ::ActiveSupport::TaggedLogging::Formatter }
# Override the call method to support hash input/output, and wrap
# plain strings in a Hash under a `message` key.
# The data is then passed to our custom log formatter that transforms it
# into a JSON string before logging.
- 2
sig { params(severity: T.any(String, Symbol), time: Time, progname: T.untyped, data: T.untyped).returns(String) }
- 1
def call(severity, time, progname, data)
# Convert data to a hash if it's not already one
- 74
data = {message: data.to_s} unless data.is_a?(Hash)
# Add current tags to the hash if present
- 74
tags = current_tags
- 74
data[:tags] = tags if tags.present?
# Call the original formatter with our enhanced data
- 74
super
end
end
end
end
- 1
ActiveSupport::TaggedLogging::Formatter.prepend(ActiveSupport::TaggedLogging::FormatterExtension)
# typed: strict
# frozen_string_literal: true
- 1
require_relative "enums/error_reporter"
# Try to require all supported error reporting libraries
# Users may have multiple installed, so we should load all of them
- 1
%w[sentry-ruby bugsnag rollbar honeybadger].each do |gem_name|
- 4
require gem_name
rescue LoadError
# If a particular gem is not available, we'll still load the others
end
- 1
module LogStruct
# MultiErrorReporter provides a unified interface for reporting errors to various services.
# You can also override this with your own error reporter by setting
# LogStruct.config.error_reporting_handler
# NOTE: This is used for cases where an error should be reported
# but the operation should be allowed to continue (e.g. scrubbing log data).
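# A usage sketch (assuming the sentry-ruby gem is installed):
#
# ```ruby
# LogStruct::MultiErrorReporter.reporter = :sentry
# LogStruct::MultiErrorReporter.report_error(error, {job_id: "abc123"})
# ```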
- 1
class MultiErrorReporter
# Class variable to store the selected reporter
- 1
@reporter = T.let(nil, T.nilable(ErrorReporter))
- 1
class << self
- 1
extend T::Sig
- 2
sig { returns(ErrorReporter) }
- 1
def reporter
- 11
@reporter ||= detect_reporter
end
# Set the reporter to use (user-friendly API that accepts symbols)
- 2
sig { params(reporter_type: T.any(ErrorReporter, Symbol)).returns(ErrorReporter) }
- 1
def reporter=(reporter_type)
- 5
@reporter = case reporter_type
when ErrorReporter
reporter_type
when Symbol
- 5
case reporter_type
- 1
when :sentry then ErrorReporter::Sentry
- 1
when :bugsnag then ErrorReporter::Bugsnag
- 1
when :rollbar then ErrorReporter::Rollbar
- 1
when :honeybadger then ErrorReporter::Honeybadger
- 1
when :rails_logger then ErrorReporter::RailsLogger
else
valid_types = ErrorReporter.values.map { |v| ":#{v.serialize}" }.join(", ")
raise ArgumentError, "Unknown reporter type: #{reporter_type}. Valid types are: #{valid_types}"
end
end
end
# Auto-detect which error reporting service to use
- 2
sig { returns(ErrorReporter) }
- 1
def detect_reporter
- 1
if defined?(::Sentry)
- 1
ErrorReporter::Sentry
elsif defined?(::Bugsnag)
ErrorReporter::Bugsnag
elsif defined?(::Rollbar)
ErrorReporter::Rollbar
elsif defined?(::Honeybadger)
ErrorReporter::Honeybadger
else
ErrorReporter::RailsLogger
end
end
# Report an error to the configured error reporting service
- 2
sig { params(error: StandardError, context: T::Hash[T.untyped, T.untyped]).void }
- 1
def report_error(error, context = {})
# Call the appropriate reporter method based on what's available
- 6
case reporter
when ErrorReporter::Sentry
- 2
report_to_sentry(error, context)
when ErrorReporter::Bugsnag
- 1
report_to_bugsnag(error, context)
when ErrorReporter::Rollbar
- 1
report_to_rollbar(error, context)
when ErrorReporter::Honeybadger
- 1
report_to_honeybadger(error, context)
else
- 1
fallback_logging(error, context)
end
end
- 1
private
# Report to Sentry
- 2
sig { params(error: StandardError, context: T::Hash[T.untyped, T.untyped]).void }
- 1
def report_to_sentry(error, context = {})
- 2
return unless defined?(::Sentry)
# Use the proper Sentry interface defined in the RBI
- 2
::Sentry.capture_exception(error, extra: context)
rescue => e
- 1
fallback_logging(e, {original_error: error.class.to_s})
end
# Report to Bugsnag
- 2
sig { params(error: StandardError, context: T::Hash[T.untyped, T.untyped]).void }
- 1
def report_to_bugsnag(error, context = {})
- 1
return unless defined?(::Bugsnag)
- 1
::Bugsnag.notify(error) do |report|
- 1
report.add_metadata(:context, context)
end
rescue => e
fallback_logging(e, {original_error: error.class.to_s})
end
# Report to Rollbar
- 2
sig { params(error: StandardError, context: T::Hash[T.untyped, T.untyped]).void }
- 1
def report_to_rollbar(error, context = {})
- 1
return unless defined?(::Rollbar)
- 1
::Rollbar.error(error, context)
rescue => e
fallback_logging(e, {original_error: error.class.to_s})
end
# Report to Honeybadger
- 2
sig { params(error: StandardError, context: T::Hash[T.untyped, T.untyped]).void }
- 1
def report_to_honeybadger(error, context = {})
- 1
return unless defined?(::Honeybadger)
- 1
::Honeybadger.notify(error, context: context)
rescue => e
fallback_logging(e, {original_error: error.class.to_s})
end
# Fallback logging when no error reporting services are available
# Uses the LogStruct.error method to properly log the error
- 2
sig { params(error: StandardError, context: T::Hash[T.untyped, T.untyped]).void }
- 1
def fallback_logging(error, context = {})
- 2
return if error.nil?
# Create a proper error log entry
- 2
error_log = Log::Error.from_exception(
Source::LogStruct,
error,
context
)
# Use LogStruct.error to properly log the error
- 2
LogStruct.error(error_log)
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require "digest"
- 1
require_relative "hash_utils"
- 1
module LogStruct
# This class contains methods for filtering sensitive data in logs
# It is used by Formatter to determine which keys should be filtered
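# A sketch of the behavior (assuming :password is in the configured
# filter_keys but not in filter_keys_with_hashes):
#
# ```ruby
# LogStruct::ParamFilters.should_filter_key?(:password) # => true
# LogStruct::ParamFilters.summarize_json_attribute(:password, "hunter2")
# # => {_class: String, _bytes: 7}
# ```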
- 1
class ParamFilters
- 1
class << self
- 1
extend T::Sig
# Check if a key should be filtered based on our defined sensitive keys
- 2
sig { params(key: T.any(String, Symbol)).returns(T::Boolean) }
- 1
def should_filter_key?(key)
- 357
LogStruct.config.filters.filter_keys.include?(key.to_s.downcase.to_sym)
end
# Check if a key should be hashed rather than completely filtered
- 2
sig { params(key: T.any(String, Symbol)).returns(T::Boolean) }
- 1
def should_include_string_hash?(key)
- 5
LogStruct.config.filters.filter_keys_with_hashes.include?(key.to_s.downcase.to_sym)
end
# Convert a value to a filtered summary hash describing its type and size
# (e.g. {_class: String, _bytes: 42})
- 2
sig { params(key: T.any(String, Symbol), data: T.untyped).returns(T::Hash[Symbol, T.untyped]) }
- 1
def summarize_json_attribute(key, data)
- 6
case data
when Hash
- 1
summarize_hash(data)
when Array
- 1
summarize_array(data)
when String
- 3
summarize_string(data, should_include_string_hash?(key))
else
- 1
{_class: data.class}
end
end
# Summarize a String for logging, including details and an SHA256 hash (if configured)
- 2
sig { params(string: String, include_hash: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def summarize_string(string, include_hash)
filtered_string = {
- 5
_class: String
}
- 5
if include_hash
- 2
filtered_string[:_hash] = HashUtils.hash_value(string)
else
- 3
filtered_string[:_bytes] = string.bytesize
end
- 5
filtered_string
end
# Summarize a Hash for logging, including details about the size and keys
- 2
sig { params(hash: T::Hash[T.untyped, T.untyped]).returns(T::Hash[Symbol, T.untyped]) }
- 1
def summarize_hash(hash)
- 3
return {_class: "Hash", _empty: true} if hash.empty?
# Don't include byte size if hash contains any filtered keys
- 7
has_sensitive_keys = hash.keys.any? { |key| should_filter_key?(key) }
summary = {
- 3
_class: Hash,
_keys_count: hash.keys.size,
_keys: hash.keys.map(&:to_sym).take(10)
}
# Only add byte size if no sensitive keys are present
- 3
summary[:_bytes] = hash.to_json.bytesize unless has_sensitive_keys
- 3
summary
end
# Summarize an Array for logging, including details about the size and items
- 2
sig { params(array: T::Array[T.untyped]).returns(T::Hash[Symbol, T.untyped]) }
- 1
def summarize_array(array)
- 3
return {_class: "Array", _empty: true} if array.empty?
{
- 2
_class: Array,
_count: array.size,
_bytes: array.to_json.bytesize
}
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require "rails"
- 1
require "semantic_logger"
- 1
require_relative "formatter"
- 1
require_relative "semantic_logger/setup"
- 1
module LogStruct
# Railtie to integrate with Rails
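# Both initializers are no-ops when LogStruct is disabled; applications can
# opt out in an initializer (a sketch):
#
# ```ruby
# # config/initializers/log_struct.rb
# LogStruct.configure do |config|
#   config.enabled = false
# end
# ```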
- 1
class Railtie < ::Rails::Railtie
# Configure early, right after logger initialization
- 1
initializer "logstruct.configure_logger", after: :initialize_logger do |app|
next unless LogStruct.enabled?
# Use SemanticLogger for powerful logging features
LogStruct::SemanticLogger::Setup.configure_semantic_logger(app)
end
# Setup all integrations after logger setup is complete
- 1
initializer "logstruct.setup", before: :build_middleware_stack do |app|
next unless LogStruct.enabled?
# Merge Rails filter parameters into our filters
LogStruct.merge_rails_filter_parameters!
# Set up all integrations
Integrations.setup_integrations
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require "semantic_logger"
- 1
require_relative "formatter"
- 1
module LogStruct
- 1
module SemanticLogger
# Development-Optimized Colorized JSON Formatter
#
# This formatter extends SemanticLogger's Color formatter to provide beautiful,
# readable JSON output in development environments. It significantly improves
# the developer experience when working with structured logs.
#
# ## Benefits of Colorized Output:
#
# ### Readability
# - **Syntax highlighting**: JSON keys, values, and data types are color-coded
# - **Visual hierarchy**: Different colors help identify structure at a glance
# - **Error spotting**: Quickly identify malformed data or unexpected values
# - **Context separation**: Log entries are visually distinct from each other
#
# ### Performance in Development
# - **Faster debugging**: Quickly scan logs without reading every character
# - **Pattern recognition**: Colors help identify common log patterns
# - **Reduced cognitive load**: Less mental effort required to parse log output
# - **Improved workflow**: Spend less time reading logs, more time coding
#
# ### Customization
# - **Configurable colors**: Customize colors for keys, strings, numbers, etc.
# - **Environment-aware**: Automatically disabled in production/CI environments
# - **Fallback support**: Gracefully falls back to standard formatting if needed
#
# ## Color Mapping:
# - **Keys**: Yellow - Easy to spot field names
# - **Strings**: Green - Clear indication of text values
# - **Numbers**: Blue - Numeric values stand out
# - **Booleans**: Magenta - true/false values are distinctive
# - **Null**: Red - Missing values are immediately visible
# - **Logger names**: Cyan - Source identification
#
# ## Integration with SemanticLogger:
# This formatter preserves all SemanticLogger benefits (performance, threading,
# reliability) while adding visual enhancements. It processes LogStruct types,
# hashes, and plain messages with appropriate colorization.
#
# The formatter is automatically enabled in development when `enable_color_output`
# is true (default), providing zero-configuration enhanced logging experience.
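# A configuration sketch (color names must be ones that color_code_for
# below understands):
#
# ```ruby
# formatter = LogStruct::SemanticLogger::ColorFormatter.new(
#   color_map: {key: :bright_yellow, string: :green, number: :blue,
#               bool: :magenta, nil: :red, name: :cyan}
# )
# ```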
- 1
class ColorFormatter < ::SemanticLogger::Formatters::Color
- 1
extend T::Sig
- 2
sig { params(color_map: T.nilable(T::Hash[Symbol, Symbol]), args: T.untyped).void }
- 1
def initialize(color_map: nil, **args)
- 8
super(**args)
- 8
@logstruct_formatter = T.let(LogStruct::Formatter.new, LogStruct::Formatter)
# Set up custom color mapping
- 8
@custom_colors = T.let(color_map || default_color_map, T::Hash[Symbol, Symbol])
end
- 2
sig { override.params(log: ::SemanticLogger::Log, logger: T.untyped).returns(String) }
- 1
def call(log, logger)
# Handle LogStruct types specially with colorization
- 6
if log.payload.is_a?(LogStruct::Log::Interfaces::CommonFields)
# Get the LogStruct formatted JSON
logstruct_json = @logstruct_formatter.call(log.level, log.time, log.name, log.payload)
# Parse and colorize it
begin
parsed_data = T.let(JSON.parse(logstruct_json), T::Hash[String, T.untyped])
colorized_json = colorize_json(parsed_data)
# Use SemanticLogger's prefix formatting but with our colorized content
prefix = format("%<time>s %<level>s [%<process>s] %<name>s -- ",
time: format_time(log.time),
level: format_level(log.level),
process: log.process_info,
name: format_name(log.name))
"#{prefix}#{colorized_json}\n"
rescue JSON::ParserError
# Fallback to standard formatting
super
end
- 6
elsif log.payload.is_a?(Hash) || log.payload.is_a?(T::Struct)
# Process hashes through our formatter then colorize
begin
- 4
logstruct_json = @logstruct_formatter.call(log.level, log.time, log.name, log.payload)
- 4
parsed_data = T.let(JSON.parse(logstruct_json), T::Hash[String, T.untyped])
- 4
colorized_json = colorize_json(parsed_data)
- 4
prefix = format("%<time>s %<level>s [%<process>s] %<name>s -- ",
time: format_time(log.time),
level: format_level(log.level),
process: log.process_info,
name: format_name(log.name))
- 4
"#{prefix}#{colorized_json}\n"
rescue JSON::ParserError
# Fallback to standard formatting
super
end
else
# For plain messages, use SemanticLogger's default colorization
- 2
super
end
end
- 1
private
- 1
sig { returns(LogStruct::Formatter) }
- 1
attr_reader :logstruct_formatter
# Default color mapping for LogStruct JSON
- 2
sig { returns(T::Hash[Symbol, Symbol]) }
- 1
def default_color_map
- 7
{
key: :yellow,
string: :green,
number: :blue,
bool: :magenta,
nil: :red,
name: :cyan
}
end
# Simple JSON colorizer that adds ANSI codes
- 2
sig { params(data: T::Hash[String, T.untyped]).returns(String) }
- 1
def colorize_json(data)
# For now, just return a simple colorized version of the JSON
# This is much simpler than the full recursive approach
- 4
json_str = JSON.pretty_generate(data)
# Apply basic colorization with regex
- 4
json_str.gsub(/"([^"]+)":/, colorize_text('\1', :key) + ":")
.gsub(/: "([^"]*)"/, ": " + colorize_text('\1', :string))
.gsub(/: (\d+\.?\d*)/, ": " + colorize_text('\1', :number))
.gsub(/: (true|false)/, ": " + colorize_text('\1', :bool))
.gsub(": null", ": " + colorize_text("null", :nil))
end
# Add ANSI color codes to text
- 2
sig { params(text: String, color_type: Symbol).returns(String) }
- 1
def colorize_text(text, color_type)
- 20
color = @custom_colors[color_type] || :white
- 20
"\e[#{color_code_for(color)}m#{text}\e[0m"
end
# Format timestamp
- 2
sig { params(time: Time).returns(String) }
- 1
def format_time(time)
- 6
time.strftime("%Y-%m-%d %H:%M:%S.%6N")
end
# Format log level with color
- 2
sig { params(level: T.any(String, Symbol)).returns(String) }
- 1
def format_level(level)
- 4
level_str = level.to_s.upcase[0]
- 4
color = level_color_for(level.to_sym)
- 4
"\e[#{color_code_for(color)}m#{level_str}\e[0m"
end
# Format logger name with color
- 2
sig { params(name: T.nilable(String)).returns(String) }
- 1
def format_name(name)
- 4
return "" unless name
- 4
color = @custom_colors[:name] || :cyan
- 4
"\e[#{color_code_for(color)}m#{name}\e[0m"
end
# Get color for log level
- 2
sig { params(level: Symbol).returns(Symbol) }
- 1
def level_color_for(level)
- 4
case level
when :debug then :magenta
- 4
when :info then :cyan
when :warn then :yellow
when :error then :red
when :fatal then :red
else :cyan
end
end
# Get ANSI color code for color symbol
- 2
sig { params(color: Symbol).returns(String) }
- 1
def color_code_for(color)
- 28
case color
when :black then "30"
- 4
when :red then "31"
- 4
when :green then "32"
- 3
when :yellow then "33"
- 4
when :blue then "34"
- 3
when :magenta then "35"
- 8
when :cyan then "36"
- 2
when :white then "37"
when :bright_black then "90"
when :bright_red then "91"
when :bright_green then "92"
when :bright_yellow then "93"
when :bright_blue then "94"
when :bright_magenta then "95"
when :bright_cyan then "96"
when :bright_white then "97"
else "37" # default to white
end
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require "semantic_logger"
- 1
require_relative "../formatter"
- 1
module LogStruct
- 1
module SemanticLogger
# High-Performance JSON Formatter with LogStruct Integration
#
# This formatter extends SemanticLogger's JSON formatter to provide optimal
# JSON serialization performance while preserving all LogStruct features
# including data filtering, sensitive data scrubbing, and type-safe structures.
#
# ## Performance Advantages Over Rails Logger:
#
# ### Serialization Performance
# - **Direct JSON generation**: Bypasses intermediate object creation
# - **Streaming serialization**: Memory-efficient processing of large objects
# - **Type-optimized paths**: Fast serialization for common data types
# - **Zero-copy operations**: Minimal memory allocation during serialization
#
# ### Memory Efficiency
# - **Object reuse**: Formatter instances are reused across log calls
# - **Lazy evaluation**: Only processes data that will be included in output
# - **Efficient buffering**: Optimal buffer sizes for JSON generation
# - **Garbage collection friendly**: Minimal object allocation reduces GC pressure
#
# ### Integration Benefits
# - **LogStruct compatibility**: Native support for typed log structures
# - **Filter preservation**: Maintains all LogStruct filtering capabilities
# - **Scrubbing integration**: Seamless sensitive data scrubbing
# - **Error handling**: Robust handling of serialization errors
#
# ## Feature Preservation:
# This formatter maintains full compatibility with LogStruct's features:
# - Sensitive data filtering (passwords, tokens, etc.)
# - Recursive object scrubbing and processing
# - Type-safe log structure handling
# - Custom field transformations
# - Metadata preservation and enrichment
#
# ## JSON Output Structure:
# The formatter produces consistent, parseable JSON that includes:
# - Standard log fields (timestamp, level, message, logger name)
# - LogStruct-specific fields (source, event, context)
# - SemanticLogger metadata (process ID, thread ID, tags)
# - Application-specific payload data
#
# This combination provides the performance benefits of SemanticLogger with
# the structured data benefits of LogStruct, resulting in faster, more
# reliable logging for high-traffic applications.
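# A wiring sketch (this mirrors how Setup.add_appenders registers it):
#
# ```ruby
# ::SemanticLogger.add_appender(
#   io: $stdout,
#   formatter: LogStruct::SemanticLogger::Formatter.new
# )
# ```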
- 1
class Formatter < ::SemanticLogger::Formatters::Json
- 1
extend T::Sig
- 2
sig { void }
- 1
def initialize
- 32
super
- 32
@logstruct_formatter = T.let(LogStruct::Formatter.new, LogStruct::Formatter)
end
- 2
sig { params(log: ::SemanticLogger::Log, logger: T.untyped).returns(String) }
- 1
def call(log, logger)
# Handle LogStruct types specially - they get wrapped in payload hash by SemanticLogger
- 24
if log.payload.is_a?(Hash) && log.payload[:payload].is_a?(LogStruct::Log::Interfaces::CommonFields)
# Use our formatter to process LogStruct types
- 20
@logstruct_formatter.call(log.level, log.time, log.name, log.payload[:payload])
- 4
elsif log.payload.is_a?(LogStruct::Log::Interfaces::CommonFields)
# Direct LogStruct (fallback case)
@logstruct_formatter.call(log.level, log.time, log.name, log.payload)
- 4
elsif log.payload.is_a?(Hash) && log.payload[:payload].is_a?(T::Struct)
# T::Struct wrapped in payload hash
@logstruct_formatter.call(log.level, log.time, log.name, log.payload[:payload])
- 4
elsif log.payload.is_a?(Hash) || log.payload.is_a?(T::Struct)
# Process hashes and T::Structs through our formatter
- 1
@logstruct_formatter.call(log.level, log.time, log.name, log.payload)
else
# For plain messages, create a Plain log entry
- 3
message_data = log.payload || log.message
- 3
plain_log = LogStruct::Log::Plain.new(
message: message_data,
timestamp: log.time
)
- 3
@logstruct_formatter.call(log.level, log.time, log.name, plain_log)
end
end
- 1
private
- 1
sig { returns(LogStruct::Formatter) }
- 1
attr_reader :logstruct_formatter
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require "semantic_logger"
- 1
module LogStruct
- 1
module SemanticLogger
# High-Performance Logger with LogStruct Integration
#
# This logger extends SemanticLogger::Logger to provide optimal logging performance
# while seamlessly integrating with LogStruct's typed logging system.
#
# ## Key Benefits Over Rails.logger:
#
# ### Performance
# - **10-100x faster** than Rails' default logger for high-volume applications
# - **Non-blocking I/O**: Uses background threads for actual log writes
# - **Minimal memory allocation**: Efficient object reuse and zero-copy operations
# - **Batched writes**: Reduces system calls by batching multiple log entries
#
# ### Reliability
# - **Thread-safe operations**: Safe for use in multi-threaded environments
# - **Error resilience**: Logger failures don't crash your application
# - **Graceful fallbacks**: Continues operating even if appenders fail
#
# ### Features
# - **Structured logging**: Native support for LogStruct types and hashes
# - **Rich metadata**: Automatic inclusion of process ID, thread ID, timestamps
# - **Tagged context**: Hierarchical tagging for request/job tracking
# - **Multiple destinations**: Simultaneously log to files, STDOUT, cloud services
#
# ### Development Experience
# - **Colorized output**: Beautiful ANSI-colored logs in development
# - **Detailed timing**: Built-in measurement of log processing time
# - **Context preservation**: Maintains Rails.logger compatibility
#
# ## Usage Examples
#
# The logger automatically handles LogStruct types, hashes, and plain messages:
#
# ```ruby
# logger = LogStruct::SemanticLogger::Logger.new("MyApp")
#
# # LogStruct typed logging (optimal performance)
# log_entry = LogStruct::Log::Plain.new(
# message: "User authenticated",
# source: LogStruct::Source::App,
# event: LogStruct::Event::Security
# )
# logger.info(log_entry)
#
# # Hash logging (automatically structured)
# logger.info({
# action: "user_login",
# user_id: 123,
# ip_address: "192.168.1.1"
# })
#
# # Plain string logging (backward compatibility)
# logger.info("User logged in successfully")
# ```
#
# The logger is a drop-in replacement for Rails.logger and maintains full
# API compatibility while providing significantly enhanced performance.
- 1
class Logger < ::SemanticLogger::Logger
- 1
extend T::Sig
- 2
sig { params(name: T.any(String, Symbol, Module, T::Class[T.anything]), level: T.nilable(Symbol), filter: T.untyped).void }
- 1
def initialize(name = "Application", level: nil, filter: nil)
# SemanticLogger::Logger expects positional arguments, not named arguments
- 35
super(name, level, filter)
end
# Override log methods to handle LogStruct types
- 1
%i[debug info warn error fatal].each do |level|
- 5
define_method(level) do |message = nil, payload = nil, &block|
# If message is a LogStruct type, use it as payload
- 30
if message.is_a?(LogStruct::Log::Interfaces::CommonFields) ||
message.is_a?(T::Struct) ||
message.is_a?(Hash)
- 25
payload = message
- 25
message = nil
- 25
super(message, payload: payload, &block)
else
# For plain string messages, pass them through normally
- 5
super(message, payload, &block)
end
end
end
# Support for tagged logging
- 2
sig { params(tags: T.untyped, block: T.proc.returns(T.untyped)).returns(T.untyped) }
- 1
def tagged(*tags, &block)
# Convert tags to array and pass individually to avoid splat issues
- 1
tag_array = tags.flatten
- 1
if tag_array.empty?
super(&block)
else
- 1
super(*T.unsafe(tag_array), &block)
end
end
# Ensure compatibility with Rails.logger interface
- 1
sig { returns(T::Array[T.any(String, Symbol)]) }
- 1
def current_tags
::SemanticLogger.tags
end
- 1
sig { void }
- 1
def clear_tags!
# SemanticLogger doesn't have clear_tags!, use pop_tags instead
count = ::SemanticLogger.tags.length
::SemanticLogger.pop_tags(count) if count > 0
end
- 1
sig { params(tags: T.untyped).returns(T::Array[T.untyped]) }
- 1
def push_tags(*tags)
flat = tags.flatten.compact
flat.each { |tag| ::SemanticLogger.push_tags(tag) }
flat
end
- 1
sig { params(count: Integer).void }
- 1
def pop_tags(count = 1)
::SemanticLogger.pop_tags(count)
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require "semantic_logger"
- 1
require_relative "formatter"
- 1
require_relative "color_formatter"
- 1
require_relative "logger"
- 1
module LogStruct
# SemanticLogger Integration
#
# LogStruct uses SemanticLogger as its core logging engine, providing significant
# performance and functionality benefits over Rails' default logger:
#
# ## Performance Benefits
# - **Asynchronous logging**: Logs are written in a background thread, eliminating
# I/O blocking in your main application threads
# - **High throughput**: Can handle 100,000+ log entries per second
# - **Memory efficient**: Structured data processing with minimal allocations
# - **Zero-copy serialization**: Direct JSON generation without intermediate objects
#
# ## Reliability Benefits
# - **Thread-safe**: All operations are thread-safe by design
# - **Graceful degradation**: Continues logging even if appenders fail
# - **Error isolation**: Logging errors don't crash your application
# - **Buffered writes**: Reduces disk I/O with intelligent batching
#
# ## Feature Benefits
# - **Multiple appenders**: Log to files, STDOUT, databases, cloud services simultaneously
# - **Structured metadata**: Rich context including process ID, thread ID, tags, and more
# - **Log filtering**: Runtime filtering by logger name, level, or custom rules
# - **Formatters**: Pluggable output formatting (JSON, colorized, custom)
# - **Metrics integration**: Built-in performance metrics and timing data
#
# ## Development Experience
# - **Colorized output**: Beautiful, readable logs in development with ANSI colors
# - **Tagged logging**: Hierarchical context tracking (requests, jobs, etc.)
# - **Debugging tools**: Detailed timing and memory usage information
# - **Hot reloading**: Configuration changes without application restart
#
# ## Production Benefits
# - **Log rotation**: Automatic file rotation with size/time-based policies
# - **Compression**: Automatic log compression to save disk space
# - **Cloud integration**: Direct integration with CloudWatch, Splunk, etc.
# - **Alerting**: Built-in support for error alerting and monitoring
#
# ## LogStruct Specific Enhancements
# - **Type safety**: Full Sorbet type annotations for compile-time error detection
# - **Structured data**: Native support for LogStruct's typed log structures
# - **Filtering integration**: Seamless integration with LogStruct's data filters
# - **Error handling**: Enhanced error reporting with full stack traces and context
#
# SemanticLogger is a production-grade logging framework used by companies processing
# millions of requests per day. It provides the performance and reliability needed
# for high-traffic Rails applications while maintaining an elegant developer experience.
- 1
module SemanticLogger
# Handles setup and configuration of SemanticLogger for Rails applications
#
# This module provides the core integration between LogStruct and SemanticLogger,
# configuring appenders, formatters, and logger replacement to provide optimal
# logging performance while maintaining full compatibility with Rails conventions.
- 1
module Setup
- 1
extend T::Sig
# Configures SemanticLogger as the primary logging engine for the Rails application
#
# This method replaces Rails' default logger with SemanticLogger, providing:
# - **10-100x performance improvement** for high-volume logging
# - **Non-blocking I/O** through background thread processing
# - **Enhanced reliability** with graceful error handling
# - **Multiple output destinations** (files, STDOUT, cloud services)
# - **Structured metadata** including process/thread IDs and timing
#
# The configuration automatically:
# - Determines optimal log levels based on environment
# - Sets up appropriate appenders (console, file, etc.)
# - Enables colorized output in development
# - Replaces Rails.logger and component loggers
# - Preserves full Rails.logger API compatibility
#
# @param app [Rails::Application] The Rails application instance
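#
# A call sketch (the Railtie invokes this during boot):
#
# ```ruby
# LogStruct::SemanticLogger::Setup.configure_semantic_logger(Rails.application)
# ```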
- 1
sig { params(app: T.untyped).void }
- 1
def self.configure_semantic_logger(app)
# Set SemanticLogger configuration
::SemanticLogger.application = Rails.application.class.module_parent_name
::SemanticLogger.environment = Rails.env
# Determine log level from Rails config
log_level = determine_log_level(app)
::SemanticLogger.default_level = log_level
# Clear existing appenders
::SemanticLogger.clear_appenders!
# Add appropriate appenders based on environment
add_appenders(app)
# Replace Rails.logger with SemanticLogger
replace_rails_logger(app)
end
- 1
sig { params(app: T.untyped).returns(Symbol) }
- 1
def self.determine_log_level(app)
if app.config.log_level
app.config.log_level
elsif Rails.env.production?
:info
elsif Rails.env.test?
:warn
else
:debug
end
end
- 1
sig { params(app: T.untyped).void }
- 1
def self.add_appenders(app)
config = LogStruct.config
# Determine output destination
io = determine_output(app)
if Rails.env.development? && config.integrations.enable_color_output
# Use our colorized LogStruct formatter for development
::SemanticLogger.add_appender(
io: io,
formatter: LogStruct::SemanticLogger::ColorFormatter.new(
color_map: config.integrations.color_map
),
filter: determine_filter
)
else
# Use our custom JSON formatter
::SemanticLogger.add_appender(
io: io,
formatter: LogStruct::SemanticLogger::Formatter.new,
filter: determine_filter
)
end
# Add file appender if configured and not already logging to STDOUT/StringIO
if app.config.paths["log"].first && io != $stdout && !io.is_a?(StringIO)
::SemanticLogger.add_appender(
file_name: app.config.paths["log"].first,
formatter: LogStruct::SemanticLogger::Formatter.new,
filter: determine_filter
)
end
end
- 1
sig { params(app: T.untyped).returns(T.any(IO, StringIO)) }
- 1
def self.determine_output(app)
if ENV["RAILS_LOG_TO_STDOUT"].present?
$stdout
elsif Rails.env.test?
# Use StringIO for tests to avoid cluttering test output
StringIO.new
else
# Fall back to STDOUT when no explicit destination is configured
$stdout
end
end
- 1
sig { returns(T.nilable(Regexp)) }
- 1
def self.determine_filter
# Filter out noisy loggers if configured
config = LogStruct.config
return nil unless config.integrations.filter_noisy_loggers
# Common noisy loggers to filter
/\A(ActionView|ActionController::RoutingError|ActiveRecord::SchemaMigration)/
end
# Replaces Rails.logger and all component loggers with LogStruct's SemanticLogger
#
# This method provides seamless integration by replacing the default Rails logger
# throughout the entire Rails stack, ensuring all logging flows through the
# high-performance SemanticLogger system.
#
# ## Benefits of Complete Logger Replacement:
# - **Consistent performance**: All Rails components benefit from SemanticLogger speed
# - **Unified formatting**: All logs use the same structured JSON format
# - **Centralized configuration**: Single point of control for all logging
# - **Complete compatibility**: Maintains all Rails.logger API contracts
#
# ## Components Updated:
# - Rails.logger (framework core)
# - ActiveRecord::Base.logger (database queries)
# - ActionController::Base.logger (request processing)
# - ActionMailer::Base.logger (email delivery)
# - ActiveJob::Base.logger (background jobs)
# - ActionView::Base.logger (template rendering)
# - ActionCable.server.config.logger (WebSocket connections)
#
# After replacement, all Rails logging maintains API compatibility while gaining
# SemanticLogger's performance, reliability, and feature benefits.
#
# @param app [Rails::Application] The Rails application instance
- 1
sig { params(app: T.untyped).void }
- 1
def self.replace_rails_logger(app)
# Create new SemanticLogger instance
logger = LogStruct::SemanticLogger::Logger.new("Rails")
# Replace Rails.logger
Rails.logger = logger
# Also replace various component loggers
ActiveRecord::Base.logger = logger if defined?(ActiveRecord::Base)
ActionController::Base.logger = logger if defined?(ActionController::Base)
ActionMailer::Base.logger = logger if defined?(ActionMailer::Base)
ActiveJob::Base.logger = logger if defined?(ActiveJob::Base)
ActionView::Base.logger = logger if defined?(ActionView::Base)
ActionCable.server.config.logger = logger if defined?(ActionCable)
# Store reference in app config
app.config.logger = logger
end
end
end
end
# typed: strict
# frozen_string_literal: true
# Note: We use T::Struct for our Log classes so Sorbet is a hard requirement,
# not an optional dependency.
- 1
require "sorbet-runtime"
- 1
require "log_struct/sorbet/serialize_symbol_keys"
# Don't extend T::Sig to all modules! We're just a library, not a private Rails application
# See: https://sorbet.org/docs/sigs
# class Module
# include T::Sig
# end
# typed: strict
# frozen_string_literal: true
- 1
module LogStruct
- 1
module Sorbet
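# Mixin that makes a T::Struct's serialize return deeply-symbolized keys.
#
# A usage sketch (struct name and field are illustrative):
#
# ```ruby
# class MyEvent < T::Struct
#   include LogStruct::Sorbet::SerializeSymbolKeys
#   const :name, String
# end
#
# MyEvent.new(name: "boot").to_h # => {name: "boot"}
# ```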
- 1
module SerializeSymbolKeys
- 1
extend T::Sig
- 1
extend T::Helpers
- 1
requires_ancestor { T::Struct }
- 1
sig { params(strict: T::Boolean).returns(T::Hash[Symbol, T.untyped]) }
- 1
def serialize(strict = true)
super.deep_symbolize_keys
end
- 1
sig { returns(T::Hash[Symbol, T.untyped]) }
- 1
def to_h
serialize
end
end
end
end
# typed: strict
# frozen_string_literal: true
- 1
require "digest"
- 1
module LogStruct
# StringScrubber is inspired by logstop by @ankane: https://github.com/ankane/logstop
# Enhancements:
# - Shows which type of data was filtered
# - Includes an SHA256 hash with filtered emails for request tracing
# - Uses configuration options from LogStruct.config
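# A sketch of the output (the email hash format comes from HashUtils):
#
# ```ruby
# LogStruct::StringScrubber.scrub("Card 4242-4242-4242-4242, contact jane@example.com")
# # => "Card [CREDIT_CARD], contact [EMAIL:<hash>]"
# ```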
- 1
module StringScrubber
- 1
class << self
- 1
extend T::Sig
# Also supports URL-encoded URLs like https%3A%2F%2Fuser%3Asecret%40example.com
# cspell:ignore Fuser Asecret
- 1
URL_PASSWORD_REGEX = /((?:\/\/|%2F%2F)[^\s\/]+(?::|%3A))[^\s\/]+(@|%40)/
- 1
URL_PASSWORD_REPLACEMENT = '\1[PASSWORD]\2'
- 1
EMAIL_REGEX = /\b[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,}\b/i
- 1
CREDIT_CARD_REGEX_SHORT = /\b[3456]\d{15}\b/
- 1
CREDIT_CARD_REGEX_DELIMITERS = /\b[3456]\d{3}[\s-]\d{4}[\s-]\d{4}[\s-]\d{4}\b/
- 1
CREDIT_CARD_REPLACEMENT = "[CREDIT_CARD]"
- 1
PHONE_REGEX = /\b\d{3}[\s-]\d{3}[\s-]\d{4}\b/
- 1
PHONE_REPLACEMENT = "[PHONE]"
- 1
SSN_REGEX = /\b\d{3}[\s-]\d{2}[\s-]\d{4}\b/
- 1
SSN_REPLACEMENT = "[SSN]"
- 1
IP_REGEX = /\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b/
- 1
IP_REPLACEMENT = "[IP]"
- 1
MAC_REGEX = /\b[0-9a-f]{2}(:[0-9a-f]{2}){5}\b/i
- 1
MAC_REPLACEMENT = "[MAC]"
# Scrub sensitive information from a string
- 2
sig { params(string: String).returns(String) }
- 1
def scrub(string)
- 286
return string if string.empty?
- 286
string = string.to_s.dup
- 286
config = LogStruct.config.filters
# Passwords in URLs
- 286
string.gsub!(URL_PASSWORD_REGEX, URL_PASSWORD_REPLACEMENT) if config.url_passwords
# Emails
- 286
if config.email_addresses
- 285
string.gsub!(EMAIL_REGEX) do |email|
- 11
email_hash = HashUtils.hash_value(email)
- 11
"[EMAIL:#{email_hash}]"
end
end
# Credit card numbers
- 286
if config.credit_card_numbers
- 285
string.gsub!(CREDIT_CARD_REGEX_SHORT, CREDIT_CARD_REPLACEMENT)
- 285
string.gsub!(CREDIT_CARD_REGEX_DELIMITERS, CREDIT_CARD_REPLACEMENT)
end
# Phone numbers
- 286
string.gsub!(PHONE_REGEX, PHONE_REPLACEMENT) if config.phone_numbers
# SSNs
- 286
string.gsub!(SSN_REGEX, SSN_REPLACEMENT) if config.ssns
# IPs
- 286
string.gsub!(IP_REGEX, IP_REPLACEMENT) if config.ip_addresses
# MAC addresses
- 286
string.gsub!(MAC_REGEX, MAC_REPLACEMENT) if config.mac_addresses
# Custom scrubber
- 286
custom_scrubber = LogStruct.config.string_scrubbing_handler
- 286
string = custom_scrubber.call(string) if !custom_scrubber.nil?
- 286
string
end
end
end
end
# typed: strict
# frozen_string_literal: true
# cspell:ignore _tnilable
# rubocop:disable Sorbet/ConstantsFromStrings
# Load LogStruct type definitions
- 1
require_relative "../lib/log_struct"
- 1
require "json"
- 1
require "fileutils"
- 1
require "time"
- 1
module LogStruct
- 1
module Tools
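# Exports LogStruct enums, log structs, and key mappings for TypeScript
# consumers. A usage sketch (the output path is illustrative):
#
# ```ruby
# LogStruct::Tools::LogTypesExporter.new("tmp/log-types.ts").export
# ```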
- 1
class LogTypesExporter
- 1
extend T::Sig
- 1
DEFAULT_OUTPUT_TS_FILE = "site/lib/log-generation/log-types.ts"
# Constructor with optional override for log struct classes (for testing)
- 2
sig { params(output_ts_file: String, log_struct_classes: T.nilable(T::Array[T::Class[T::Struct]])).void }
- 1
def initialize(output_ts_file = DEFAULT_OUTPUT_TS_FILE, log_struct_classes = nil)
- 14
@output_ts_file = output_ts_file
- 14
@log_struct_classes = log_struct_classes
end
# Public method to export TypeScript definitions and JSON key mappings to files
- 2
sig { void }
- 1
def export
# Get the data once and reuse for all exports
- 1
data = generate_data
# Export TypeScript definitions
- 1
puts "Exporting LogStruct types to TypeScript..."
- 1
puts "Output file: #{@output_ts_file}"
# Create output directory if needed
- 1
FileUtils.mkdir_p(File.dirname(@output_ts_file))
# Generate the TypeScript content
- 1
content = generate_typescript(data)
# Write to file
- 1
File.write(@output_ts_file, content)
- 1
puts "Exported log types to #{@output_ts_file}"
# Export LOG_KEYS mapping to JSON
- 1
export_keys_to_json
# Export enums and log structs to JSON
- 1
export_data_to_json(data)
end
# Export LOG_KEYS mapping to a JSON file
- 2
sig { params(output_json_file: T.nilable(String)).void }
- 1
def export_keys_to_json(output_json_file = nil)
# Default to the same directory as the TypeScript file
- 1
output_json_file ||= File.join(File.dirname(@output_ts_file), "log-keys.json")
- 1
puts "Exporting LogStruct key mappings to JSON..."
- 1
puts "Output file: #{output_json_file}"
# Create output directory if needed
- 1
FileUtils.mkdir_p(File.dirname(output_json_file))
# Convert LOG_KEYS to a format suitable for JSON
# - Convert keys from symbols to strings
# - Convert values from symbols to strings
- 1
json_keys = LogStruct::LOG_KEYS.transform_keys(&:to_s).transform_values(&:to_s)
# Write to file with pretty formatting
- 1
File.write(output_json_file, JSON.pretty_generate(json_keys))
- 1
puts "Exported key mappings to #{output_json_file}"
end
# Export both enums and log structs to JSON files
- 2
sig { params(data: T::Hash[Symbol, T.untyped]).void }
- 1
def export_data_to_json(data)
# Export enums to JSON
- 1
export_enums_to_json(data[:enums])
# Export log structs to JSON
- 1
export_log_structs_to_json(data[:logs])
end
# Export Sorbet enums to a JSON file
- 2
sig { params(enums_data: T::Hash[Symbol, T::Array[String]], output_json_file: T.nilable(String)).void }
- 1
def export_enums_to_json(enums_data, output_json_file = nil)
# Default to the same directory as the TypeScript file
- 2
output_json_file ||= File.join(File.dirname(@output_ts_file), "sorbet-enums.json")
- 2
puts "Exporting Sorbet enums to JSON..."
- 2
puts "Output file: #{output_json_file}"
# Create output directory if needed
- 2
FileUtils.mkdir_p(File.dirname(output_json_file))
# Format enum data for JSON
- 2
json_enum_data = {}
# For each enum, get the full class name and values
- 2
T::Enum.subclasses
- 14
.select { |klass| klass.name.to_s.start_with?("LogStruct::") }
.each do |enum_class|
# Get the full enum name (e.g., "LogStruct::Level")
- 14
full_name = enum_class.name.to_s
# Get the simple name (e.g., "Level")
- 14
simple_name = full_name.split("::").last
# Skip if we don't have data for this enum
- 14
next unless simple_name && enums_data.key?(simple_name.to_sym)
# Map enum values to their constant names
- 14
values_with_names = enum_class.values.map do |value|
- 826
constant_name = enum_class.constants.find { |const_name| enum_class.const_get(const_name) == value }&.to_s
- 106
serialized = value.serialize
# Return a hash with name and value
{
- 106
name: constant_name,
value: serialized
}
end
# Add to the JSON data
- 14
json_enum_data[full_name] = values_with_names
end
# Write to file with pretty formatting
- 2
File.write(output_json_file, JSON.pretty_generate(json_enum_data))
- 2
puts "Exported Sorbet enums to #{output_json_file}"
end
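# Illustrative output shape for export_enums_to_json (the enum name and values
# here are assumptions, not read from the real enums):
#
#   {
#     "LogStruct::Level": [
#       { "name": "Info", "value": "info" }
#     ]
#   }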
# Export LogStruct log structs to a JSON file
- 2
sig { params(logs_data: T::Hash[String, T::Hash[Symbol, T.untyped]], output_json_file: T.nilable(String)).void }
- 1
def export_log_structs_to_json(logs_data, output_json_file = nil)
# Default to the same directory as the TypeScript file
- 2
output_json_file ||= File.join(File.dirname(@output_ts_file), "sorbet-log-structs.json")
- 2
puts "Exporting LogStruct log structs to JSON..."
- 2
puts "Output file: #{output_json_file}"
# Create output directory if needed
- 2
FileUtils.mkdir_p(File.dirname(output_json_file))
# Format structs data for JSON
- 2
json_structs_data = {}
# Process each log struct class
- 2
logs_data.each do |struct_name, struct_info|
# Get the full class name
- 24
full_name = "LogStruct::Log::#{struct_name}"
# Add to the structs data
- 24
json_structs_data[full_name] = {
name: struct_name,
fields: struct_info[:fields].transform_keys(&:to_s)
}
end
# Write to file with pretty formatting
- 2
File.write(output_json_file, JSON.pretty_generate(json_structs_data))
- 2
puts "Exported LogStruct log structs to #{output_json_file}"
end
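# Illustrative output shape for export_log_structs_to_json (field details are
# assumptions):
#
#   {
#     "LogStruct::Log::Request": {
#       "name": "Request",
#       "fields": { "method": { "optional": false, "type": "string" } }
#     }
#   }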
# Public method to generate TypeScript definitions as a string
# This is the method we can test easily without file I/O
- 2
sig { returns(String) }
- 1
def generate_typescript_definitions
# Get the data
- 6
data = generate_data
# Transform data to TypeScript
- 6
generate_typescript(data)
end
- 2
sig { returns(T::Hash[Symbol, T.untyped]) }
- 1
def generate_data
# Export everything as a hash
{
# Export all enum values from LogStruct module
- 9
enums: export_enums,
# Export log structs
logs: export_log_structs
}
end
# Find and export all T::Enum subclasses in the LogStruct module
- 2
sig { returns(T::Hash[Symbol, T::Array[String]]) }
- 1
def export_enums
- 10
enum_hash = {}
# Find all T::Enum subclasses in the LogStruct module
- 10
T::Enum.subclasses
- 70
.select { |klass| klass.name.to_s.start_with?("LogStruct::") }
.each do |enum_class|
# Extract enum name (last part of the class name)
- 70
enum_name = enum_class.name.to_s.split("::").last&.to_sym
- 70
next if enum_name.nil? # Skip if we couldn't get a valid name
# Add enum values to the hash
- 70
enum_hash[enum_name] = enum_class.values.map(&:serialize)
end
- 10
enum_hash
end
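# Illustrative return shape for export_enums (values are assumptions):
#
#   { Level: ["debug", "info"], Source: ["job", "request"] }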
- 1
private :generate_data
- 2
sig { params(data: T::Hash[Symbol, T.untyped]).returns(String) }
- 1
def generate_typescript(data)
- 7
ts_content = []
# Add file header (We need 'any' for a lot of unstructured Hashes and Arrays)
- 7
ts_content << "/* eslint-disable @typescript-eslint/no-explicit-any */"
- 7
ts_content << "// Auto-generated TypeScript definitions for LogStruct"
- 7
ts_content << "// Generated on #{Time.now.strftime("%Y-%m-%d %H:%M:%S")}"
- 7
ts_content << ""
# Add enum definitions
- 7
ts_content << "// Enum types"
- 7
data[:enums].each do |enum_name, enum_values|
- 35
ts_content << "export enum #{enum_name} {"
- 35
enum_values.sort.each do |value|
- 343
ts_content << " #{value.upcase} = \"#{value}\","
end
- 35
ts_content << "}"
- 35
ts_content << ""
end
# Add LogType enum
- 7
ts_content << "// Log Types"
- 7
ts_content << "export enum LogType {"
- 7
data[:logs].keys.sort.each do |log_type|
- 84
ts_content << " #{log_type.upcase} = \"#{log_type}\","
end
- 7
ts_content << "}"
- 7
ts_content << ""
# Add array of all log types for iteration
- 7
ts_content << "// Array of all log types for iteration"
- 7
ts_content << "export const AllLogTypes: Array<LogType> = ["
- 7
data[:logs].keys.sort.each do |log_type|
- 84
ts_content << " LogType.#{log_type.upcase},"
end
- 7
ts_content << "];"
- 7
ts_content << ""
# Add interface for each log type
- 7
ts_content << "// Log Interfaces"
# Collect all event union types to generate arrays later
- 7
event_arrays = {}
- 7
data[:logs].each do |log_type, log_info|
- 84
ts_content << "export interface #{log_type}Log {"
# Collect valid event types if this log has an enum_union for events
- 84
event_field_info = log_info[:fields][:event]
- 84
if event_field_info &&
event_field_info[:type] == "enum_union" &&
event_field_info[:base_enum] == "Event" &&
event_field_info[:enum_values]&.any?
- 49
event_arrays[log_type] = event_field_info[:enum_values].map do |value|
# Map Ruby enum names to TypeScript enum values (e.g., "IPSpoof" -> "Event.IP_SPOOF")
- 245
case value
- 7
when "IPSpoof" then "Event.IP_SPOOF"
- 7
when "CSRFViolation" then "Event.CSRF_VIOLATION"
else
# Default StudlyCaps -> SCREAMING_SNAKE_CASE conversion. (The special cases
# above exist because acronym-only prefixes like "IPSpoof" have no
# lowercase-to-uppercase boundary for this regex to split on.)
- 231
"Event.#{value.gsub(/([a-z])([A-Z])/, '\1_\2').upcase}"
end
end
end
# Output all fields with types
- 84
log_info[:fields].each do |field_name, field_info|
- 1134
type_str = typescript_type_for(field_info)
- 1134
optional = field_info[:optional] ? "?" : ""
- 1134
ts_content << " #{field_name}#{optional}: #{type_str};"
end
- 84
ts_content << "}"
- 84
ts_content << ""
end
# Add union type for all logs
- 7
ts_content << "// Union type for all logs"
- 7
ts_content << "export type Log ="
- 91
log_types = data[:logs].keys.sort.map { |type| " | #{type}Log" }
- 7
ts_content << log_types.join("\n")
- 7
ts_content << ";"
- 7
ts_content << ""
# Add event arrays for each log type that has an enum_union
- 7
ts_content << "// Event type arrays for log types"
- 7
event_arrays.each do |log_type, event_values|
# Create a type-safe array with a specific union type for each log type's events
- 49
union_type = event_values.join(" | ")
- 49
ts_content << "export const #{log_type}Events: Array<#{union_type}> = ["
- 49
event_values.each do |event|
- 245
ts_content << " #{event},"
end
- 49
ts_content << "];"
- 49
ts_content << ""
end
# Return the TypeScript content as a string
- 7
ts_content.join("\n")
end
- 1
private :generate_typescript
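# Illustrative slice of the emitted TypeScript (names are assumptions inferred
# from the generation logic above, not actual output):
#
#   export enum Level {
#     INFO = "info",
#   }
#
#   export interface RequestLog {
#     method?: string;
#   }
#
#   export type Log =
#    | RequestLog
#   ;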
- 2
sig { returns(T::Hash[String, T::Hash[Symbol, T.untyped]]) }
- 1
def export_log_structs
- 9
result = {}
# Get all log structs using reflection
- 9
T::Struct.subclasses
- 171
.select { |klass| klass.name.to_s.start_with?("LogStruct::Log::") }
.each do |log_class|
# Extract class name (e.g., "Request" from "LogStruct::Log::Request")
- 108
class_name = log_class.name.to_s.split("::").last
# Export fields with their types
- 108
fields = {}
- 108
log_class.props.each do |field_name, prop_info|
# Rename :http_method to :method for the Request struct
- 1458
field_key = field_name
- 1458
field_key = :method if field_name == :http_method && class_name == "Request"
# Get type information
- 1458
type_info = extract_type_info(prop_info)
# Add to fields
- 1458
fields[field_key] = type_info
end
# Add to result
- 108
result[class_name] = {fields: fields}
end
- 9
result
end
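# Illustrative return shape for export_log_structs (the field entry is an
# assumption):
#
#   { "Request" => { fields: { method: { optional: false, type: "string" } } } }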
- 2
sig { params(prop_info: T::Hash[Symbol, T.untyped]).returns(T::Hash[Symbol, T.untyped]) }
- 1
def extract_type_info(prop_info)
# Extract type information from prop_info
- 1465
type_obj = prop_info[:type]
- 1465
type_str = type_obj.to_s
# Debug logging for complex types
# if type_str.include?("T.any") || type_str.include?("SecurityEvent")
# puts "Extracting type info for: #{type_str}"
# puts "Type object class: #{type_obj.class}"
# puts "Type object inspect: #{type_obj.inspect}"
# end
# Check for TypedHash specifically (handles metadata field correctly)
- 1465
if type_obj.is_a?(T::Types::TypedHash)
- 145
return {optional: prop_info[:_tnilable] || false, type: "object"}
end
# Check if this is optional (nilable)
- 1320
is_optional = type_str.include?("T.nilable")
# Basic type information
- 1320
result = {optional: is_optional}
# Check for direct enum values (single value restriction case)
# For example: const :source, Source::Job, default: T.let(Source::Job, Source::Job)
- 1320
if type_obj.is_a?(T::Enum) || type_obj.class&.ancestors&.include?(T::Enum)
# This is a direct reference to a specific enum value (not a type)
# Extract the enum class and the specific value
enum_class = type_obj.class
enum_name = enum_class.name.to_s.split("::").last
# Get the enum value name by finding which constant in the enum class has this value
enum_value_name = T.let(nil, T.nilable(String))
enum_class.constants.each do |const_name|
if enum_class.const_get(const_name) == type_obj
enum_value_name = const_name.to_s
break
end
end
# For example: LogStruct::Source::Job => { type: "enum_single", base_enum: "Source", enum_value: "Job" }
result[:type] = "enum_single"
result[:base_enum] = enum_name
result[:enum_value] = enum_value_name
return result
# Check for T::Types::TEnum with a specific enum value
- 1320
elsif type_obj.is_a?(T::Types::TEnum) && type_str.include?("::") && !type_str.include?("T.nilable")
# Handle specific enum types like LogStruct::Source::Job
# The type string will look like "LogStruct::Source::Job"
- 127
parts = type_str.split("::")
- 127
if parts.size >= 3
# Extract the enum name and specific value
- 127
enum_name = parts[-2]
- 127
enum_value_name = parts[-1]
# For example: LogStruct::Source::Job => { type: "enum_single", base_enum: "Source", enum_value: "Job" }
- 127
result[:type] = "enum_single"
- 127
result[:base_enum] = enum_name
- 127
result[:enum_value] = enum_value_name
- 127
return result
end
# Detect union types (T.any) or type aliases
- 1193
elsif type_str.include?("T.any(") || type_str.include?("LogStruct::Log::")
# First, try to extract the base enum type (Event, Level, Source)
- 82
base_enum = nil
- 82
enum_values = []
# Check if it's an Event union type
- 82
if type_str.include?("Event::")
- 64
base_enum = "Event"
- 64
enum_module = LogStruct::Event
- 18
elsif type_str.include?("Level::")
base_enum = "Level"
enum_module = LogStruct::Level
- 18
elsif type_str.include?("Source::")
base_enum = "Source"
enum_module = LogStruct::Source
end
- 82
if base_enum
- 64
result[:type] = "enum_union"
- 64
result[:base_enum] = base_enum
# Try to parse values from the T.any(...) format for direct T.any usage
- 64
if type_str =~ /T\.any\(([^)]+)\)/
- 64
values_str = $1
# Regex to extract enum constants like Event::IPSpoof
- 64
values_str.scan(/#{base_enum}::([A-Za-z0-9_]+)/) do |match|
- 318
enum_values << match.first
end
end
# For type aliases like SecurityEvent, try to resolve the alias
- 64
if enum_values.empty? && type_str =~ /LogStruct::Log::([A-Za-z0-9_]+)::([A-Za-z0-9_]+Event)/
log_class_name = $1
type_alias_name = $2
# Try to get the type alias from the log class
log_class = begin
Object.const_get("LogStruct::Log::#{log_class_name}")
rescue NameError
nil
end
if log_class&.const_defined?(type_alias_name)
# Try to resolve the type alias through the class hierarchy
begin
# Extract the enum values referenced by the type alias.
# This relies on LogStruct's pattern of defining event aliases with T.any(),
# so we scan the stringified type for any constants from the enum module.
enum_module.constants.each do |const_name|
# Check if this constant is used in the type definition at all
potential_match = "#{base_enum}::#{const_name}"
if type_str.include?(potential_match)
enum_values << const_name.to_s
end
end
rescue => e
# Log the error for debugging but continue with what we have
puts "Error resolving type alias #{type_alias_name}: #{e.message}" if ENV["DEBUG"]
end
end
end
- 64
result[:enum_values] = enum_values unless enum_values.empty?
else
# Handle other types of unions that aren't enum-based
- 18
result[:type] = "any"
end
# Standard type handling for simple types
- 1111
elsif type_str.include?("LogStruct::Level")
- 109
result[:type] = "enum"
- 109
result[:values] = "Level"
- 1002
elsif type_str.include?("LogStruct::Source")
- 27
result[:type] = "enum"
- 27
result[:values] = "Source"
- 975
elsif type_str.include?("LogStruct::Event")
result[:type] = "enum"
result[:values] = "Event"
- 975
elsif type_str.include?("T::Array") || type_str.include?("TypedArray") || (type_str == "T::Array[String]") || prop_info.key?(:array)
- 64
result[:type] = "array"
# Get array item type if available
- 64
if prop_info[:array]
- 37
item_type = prop_info[:array].to_s
- 37
result[:item_type] = if item_type.include?("String")
- 37
"string"
elsif item_type.include?("Integer")
"integer"
elsif item_type.include?("Float")
"number"
elsif item_type.include?("Boolean") || item_type.include?("TrueClass") || item_type.include?("FalseClass")
"boolean"
else
"any"
end
end
- 911
elsif type_str.include?("String")
- 540
result[:type] = "string"
- 371
elsif type_str.include?("Integer")
- 99
result[:type] = "integer"
- 272
elsif type_str.include?("Float")
- 99
result[:type] = "number"
- 173
elsif type_str.include?("Boolean") || type_str.include?("TrueClass") || type_str.include?("FalseClass")
- 9
result[:type] = "boolean"
- 164
elsif type_str.include?("Time")
- 127
result[:type] = "string"
- 127
result[:format] = "date-time"
- 37
elsif type_str.include?("T::Hash")
result[:type] = "object"
# Could extract key/value types here if needed
else
- 37
result[:type] = "any"
end
# Uncomment for debugging
# puts "Detected type: #{result[:type]}"
# puts "Enum values: #{result[:enum_values]}" if result[:enum_values]
- 1193
result
end
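# Illustrative mapping (hypothetical prop): a field declared as
#
#   const :duration, T.nilable(Float)
#
# stringifies to "T.nilable(Float)" and is extracted by the branches above as
# { optional: true, type: "number" }.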
- 2
sig { params(field_info: T::Hash[Symbol, T.untyped]).returns(String) }
- 1
def typescript_type_for(field_info)
- 1141
case field_info[:type]
when "enum"
- 106
field_info[:values]
when "enum_single"
# Handle single enum value restriction
# (e.g., const :source, Source::Job, default: T.let(Source::Job, Source::Job))
- 99
if field_info[:base_enum] && field_info[:enum_value]
# Create a specific enum value reference like: Source.JOB
- 99
"#{field_info[:base_enum]}.#{field_info[:enum_value].upcase}"
else
# Fallback to the base enum if we couldn't extract the specific value
field_info[:base_enum] || "any"
end
when "enum_union"
# Handle union of enum values
- 50
if field_info[:base_enum] && field_info[:enum_values]
# Create a union type like: Event.IP_SPOOF | Event.CSRF_VIOLATION | Event.BLOCKED_HOST
- 50
field_info[:enum_values].map do |value|
# Get the Ruby enum object for the given value name (e.g., Event::IPSpoof)
- 248
enum_class = case field_info[:base_enum]
- 248
when "Event" then LogStruct::Event
when "Level" then LogStruct::Level
when "Source" then LogStruct::Source
end
- 248
if enum_class
# Look up the actual enum value to get its serialized form
enum_value = begin
- 248
enum_class.const_get(value)
rescue NameError
nil
end
# Convert to TypeScript enum constant (serialized value -> uppercase)
- 248
serialized = enum_value&.serialize&.upcase || value.upcase
- 248
"#{field_info[:base_enum]}.#{serialized}"
else
# Fallback if we can't find the enum class
"#{field_info[:base_enum]}.#{value.upcase}"
end
end.join(" | ")
else
# Fallback to the base enum if we couldn't extract specific values
field_info[:base_enum] || "any"
end
when "string"
- 519
if field_info[:format] == "date-time"
end
- 519
"string"
when "integer", "number"
- 154
"number"
when "boolean"
- 7
"boolean"
when "array"
- 50
if field_info[:item_type]
- 29
"#{field_info[:item_type]}[]"
else
- 21
"any[]"
end
when "object"
- 113
"Record<string, any>"
else
- 43
"any"
end
end
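# Illustrative calls (hypothetical field_info hashes):
#
#   typescript_type_for(type: "array", item_type: "string")  #=> "string[]"
#   typescript_type_for(type: "object")                      #=> "Record<string, any>"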
end
end
end
# rubocop:enable Sorbet/ConstantsFromStrings