# Resque worker bootstrap: disables per-job forking, runs an EventMachine
# reactor in a background thread, initializes JamRuby::Stats (InfluxDB),
# and wraps job execution with a 'job.stats' timer via around_perform hooks.
require 'resque'

# Run jobs in the worker process itself instead of forking per job.
# NOTE(review): this appears to exist so the EM thread and the async
# InfluxDB writer (started below) survive across jobs — confirm.
ENV['FORK_PER_JOB'] = 'false'

# Tear down stats and the EventMachine reactor, then hard-exit.
# exit! skips at_exit handlers — presumably deliberate for signal handling.
# NOTE(review): uses bare `Stats` here but `JamRuby::Stats` below — verify
# both constants resolve to the same object at top level.
def shutdown
  puts "Cleaning up resources..."
  Stats.destroy!
  EventMachine.stop_event_loop
  puts "Terminated!"
  exit!
end

# Runs once in the parent worker before the first job fork:
# start EM in a thread, build the InfluxDB config, install signal traps,
# and initialize the stats client.
Resque.before_first_fork do
  current = Thread.current
  # Run the EventMachine reactor on a dedicated thread; `current` is passed
  # so the reactor can coordinate with this (main) thread — see JamWebEventMachine.
  Thread.new do
    JamWebEventMachine.run_em(current)
  end
  #ActiveRecord::Base.establish_connection
  config = {
    influxdb_database: APP_CONFIG.influxdb_database,
    influxdb_username: APP_CONFIG.influxdb_username,
    influxdb_password: APP_CONFIG.influxdb_password,
    influxdb_hosts: APP_CONFIG.influxdb_hosts,
    influxdb_port: APP_CONFIG.influxdb_port,
    influxdb_async: true # if we use async=true, the forked job will die before the stat is sent
    # NOTE(review): the comment above warns against async with forking, yet
    # async is true — presumably safe only because FORK_PER_JOB=false; confirm.
  }
  # handle these events and force a shutdown. this is required I think due to influxdb-client.
  Signal.trap("TERM") do
    shutdown
  end
  Signal.trap("INT") do
    shutdown
  end
  JamRuby::Stats.init(config)
end

# https://devcenter.heroku.com/articles/forked-pg-connections
Resque.before_fork do
  #defined?(ActiveRecord::Base) and
  #  ActiveRecord::Base.connection.disconnect!
  #JamRuby::Stats.destroy!
  # reconnect between jobs
  # NOTE(review): verify_active_connections! was removed in Rails 4.2 —
  # confirm the pinned Rails version still provides it.
  ActiveRecord::Base.connection_handler.verify_active_connections!
end

# Intentionally empty: per-fork reconnect is handled in before_fork above.
Resque.after_fork do
  #defined?(ActiveRecord::Base) and
  #  ActiveRecord::Base.establish_connection
end

# for jobs that do not extend lonely job, just extend this module and get stats
module JamRuby
  module ResqueStats
    # Resque plugin hook: times the job body under the 'job.stats' metric.
    # `Stats` resolves to JamRuby::Stats via module nesting.
    def around_perform(*args)
      Stats.timer('job.stats') do
        begin
          yield
        end
      end
    end
  end
end

require 'resque-lonely_job'

# for jobs that extend lonely job, we override around_perform already implemented in LonelyJob, and call into it
module Resque
  module Plugins
    module JamLonelyJob
      # Same 'job.stats' timing, but delegates to LonelyJob's around_perform
      # via super (LonelyJob is included into this module below).
      # NOTE(review): here `Stats` resolves at top level, not JamRuby::Stats —
      # verify the intended constant.
      def around_perform(*args)
        Stats.timer('job.stats') do
          super
        end
      end
    end
  end
end

# Include LonelyJob after defining our override so `super` in around_perform
# reaches LonelyJob's implementation via the ancestor chain.
Resque::Plugins::JamLonelyJob.module_eval { include Resque::Plugins::LonelyJob }