Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions sentry-ruby/Gemfile
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ gem "webrick"
gem "faraday"
gem "excon"
gem "webmock"
gem "ruby_llm"

group :sequel do
gem "sequel"
Expand Down
97 changes: 97 additions & 0 deletions sentry-ruby/lib/sentry/ruby_llm.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,97 @@
# frozen_string_literal: true

module Sentry
  # Instrumentation for the ruby_llm gem. When the :ruby_llm patch is
  # enabled, RubyLLM::Chat#ask and #execute_tool are wrapped in Sentry
  # spans carrying gen_ai.* attributes, and chat calls may additionally
  # emit breadcrumbs (when :ruby_llm_logger is in breadcrumbs_logger).
  module RubyLLM
    OP_NAME = "gen_ai.chat"
    EXECUTE_TOOL_OP_NAME = "gen_ai.execute_tool"
    SPAN_ORIGIN = "auto.gen_ai.ruby_llm"
    LOGGER_NAME = :ruby_llm_logger

    # Prepended onto ::RubyLLM::Chat, so `super` dispatches to the gem's
    # original implementation and the patch stays transparent to callers.
    module Patch
      # Wraps Chat#ask in a "gen_ai.chat" span and records a breadcrumb.
      # Signature mirrors RubyLLM::Chat#ask exactly.
      def ask(message = nil, with: nil, &block)
        return super unless Sentry.initialized?

        Sentry.with_child_span(op: OP_NAME, start_timestamp: Sentry.utc_now.to_f, origin: SPAN_ORIGIN) do |span|
          result = super
          model_id = @model&.id

          # `span` is nil when no transaction is active; attach data only
          # when a span actually exists.
          annotate_chat_span(span, model_id) unless span.nil?
          record_breadcrumb("chat", model_id, @model&.provider)

          result
        end
      end

      # Wraps Chat#execute_tool in a "gen_ai.execute_tool" span.
      def execute_tool(tool_call)
        return super unless Sentry.initialized?

        Sentry.with_child_span(op: EXECUTE_TOOL_OP_NAME, start_timestamp: Sentry.utc_now.to_f, origin: SPAN_ORIGIN) do |span|
          result = super
          annotate_tool_span(span, tool_call, result) unless span.nil?
          result
        end
      end

      private

      # Attaches request/response metadata for a chat call to +span+.
      def annotate_chat_span(span, model_id)
        span.set_description("chat #{model_id}")
        span.set_data("gen_ai.operation.name", "chat")
        span.set_data("gen_ai.request.model", model_id)
        span.set_data("gen_ai.system", @model&.provider)

        # NOTE(review): assumes the gem appends the assistant reply to
        # @messages, so the last entry is the response — confirm upstream.
        response = @messages&.last
        unless response.nil?
          span.set_data("gen_ai.response.model", response.model_id) if response.respond_to?(:model_id)

          if response.respond_to?(:input_tokens) && response.input_tokens
            span.set_data("gen_ai.usage.input_tokens", response.input_tokens)
          end

          if response.respond_to?(:output_tokens) && response.output_tokens
            span.set_data("gen_ai.usage.output_tokens", response.output_tokens)
          end
        end

        return unless instance_variable_defined?(:@temperature) && @temperature

        span.set_data("gen_ai.request.temperature", @temperature)
      end

      # Attaches tool-call metadata to +span+. The arguments and the
      # (truncated) result are recorded only when send_default_pii is on.
      def annotate_tool_span(span, tool_call, result)
        span.set_description("execute_tool #{tool_call.name}")
        span.set_data("gen_ai.operation.name", "execute_tool")
        span.set_data("gen_ai.tool.name", tool_call.name)
        span.set_data("gen_ai.tool.call.id", tool_call.id)
        span.set_data("gen_ai.tool.type", "function")

        return unless Sentry.configuration.send_default_pii

        span.set_data("gen_ai.tool.call.arguments", tool_call.arguments.to_json) if tool_call.arguments
        # Cap the recorded result at 500 characters.
        span.set_data("gen_ai.tool.call.result", result.to_s[0..499]) if result
      end

      # Emits an info breadcrumb when the :ruby_llm_logger breadcrumbs
      # logger is configured; nil fields are dropped via #compact.
      def record_breadcrumb(operation, name, provider = nil)
        return unless Sentry.initialized?
        return unless Sentry.configuration.breadcrumbs_logger.include?(LOGGER_NAME)

        crumb = Sentry::Breadcrumb.new(
          level: :info,
          category: OP_NAME,
          type: :info,
          data: { operation: operation, name: name, provider: provider }.compact
        )
        Sentry.add_breadcrumb(crumb)
      end
    end
  end
end

# Register the patch under the :ruby_llm key; the prepend only happens
# when the ruby_llm gem has actually been loaded.
Sentry.register_patch(:ruby_llm) do
  ::RubyLLM::Chat.prepend(Sentry::RubyLLM::Patch) if defined?(::RubyLLM::Chat)
end
226 changes: 226 additions & 0 deletions sentry-ruby/spec/sentry/ruby_llm_spec.rb
Original file line number Diff line number Diff line change
@@ -0,0 +1,226 @@
# frozen_string_literal: true

require "spec_helper"

# Minimal stand-ins for the ruby_llm classes the patch touches; defined
# before the patch is required so `prepend` targets these stubs.
module RubyLLM
  # Mirrors the gem's model descriptor: an id and a provider name.
  class Model
    attr_accessor :id, :provider

    def initialize(id:, provider:)
      @id = id
      @provider = provider
    end
  end

  # A single chat message, optionally carrying model/usage metadata.
  class Message
    attr_accessor :model_id, :input_tokens, :output_tokens, :role, :content

    def initialize(role:, content:, model_id: nil, input_tokens: nil, output_tokens: nil)
      @role = role
      @content = content
      @model_id = model_id
      @input_tokens = input_tokens
      @output_tokens = output_tokens
    end
  end

  # A tool invocation request: name, call id, and optional arguments.
  class ToolCall
    attr_accessor :name, :id, :arguments

    def initialize(name:, id:, arguments: nil)
      @name = name
      @id = id
      @arguments = arguments
    end
  end

  # Chat stub: #ask always appends and returns a canned assistant reply
  # with fixed token counts; #execute_tool returns a canned string.
  class Chat
    attr_reader :model, :messages

    def initialize(model:)
      @model = model
      @messages = []
    end

    def ask(message = nil, with: nil, &block)
      reply = Message.new(
        role: :assistant,
        content: "Hello!",
        model_id: @model.id,
        input_tokens: 10,
        output_tokens: 20
      )
      @messages.push(reply)
      reply
    end

    def execute_tool(tool_call)
      "tool_result"
    end
  end
end

# Load the patch after stubs are defined
require "sentry/ruby_llm"

# Specs for the ruby_llm patch: span creation/attributes for #ask and
# #execute_tool, PII gating, breadcrumbs, and no-op behavior without a
# transaction or without Sentry.
RSpec.describe Sentry::RubyLLM do
  let(:model) { RubyLLM::Model.new(id: "gpt-4", provider: "openai") }
  let(:chat) { RubyLLM::Chat.new(model: model) }

  context "with tracing enabled" do
    before do
      perform_basic_setup do |config|
        config.traces_sample_rate = 1.0
        config.enabled_patches << :ruby_llm
      end
    end

    it "records a span for ask" do
      transaction = Sentry.start_transaction
      Sentry.get_current_scope.set_span(transaction)

      chat.ask("Hello")

      spans = transaction.span_recorder.spans
      ai_span = spans.find { |span| span.op == "gen_ai.chat" }

      expect(ai_span).not_to be_nil
      expect(ai_span.description).to eq("chat gpt-4")
      expect(ai_span.origin).to eq("auto.gen_ai.ruby_llm")
      expect(ai_span.data["gen_ai.operation.name"]).to eq("chat")
      expect(ai_span.data["gen_ai.request.model"]).to eq("gpt-4")
      expect(ai_span.data["gen_ai.system"]).to eq("openai")
    end

    it "records response data from the last message" do
      transaction = Sentry.start_transaction
      Sentry.get_current_scope.set_span(transaction)

      chat.ask("Hello")

      spans = transaction.span_recorder.spans
      ai_span = spans.find { |span| span.op == "gen_ai.chat" }

      # The stubbed Chat#ask appends a reply with fixed usage numbers.
      expect(ai_span.data["gen_ai.response.model"]).to eq("gpt-4")
      expect(ai_span.data["gen_ai.usage.input_tokens"]).to eq(10)
      expect(ai_span.data["gen_ai.usage.output_tokens"]).to eq(20)
    end

    it "records a span for execute_tool" do
      transaction = Sentry.start_transaction
      Sentry.get_current_scope.set_span(transaction)

      tool_call = RubyLLM::ToolCall.new(name: "get_weather", id: "call_123", arguments: { location: "Tokyo" })
      chat.execute_tool(tool_call)

      spans = transaction.span_recorder.spans
      tool_span = spans.find { |span| span.op == "gen_ai.execute_tool" }

      expect(tool_span).not_to be_nil
      expect(tool_span.description).to eq("execute_tool get_weather")
      expect(tool_span.origin).to eq("auto.gen_ai.ruby_llm")
      expect(tool_span.data["gen_ai.operation.name"]).to eq("execute_tool")
      expect(tool_span.data["gen_ai.tool.name"]).to eq("get_weather")
      expect(tool_span.data["gen_ai.tool.call.id"]).to eq("call_123")
      expect(tool_span.data["gen_ai.tool.type"]).to eq("function")
    end

    context "when send_default_pii is true" do
      before { Sentry.configuration.send_default_pii = true }

      it "records tool arguments and result" do
        transaction = Sentry.start_transaction
        Sentry.get_current_scope.set_span(transaction)

        tool_call = RubyLLM::ToolCall.new(name: "get_weather", id: "call_123", arguments: { location: "Tokyo" })
        chat.execute_tool(tool_call)

        spans = transaction.span_recorder.spans
        tool_span = spans.find { |span| span.op == "gen_ai.execute_tool" }

        expect(tool_span.data["gen_ai.tool.call.arguments"]).to eq({ location: "Tokyo" }.to_json)
        expect(tool_span.data["gen_ai.tool.call.result"]).to eq("tool_result")
      end
    end

    context "when send_default_pii is false" do
      before { Sentry.configuration.send_default_pii = false }

      it "does not record tool arguments or result" do
        transaction = Sentry.start_transaction
        Sentry.get_current_scope.set_span(transaction)

        tool_call = RubyLLM::ToolCall.new(name: "get_weather", id: "call_123", arguments: { location: "Tokyo" })
        chat.execute_tool(tool_call)

        spans = transaction.span_recorder.spans
        tool_span = spans.find { |span| span.op == "gen_ai.execute_tool" }

        expect(tool_span.data).not_to have_key("gen_ai.tool.call.arguments")
        expect(tool_span.data).not_to have_key("gen_ai.tool.call.result")
      end
    end

    it "sets correct timestamps on span" do
      transaction = Sentry.start_transaction
      Sentry.get_current_scope.set_span(transaction)

      chat.ask("Hello")

      spans = transaction.span_recorder.spans
      ai_span = spans.find { |span| span.op == "gen_ai.chat" }

      expect(ai_span.start_timestamp).not_to be_nil
      expect(ai_span.timestamp).not_to be_nil
      # The stubbed ask returns near-instantly, so both clock readings can
      # be identical on coarse clocks; a strict `<` here is flaky.
      expect(ai_span.start_timestamp).to be <= ai_span.timestamp
    end
  end

  context "with breadcrumb logger enabled" do
    before do
      perform_basic_setup do |config|
        config.traces_sample_rate = 1.0
        config.enabled_patches << :ruby_llm
        config.breadcrumbs_logger << :ruby_llm_logger
      end
    end

    it "records a breadcrumb for ask" do
      transaction = Sentry.start_transaction
      Sentry.get_current_scope.set_span(transaction)

      chat.ask("Hello")

      llm_breadcrumb = Sentry.get_current_scope.breadcrumbs.peek

      expect(llm_breadcrumb).not_to be_nil
      expect(llm_breadcrumb.data[:operation]).to eq("chat")
      expect(llm_breadcrumb.data[:name]).to eq("gpt-4")
      expect(llm_breadcrumb.data[:provider]).to eq("openai")
    end
  end

  context "without active transaction" do
    before do
      perform_basic_setup do |config|
        config.traces_sample_rate = 1.0
        config.enabled_patches << :ruby_llm
      end
    end

    it "does not create spans when no transaction is active" do
      result = chat.ask("Hello")

      # The original behavior must be preserved...
      expect(result).to be_a(RubyLLM::Message)
      expect(chat.messages.last).to eq(result)
      # ...and no span gets attached to the scope.
      expect(Sentry.get_current_scope.get_span).to be_nil
    end
  end

  context "when Sentry is not initialized" do
    # NOTE(review): relies on spec_helper resetting Sentry between
    # examples so earlier perform_basic_setup calls don't leak in.
    it "does not interfere with normal operations" do
      fresh_chat = RubyLLM::Chat.new(model: model)
      result = fresh_chat.ask("Hello")
      expect(result).to be_a(RubyLLM::Message)
    end
  end
end