FEATURE: automation triage using personas (#1126)

## LLM Persona Triage
- Allows automated responses to posts using AI personas
- Configurable to respond as regular posts or whispers
- Adds context-aware formatting for topics and private messages
- Provides special handling for topic metadata (title, category, tags)

## LLM Tool Triage
- Enables custom AI tools to process and respond to posts
- Tools can analyze post content and invoke personas when needed
- Zero-parameter tools can be used for automated workflows
- Not enabled in production yet

## Implementation Details
- Added new scriptable registrations in the discourse_automation/ directory
- Created core implementation in lib/automation/ modules
- Enhanced PromptMessagesBuilder with topic-style formatting
- Added helper methods for persona and tool selection in UI
- Extended AI Bot functionality to support whisper responses
- Added rate limiting to prevent abuse

## Other Changes
- Added comprehensive test coverage for both automation types
- Enhanced tool runner with LLM integration capabilities
- Improved error handling and logging

This feature allows forum admins to configure AI personas to automatically respond to posts based on custom criteria and leverage AI tools for more complex triage workflows.

Tool Triage has been disabled in production while we finalize details of new scripting capabilities.
This commit is contained in:
Sam 2025-03-06 09:41:09 +11:00 committed by GitHub
parent 8863cf0c86
commit e255c7a8f0
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
17 changed files with 899 additions and 61 deletions

View File

@ -90,6 +90,24 @@ en:
label: "Top P"
description: "Top P to use for the LLM, increase to increase randomness (leave empty to use model default)"
llm_tool_triage:
fields:
model:
label: "Model"
description: "The default language model used for triage"
tool:
label: "Tool"
description: "Tool to use for triage (tool must have no parameters defined)"
llm_persona_triage:
fields:
persona:
label: "Persona"
description: "AI Persona to use for triage (must have default LLM and User set)"
whisper:
label: "Reply as Whisper"
description: "Whether the persona's response should be a whisper"
llm_triage:
fields:
system_prompt:

View File

@ -6,6 +6,12 @@ en:
spam: "Flag as spam and hide post"
spam_silence: "Flag as spam, hide post and silence user"
scriptables:
llm_tool_triage:
title: Triage posts using AI Tool
description: "Triage posts using custom logic in an AI tool"
llm_persona_triage:
title: Triage posts using AI Persona
description: "Respond to posts using a specific AI persona"
llm_triage:
title: Triage posts using AI
description: "Triage posts using a large language model"

View File

@ -0,0 +1,55 @@
# frozen_string_literal: true

# Registers the "llm_persona_triage" automation script: when a post is
# created or edited, the configured AI persona replies to it (optionally
# as a staff whisper).
if defined?(DiscourseAutomation)
  DiscourseAutomation::Scriptable.add("llm_persona_triage") do
    version 1
    run_in_background

    triggerables %i[post_created_edited]

    field :persona,
          component: :choices,
          required: true,
          extra: {
            content: DiscourseAi::Automation.available_persona_choices,
          }
    field :whisper, component: :boolean

    script do |context, fields|
      post = context["post"]
      # Never triage bot-authored posts, to avoid reply loops.
      next if post&.user&.bot?

      persona_id = fields["persona"]["value"]
      whisper = fields["whisper"]["value"]

      begin
        # Per-post and global rate limits guard against runaway automations.
        RateLimiter.new(
          Discourse.system_user,
          "llm_persona_triage_#{post.id}",
          SiteSetting.ai_automation_max_triage_per_post_per_minute,
          1.minute,
        ).performed!

        RateLimiter.new(
          Discourse.system_user,
          "llm_persona_triage",
          SiteSetting.ai_automation_max_triage_per_minute,
          1.minute,
        ).performed!

        DiscourseAi::Automation::LlmPersonaTriage.handle(
          post: post,
          persona_id: persona_id,
          whisper: whisper,
          automation: self.automation,
        )
      rescue => e
        Discourse.warn_exception(
          e,
          message: "llm_persona_triage: skipped triage on post #{post.id}",
        )
        # BUG FIX: was `Rails.env.tests?`, which is always false (the Rails
        # env is "test", so the inquirer never matches "tests"); use `test?`
        # so specs actually surface the error.
        raise e if Rails.env.test?
      end
    end
  end
end

View File

@ -0,0 +1,49 @@
# frozen_string_literal: true

# TODO: this is still highly experimental and subject to a lot of change
# leaving it off in production for now Sam
if defined?(DiscourseAutomation) && !Rails.env.production?
  # Registers the "llm_tool_triage" automation script: delegates a newly
  # created/edited post to a zero-parameter custom AI tool.
  DiscourseAutomation::Scriptable.add("llm_tool_triage") do
    version 1
    run_in_background

    triggerables %i[post_created_edited]

    field :tool,
          component: :choices,
          required: true,
          extra: {
            content: DiscourseAi::Automation.available_custom_tools,
          }

    script do |context, fields|
      tool_id = fields["tool"]["value"]
      post = context["post"]
      # BUG FIX: was `return`, which raises LocalJumpError when this stored
      # script block is invoked later, outside its defining method. The
      # sibling llm_persona_triage script correctly uses `next`.
      next if post&.user&.bot?

      begin
        # Per-post and global rate limits guard against runaway automations.
        RateLimiter.new(
          Discourse.system_user,
          "llm_tool_triage_#{post.id}",
          SiteSetting.ai_automation_max_triage_per_post_per_minute,
          1.minute,
        ).performed!

        RateLimiter.new(
          Discourse.system_user,
          "llm_tool_triage",
          SiteSetting.ai_automation_max_triage_per_minute,
          1.minute,
        ).performed!

        DiscourseAi::Automation::LlmToolTriage.handle(
          post: post,
          tool_id: tool_id,
          automation: self.automation,
        )
      rescue => e
        Discourse.warn_exception(e, message: "llm_tool_triage: skipped triage on post #{post.id}")
      end
    end
  end
end

View File

@ -170,7 +170,7 @@ module DiscourseAi
schedule_bot_reply(post) if can_attach?(post)
end
def conversation_context(post)
def conversation_context(post, style: nil)
# Pay attention to the `post_number <= ?` here.
# We want to inject the last post as context because they are translated differently.
@ -205,6 +205,7 @@ module DiscourseAi
)
builder = DiscourseAi::Completions::PromptMessagesBuilder.new
builder.topic = post.topic
context.reverse_each do |raw, username, custom_prompt, upload_ids|
custom_prompt_translation =
@ -245,7 +246,7 @@ module DiscourseAi
end
end
builder.to_a
builder.to_a(style: style || (post.topic.private_message? ? :bot : :topic))
end
def title_playground(post, user)
@ -418,7 +419,7 @@ module DiscourseAi
result
end
def reply_to(post, custom_instructions: nil, &blk)
def reply_to(post, custom_instructions: nil, whisper: nil, context_style: nil, &blk)
# this is a multithreading issue
# post custom prompt is needed and it may not
# be properly loaded, ensure it is loaded
@ -428,12 +429,18 @@ module DiscourseAi
post_streamer = nil
post_type =
post.post_type == Post.types[:whisper] ? Post.types[:whisper] : Post.types[:regular]
(
if (whisper || post.post_type == Post.types[:whisper])
Post.types[:whisper]
else
Post.types[:regular]
end
)
context =
get_context(
participants: post.topic.allowed_users.map(&:username).join(", "),
conversation_context: conversation_context(post),
conversation_context: conversation_context(post, style: context_style),
user: post.user,
)
context[:post_id] = post.id

View File

@ -38,6 +38,7 @@ module DiscourseAi
attach_index(ctx)
attach_upload(ctx)
attach_chain(ctx)
attach_discourse(ctx)
ctx.eval(framework_script)
ctx
end
@ -56,6 +57,7 @@ module DiscourseAi
const llm = {
truncate: _llm_truncate,
generate: _llm_generate,
};
const index = {
@ -70,6 +72,24 @@ module DiscourseAi
setCustomRaw: _chain_set_custom_raw,
};
const discourse = {
getPost: _discourse_get_post,
getUser: _discourse_get_user,
getPersona: function(name) {
return {
respondTo: function(params) {
result = _discourse_respond_to_persona(name, params);
if (result.error) {
throw new Error(result.error);
}
return result;
},
};
},
};
const context = #{JSON.generate(@context)};
function details() { return ""; };
JS
end
@ -175,20 +195,60 @@ module DiscourseAi
"_llm_truncate",
->(text, length) { @llm.tokenizer.truncate(text, length) },
)
mini_racer_context.attach(
"_llm_generate",
->(prompt) do
in_attached_function do
@llm.generate(
convert_js_prompt_to_ruby(prompt),
user: llm_user,
feature_name: "custom_tool_#{tool.name}",
)
end
end,
)
end
# Converts a prompt value handed over from the JS tool sandbox into what
# the LLM layer accepts: a plain String is passed through; a Hash must be
# of the shape { "messages" => [{ "type" => ..., "content" => ... }, ...] }.
#
# @raise [Discourse::InvalidParameters] when the shape is invalid
def convert_js_prompt_to_ruby(prompt)
  if prompt.is_a?(String)
    prompt
  elsif prompt.is_a?(Hash)
    messages = prompt["messages"]
    if !messages.is_a?(Array) || messages.blank?
      raise Discourse::InvalidParameters.new("Prompt must have messages")
    end

    messages.each(&:symbolize_keys!)
    messages.each do |message|
      # Raise a clear error instead of NoMethodError on nil when a message
      # is missing its type.
      raise Discourse::InvalidParameters.new("Every message must have a type") if message[:type].nil?
      message[:type] = message[:type].to_sym
    end

    # Pass the symbolized array we just built (same object as
    # prompt["messages"], but clearer about what is being used).
    DiscourseAi::Completions::Prompt.new(messages: messages)
  else
    raise Discourse::InvalidParameters.new("Prompt must be a string or a hash")
  end
end
# Memoized user the LLM completion is attributed to: an explicit override
# from the tool context, else the triggering post's author, else the bot.
def llm_user
  @llm_user ||= @context[:llm_user] || post&.user || @bot_user
end
# Lazily loads the Post referenced by context[:post_id].
# Uses `defined?` (not `||=`) so that a nil lookup is cached too and the
# database is not re-queried on every call.
def post
  return @post if defined?(@post)

  id = @context[:post_id]
  @post = id ? Post.find_by(id: id) : id
end
def attach_index(mini_racer_context)
mini_racer_context.attach(
"_index_search",
->(*params) do
begin
in_attached_function do
query, options = params
self.running_attached_function = true
options ||= {}
options = options.symbolize_keys
self.rag_search(query, **options)
ensure
self.running_attached_function = false
end
end,
)
@ -198,31 +258,115 @@ module DiscourseAi
mini_racer_context.attach("_chain_set_custom_raw", ->(raw) { self.custom_raw = raw })
end
# Exposes the JS-side `discourse` API (getPost / getUser / persona
# respondTo) to custom tool scripts by attaching host functions to the
# MiniRacer context. Each body runs inside in_attached_function so the
# runner knows a host call is in flight.
def attach_discourse(mini_racer_context)
  mini_racer_context.attach(
    "_discourse_get_post",
    ->(post_id) do
      in_attached_function do
        post = Post.find_by(id: post_id)
        return nil if post.nil?
        # System-user scope: tool scripts see posts regardless of ACLs.
        guardian = Guardian.new(Discourse.system_user)
        # Serializer output is deep-converted to plain values MiniRacer can marshal.
        recursive_as_json(PostSerializer.new(post, scope: guardian, root: false))
      end
    end,
  )
  mini_racer_context.attach(
    "_discourse_get_user",
    ->(user_id_or_username) do
      in_attached_function do
        user = nil
        # Accepts a numeric id, a numeric string (e.g. "42"), or a username.
        if user_id_or_username.is_a?(Integer) ||
           user_id_or_username.to_i.to_s == user_id_or_username
          user = User.find_by(id: user_id_or_username.to_i)
        else
          user = User.find_by(username: user_id_or_username)
        end
        return nil if user.nil?
        guardian = Guardian.new(Discourse.system_user)
        recursive_as_json(UserSerializer.new(user, scope: guardian, root: false))
      end
    end,
  )
  mini_racer_context.attach(
    "_discourse_respond_to_persona",
    ->(persona_name, params) do
      in_attached_function do
        # Looks the persona up by name (the JS API is name-based).
        # if we have 1000s of personas this can be slow ... we may need to optimize
        persona_class = AiPersona.all_personas.find { |persona| persona.name == persona_name }
        return { error: "Persona not found" } if persona_class.nil?
        persona = persona_class.new
        bot = DiscourseAi::AiBot::Bot.as(@bot_user || persona.user, persona: persona)
        playground = DiscourseAi::AiBot::Playground.new(bot)
        # Responds in whichever context the tool was invoked from:
        # a post (forum reply) or a chat message; errors are returned as
        # data so the JS wrapper can raise them in the sandbox.
        if @context[:post_id]
          post = Post.find_by(id: @context[:post_id])
          return { error: "Post not found" } if post.nil?
          reply_post =
            playground.reply_to(
              post,
              custom_instructions: params["instructions"],
              whisper: params["whisper"],
            )
          if reply_post
            return(
              { success: true, post_id: reply_post.id, post_number: reply_post.post_number }
            )
          else
            return { error: "Failed to create reply" }
          end
        elsif @context[:message_id] && @context[:channel_id]
          message = Chat::Message.find_by(id: @context[:message_id])
          channel = Chat::Channel.find_by(id: @context[:channel_id])
          return { error: "Message or channel not found" } if message.nil? || channel.nil?
          reply =
            playground.reply_to_chat_message(message, channel, @context[:context_post_ids])
          if reply
            return { success: true, message_id: reply.id }
          else
            return { error: "Failed to create chat reply" }
          end
        else
          return { error: "No valid context for response" }
        end
      end
    end,
  )
end
def attach_upload(mini_racer_context)
mini_racer_context.attach(
"_upload_create",
->(filename, base_64_content) do
begin
self.running_attached_function = true
# protect against misuse
filename = File.basename(filename)
in_attached_function do
# protect against misuse
filename = File.basename(filename)
Tempfile.create(filename) do |file|
file.binmode
file.write(Base64.decode64(base_64_content))
file.rewind
Tempfile.create(filename) do |file|
file.binmode
file.write(Base64.decode64(base_64_content))
file.rewind
upload =
UploadCreator.new(
file,
filename,
for_private_message: @context[:private_message],
).create_for(@bot_user.id)
upload =
UploadCreator.new(
file,
filename,
for_private_message: @context[:private_message],
).create_for(@bot_user.id)
{ id: upload.id, short_url: upload.short_url, url: upload.url }
{ id: upload.id, short_url: upload.short_url, url: upload.url }
end
end
ensure
self.running_attached_function = false
end
end,
)
@ -238,18 +382,20 @@ module DiscourseAi
raise TooManyRequestsError.new("Tool made too many HTTP requests")
end
self.running_attached_function = true
headers = (options && options["headers"]) || {}
in_attached_function do
headers = (options && options["headers"]) || {}
result = {}
DiscourseAi::AiBot::Tools::Tool.send_http_request(url, headers: headers) do |response|
result[:body] = response.body
result[:status] = response.code.to_i
result = {}
DiscourseAi::AiBot::Tools::Tool.send_http_request(
url,
headers: headers,
) do |response|
result[:body] = response.body
result[:status] = response.code.to_i
end
result
end
result
ensure
self.running_attached_function = false
end
end,
)
@ -264,35 +410,70 @@ module DiscourseAi
raise TooManyRequestsError.new("Tool made too many HTTP requests")
end
self.running_attached_function = true
headers = (options && options["headers"]) || {}
body = options && options["body"]
in_attached_function do
headers = (options && options["headers"]) || {}
body = options && options["body"]
result = {}
DiscourseAi::AiBot::Tools::Tool.send_http_request(
url,
method: method,
headers: headers,
body: body,
) do |response|
result[:body] = response.body
result[:status] = response.code.to_i
result = {}
DiscourseAi::AiBot::Tools::Tool.send_http_request(
url,
method: method,
headers: headers,
body: body,
) do |response|
result[:body] = response.body
result[:status] = response.code.to_i
end
result
rescue => e
if Rails.env.development?
p url
p options
p e
puts e.backtrace
end
raise e
end
result
rescue => e
p url
p options
p e
puts e.backtrace
raise e
ensure
self.running_attached_function = false
end
end,
)
end
end
# Wraps the body of a MiniRacer-attached host function so the runner's
# re-entrancy/timeout guards know a host call is in flight; the flag is
# always cleared, even when the body raises.
def in_attached_function
  self.running_attached_function = true
  yield
ensure
  self.running_attached_function = false
end
# Deep-converts serializers, AR models and nested containers into plain
# JSON-safe Ruby values (hashes, arrays, primitives) that MiniRacer can
# marshal across the JS boundary.
def recursive_as_json(obj)
  case obj
  when Array
    obj.map { |element| recursive_as_json(element) }
  when Hash
    obj.transform_values { |value| recursive_as_json(value) }
  when ActiveModel::Serializer, ActiveModel::ArraySerializer, ActiveRecord::Base
    recursive_as_json(obj.as_json)
  else
    # Anything else that knows as_json gets converted; plain primitives
    # (strings, numbers, booleans, nil) fall through untouched.
    if obj.respond_to?(:as_json)
      converted = obj.as_json
      # Some objects return themselves from as_json — stop recursing to
      # avoid an infinite loop.
      converted.equal?(obj) ? converted : recursive_as_json(converted)
    else
      obj
    end
  end
end
end
end
end

View File

@ -12,6 +12,15 @@ module DiscourseAi
},
]
end
# Custom AI tools usable by the llm_tool_triage automation: enabled tools
# that declare no parameters (the automation invokes them with an empty
# argument set).
def self.available_custom_tools
  zero_param_tools = AiTool.where(enabled: true).where("parameters = '[]'::jsonb")

  zero_param_tools
    .pluck(:id, :name, :description)
    .map do |id, name, description|
      { id: id, translated_name: name, description: description }
    end
end
def self.available_models
values = DB.query_hash(<<~SQL)
SELECT display_name AS translated_name, id AS id
@ -28,5 +37,19 @@ module DiscourseAi
values
end
# Personas offered in the triage automation UI. Only personas with both a
# dedicated user and a default LLM can respond autonomously.
#
# FIX: the original iterated AR records and called `persona.user.username`
# per row with only `joins(:user)`, triggering an N+1 query; pluck the
# username through the join instead. (`joins(:user)` is an INNER JOIN, so
# it already excludes personas without a user.)
def self.available_persona_choices
  AiPersona
    .joins(:user)
    .where.not(default_llm: nil)
    .pluck(:id, :name, "users.username")
    .map do |id, name, username|
      {
        id: id,
        translated_name: name,
        description: "#{name} (#{username})",
      }
    end
end
end
end

View File

@ -0,0 +1,26 @@
# frozen_string_literal: true

module DiscourseAi
  module Automation
    # Replies to a post with an AI persona; entry point used by the
    # llm_persona_triage automation script.
    module LlmPersonaTriage
      # @param post [Post] post to reply to
      # @param persona_id [Integer] AiPersona id; silently skipped if not found
      # @param whisper [Boolean] reply as a staff whisper when true
      # @param automation [DiscourseAutomation::Automation, nil] currently unused
      # @return [Post, nil] the reply, or nil when skipped/failed
      def self.handle(post:, persona_id:, whisper: false, automation: nil)
        ai_persona = AiPersona.find_by(id: persona_id)
        return if ai_persona.nil?

        persona_class = ai_persona.class_instance
        persona = persona_class.new

        # The persona must have a dedicated user to post as.
        bot_user = ai_persona.user
        return if bot_user.nil?

        bot = DiscourseAi::AiBot::Bot.as(bot_user, persona: persona)
        playground = DiscourseAi::AiBot::Playground.new(bot)

        # :topic context style includes topic metadata (title/category/tags).
        playground.reply_to(post, whisper: whisper, context_style: :topic)
      rescue => e
        Rails.logger.error("Error in LlmPersonaTriage: #{e.message}\n#{e.backtrace.join("\n")}")
        # Surface failures in specs; swallow in production (best-effort triage).
        raise e if Rails.env.test?
        nil
      end
    end
  end
end

View File

@ -0,0 +1,21 @@
# frozen_string_literal: true

module DiscourseAi
  module Automation
    # Delegates a post to a zero-parameter custom AI tool; entry point used
    # by the llm_tool_triage automation script. The tool's JS decides
    # whether and how to respond.
    module LlmToolTriage
      # @param post [Post] the post being triaged
      # @param tool_id [Integer] id of the AiTool to run; skipped if not found
      # @param automation [DiscourseAutomation::Automation, nil] originating automation
      # @return [Object, nil] whatever the tool's invoke() returns, or nil when skipped
      def self.handle(post:, tool_id:, automation: nil)
        tool = AiTool.find_by(id: tool_id)
        return if !tool
        # Only parameterless tools can be driven by automation.
        # (Idiomatic `present?` replaces the original `!...blank?` double negative.)
        return if tool.parameters.present?

        context = {
          post_id: post.id,
          automation_id: automation&.id,
          automation_name: automation&.name,
        }

        runner = tool.runner({}, llm: nil, bot_user: Discourse.system_user, context: context)
        runner.invoke
      end
    end
  end
end

View File

@ -79,6 +79,21 @@ module DiscourseAi
UploadEncoder.encode(upload_ids: message[:upload_ids], max_pixels: max_pixels)
end
# Value equality across all prompt state. Keep the compared fields in
# sync with #hash so prompts behave correctly as Hash keys and in specs.
def ==(other)
  return false unless other.is_a?(Prompt)
  messages == other.messages && tools == other.tools && topic_id == other.topic_id &&
    post_id == other.post_id && max_pixels == other.max_pixels &&
    tool_choice == other.tool_choice
end
# Hash-key equality; delegates to #== so both notions of equality agree.
def eql?(other)
  self == other
end
# Hashes exactly the fields #== compares (required so equal prompts
# collide into the same Hash bucket).
def hash
  [messages, tools, topic_id, post_id, max_pixels, tool_choice].hash
end
private
def validate_message(message)

View File

@ -4,8 +4,10 @@ module DiscourseAi
module Completions
class PromptMessagesBuilder
MAX_CHAT_UPLOADS = 5
MAX_TOPIC_UPLOADS = 5
attr_reader :chat_context_posts
attr_reader :chat_context_post_upload_ids
attr_accessor :topic
def initialize
@raw_messages = []
@ -41,6 +43,7 @@ module DiscourseAi
def to_a(limit: nil, style: nil)
return chat_array(limit: limit) if style == :chat
return topic_array if style == :topic
result = []
# this will create a "valid" messages array
@ -127,6 +130,57 @@ module DiscourseAi
private
# Collapses the builder's raw messages into a single :user message that
# frames the conversation as a forum topic (used by automation/persona
# triage, where the bot is not a first-class conversation participant).
# Prepends topic/PM metadata, transcribes prior posts, and calls out the
# last message as the one being responded to.
def topic_array
  raw_messages = @raw_messages.dup
  user_content = +"You are operating in a Discourse forum.\n\n"
  if @topic
    if @topic.private_message?
      user_content << "Private message info.\n"
    else
      user_content << "Topic information:\n"
    end
    user_content << "- URL: #{@topic.url}\n"
    user_content << "- Title: #{@topic.title}\n"
    if SiteSetting.tagging_enabled
      tags = @topic.tags.pluck(:name)
      # Never leak staff-only (hidden) tags to the model.
      tags -= DiscourseTagging.hidden_tag_names if tags.present?
      user_content << "- Tags: #{tags.join(", ")}\n" if tags.present?
    end
    # Category is omitted for PMs (PMs have no category).
    if !@topic.private_message?
      user_content << "- Category: #{@topic.category.name}\n" if @topic.category
    end
    user_content << "- Number of replies: #{@topic.posts_count - 1}\n\n"
  end
  # Pull the final message out so it can be highlighted separately below.
  last_user_message = raw_messages.pop
  upload_ids = []
  if raw_messages.present?
    user_content << "Here is the conversation so far:\n"
    raw_messages.each do |message|
      user_content << "#{message[:name] || "User"}: #{message[:content]}\n"
      upload_ids.concat(message[:upload_ids]) if message[:upload_ids].present?
    end
  end
  if last_user_message
    user_content << "You are responding to #{last_user_message[:name] || "User"} who just said:\n #{last_user_message[:content]}"
    if last_user_message[:upload_ids].present?
      upload_ids.concat(last_user_message[:upload_ids])
    end
  end
  user_message = { type: :user, content: user_content }
  if upload_ids.present?
    # Keep only the most recent MAX_TOPIC_UPLOADS uploads.
    user_message[:upload_ids] = upload_ids[-MAX_TOPIC_UPLOADS..-1] || upload_ids
  end
  [user_message]
end
def chat_array(limit:)
if @raw_messages.length > 1
buffer =
@ -155,7 +209,7 @@ module DiscourseAi
end
last_message = @raw_messages[-1]
buffer << "#{last_message[:name] || "User"} said #{last_message[:content]} "
buffer << "#{last_message[:name] || "User"}: #{last_message[:content]} "
message = { type: :user, content: buffer }
upload_ids.concat(last_message[:upload_ids]) if last_message[:upload_ids].present?

View File

@ -77,6 +77,8 @@ after_initialize do
# do not autoload this cause we may have no namespace
require_relative "discourse_automation/llm_triage"
require_relative "discourse_automation/llm_report"
require_relative "discourse_automation/llm_tool_triage"
require_relative "discourse_automation/llm_persona_triage"
add_admin_route("discourse_ai.title", "discourse-ai", { use_new_show_route: true })

View File

@ -40,4 +40,28 @@ describe DiscourseAi::Completions::PromptMessagesBuilder do
expected = [{ type: :user, content: "Alice: Echo 123 please\nJames: OK" }]
expect(builder.to_a).to eq(expected)
end
it "should format messages for topic style" do
# Create a topic with tags
topic = Fabricate(:topic, title: "This is an Example Topic")
# Add tags to the topic
topic.tags = [Fabricate(:tag, name: "tag1"), Fabricate(:tag, name: "tag2")]
topic.save!
builder.topic = topic
builder.push(type: :user, content: "I like frogs", name: "Bob")
builder.push(type: :user, content: "How do I solve this?", name: "Alice")
result = builder.to_a(style: :topic)
content = result[0][:content]
expect(content).to include("This is an Example Topic")
expect(content).to include("tag1")
expect(content).to include("tag2")
expect(content).to include("Bob: I like frogs")
expect(content).to include("Alice")
expect(content).to include("How do I solve this")
end
end

View File

@ -0,0 +1,208 @@
# frozen_string_literal: true
return if !defined?(DiscourseAutomation)
describe DiscourseAi::Automation::LlmPersonaTriage do
fab!(:user)
fab!(:bot_user) { Fabricate(:user) }
fab!(:llm_model) do
Fabricate(:llm_model, provider: "anthropic", name: "claude-3-opus", enabled_chat_bot: true)
end
fab!(:ai_persona) do
persona =
Fabricate(
:ai_persona,
name: "Triage Helper",
description: "A persona that helps with triaging posts",
system_prompt: "You are a helpful assistant that triages posts",
default_llm: llm_model,
)
# Create the user for this persona
persona.update!(user_id: bot_user.id)
persona
end
let(:automation) { Fabricate(:automation, script: "llm_persona_triage", enabled: true) }
def add_automation_field(name, value, type: "text")
automation.fields.create!(
component: type,
name: name,
metadata: {
value: value,
},
target: "script",
)
end
before do
SiteSetting.ai_bot_enabled = true
SiteSetting.ai_bot_allowed_groups = "#{Group::AUTO_GROUPS[:trust_level_0]}"
add_automation_field("persona", ai_persona.id, type: "choices")
add_automation_field("whisper", false, type: "boolean")
end
it "can respond to a post using the specified persona" do
post = Fabricate(:post, raw: "This is a test post that needs triage")
response_text = "I've analyzed your post and can help with that."
DiscourseAi::Completions::Llm.with_prepared_responses([response_text]) do
automation.running_in_background!
automation.trigger!({ "post" => post })
end
topic = post.topic.reload
last_post = topic.posts.order(:post_number).last
expect(topic.posts.count).to eq(2)
# Verify that the response was posted by the persona's user
expect(last_post.user_id).to eq(bot_user.id)
expect(last_post.raw).to eq(response_text)
expect(last_post.post_type).to eq(Post.types[:regular]) # Not a whisper
end
it "can respond with a whisper when configured to do so" do
add_automation_field("whisper", true, type: "boolean")
post = Fabricate(:post, raw: "This is another test post for triage")
response_text = "Staff-only response to your post."
DiscourseAi::Completions::Llm.with_prepared_responses([response_text]) do
automation.running_in_background!
automation.trigger!({ "post" => post })
end
topic = post.topic.reload
last_post = topic.posts.order(:post_number).last
# Verify that the response is a whisper
expect(last_post.user_id).to eq(bot_user.id)
expect(last_post.raw).to eq(response_text)
expect(last_post.post_type).to eq(Post.types[:whisper]) # This should be a whisper
end
it "does not respond to posts made by bots" do
bot = Fabricate(:bot)
bot_post = Fabricate(:post, user: bot, raw: "This is a bot post")
# The automation should not trigger for bot posts
DiscourseAi::Completions::Llm.with_prepared_responses(["Response"]) do
automation.running_in_background!
automation.trigger!({ "post" => bot_post })
end
# Verify no new post was created
expect(bot_post.topic.reload.posts.count).to eq(1)
end
it "handles errors gracefully" do
post = Fabricate(:post, raw: "Error-triggering post")
# Set up to cause an error
ai_persona.update!(user_id: nil)
# Should not raise an error
expect {
automation.running_in_background!
automation.trigger!({ "post" => post })
}.not_to raise_error
# Verify no new post was created
expect(post.topic.reload.posts.count).to eq(1)
end
it "passes topic metadata in context when responding to topic" do
# Create a category and tags for the test
category = Fabricate(:category, name: "Test Category")
tag1 = Fabricate(:tag, name: "test-tag")
tag2 = Fabricate(:tag, name: "support")
# Create a topic with category and tags
topic =
Fabricate(
:topic,
title: "Important Question About Feature",
category: category,
tags: [tag1, tag2],
user: user,
)
# Create a post in that topic
_post =
Fabricate(
:post,
topic: topic,
user: user,
raw: "This is a test post in a categorized and tagged topic",
)
post2 =
Fabricate(:post, topic: topic, user: user, raw: "This is another post in the same topic")
# Capture the prompt sent to the LLM to verify it contains metadata
prompt = nil
DiscourseAi::Completions::Llm.with_prepared_responses(
["I've analyzed your question"],
) do |_, _, _prompts|
automation.running_in_background!
automation.trigger!({ "post" => post2 })
prompt = _prompts.first
end
context = prompt.messages[1][:content] # The second message should be the triage prompt
# Verify that topic metadata is included in the context
expect(context).to include("Important Question About Feature")
expect(context).to include("Test Category")
expect(context).to include("test-tag")
expect(context).to include("support")
end
it "passes private message metadata in context when responding to PM" do
# Create a private message topic
pm_topic = Fabricate(:private_message_topic, user: user, title: "Important PM")
# Create initial PM post
pm_post =
Fabricate(
:post,
topic: pm_topic,
user: user,
raw: "This is a private message that needs triage",
)
# Create a follow-up post
pm_post2 =
Fabricate(
:post,
topic: pm_topic,
user: user,
raw: "Adding more context to my private message",
)
# Capture the prompt sent to the LLM
prompt = nil
DiscourseAi::Completions::Llm.with_prepared_responses(
["I've received your private message"],
) do |_, _, _prompts|
automation.running_in_background!
automation.trigger!({ "post" => pm_post2 })
prompt = _prompts.first
end
context = prompt.messages[1][:content]
# Verify that PM metadata is included in the context
expect(context).to include("Important PM")
expect(context).to include(pm_post.raw)
expect(context).to include(pm_post2.raw)
end
end

View File

@ -0,0 +1,113 @@
# frozen_string_literal: true
require "rails_helper"
RSpec.describe DiscourseAi::Automation::LlmToolTriage do
fab!(:solver) { Fabricate(:user) }
fab!(:new_user) { Fabricate(:user, trust_level: TrustLevel[0], created_at: 1.day.ago) }
fab!(:topic) { Fabricate(:topic, user: new_user) }
fab!(:post) { Fabricate(:post, topic: topic, user: new_user, raw: "How do I reset my password?") }
fab!(:llm_model)
fab!(:ai_persona) do
persona = Fabricate(:ai_persona, default_llm: llm_model)
persona.create_user
persona
end
fab!(:tool) do
tool_script = <<~JS
function invoke(params) {
const postId = context.post_id;
const post = discourse.getPost(postId);
const user = discourse.getUser(post.user_id);
if (user.trust_level > 0) {
return {
processed: false,
reason: "User is not new"
};
}
const helper = discourse.getPersona("#{ai_persona.name}");
const answer = helper.respondTo({ post_id: post.id });
return {
answer: answer,
processed: true,
reason: "answered question"
};
}
JS
AiTool.create!(
name: "New User Question Answerer",
tool_name: "new_user_question_answerer",
description: "Automatically answers questions from new users when possible",
parameters: [], # No parameters as required by llm_tool_triage
script: tool_script,
created_by_id: Discourse.system_user.id,
summary: "Answers new user questions",
enabled: true,
)
end
before do
SiteSetting.discourse_ai_enabled = true
SiteSetting.ai_bot_enabled = true
end
it "It is able to answer new user questions" do
result = nil
DiscourseAi::Completions::Llm.with_prepared_responses(
["this is how you reset your password"],
) { result = described_class.handle(post: post, tool_id: tool.id) }
expect(result["processed"]).to eq(true)
response = post.topic.reload.posts.order(:post_number).last
expect(response.raw).to eq("this is how you reset your password")
end
it "Is able to respond as a whisper if instructed" do
# Create a tool with a script that explicitly requests a whisper response
whisper_tool =
AiTool.create!(
name: "Whisper Triage Tool",
tool_name: "whisper_triage_tool",
description: "Responds with whispers to moderation issues",
parameters: [],
script: <<~JS,
function invoke(params) {
const postId = context.post_id;
const post = discourse.getPost(postId);
const helper = discourse.getPersona("#{ai_persona.name}");
// Pass instructions to make response a whisper
const answer = helper.respondTo({
post_id: post.id,
instructions: "Respond as a whisper for moderators only",
whisper: true
});
return {
answer: answer,
processed: true,
reason: "responded with whisper"
};
}
JS
created_by_id: Discourse.system_user.id,
summary: "Responds with whispers",
enabled: true,
)
result = nil
DiscourseAi::Completions::Llm.with_prepared_responses(
["This moderation note is only visible to staff"],
) { result = described_class.handle(post: post, tool_id: whisper_tool.id) }
expect(result["processed"]).to eq(true)
response = post.topic.reload.posts.order(:post_number).last
expect(response.raw).to eq("This moderation note is only visible to staff")
# Check that the response is indeed a whisper
expect(response.post_type).to eq(Post.types[:whisper])
end
end

View File

@ -141,18 +141,20 @@ RSpec.describe DiscourseAi::AiBot::Playground do
it "can force usage of a tool" do
tool_name = "custom-#{custom_tool.id}"
ai_persona.update!(tools: [[tool_name, nil, true]], forced_tool_count: 1)
responses = [tool_call, "custom tool did stuff (maybe)"]
responses = [tool_call, ["custom tool did stuff (maybe)"], ["new PM title"]]
prompts = nil
reply_post = nil
private_message = Fabricate(:private_message_topic, user: user)
DiscourseAi::Completions::Llm.with_prepared_responses(responses) do |_, _, _prompts|
new_post = Fabricate(:post, raw: "Can you use the custom tool?")
new_post = Fabricate(:post, raw: "Can you use the custom tool?", topic: private_message)
reply_post = playground.reply_to(new_post)
prompts = _prompts
end
expect(prompts.length).to eq(2)
expect(prompts.length).to eq(3)
expect(prompts[0].tool_choice).to eq("search")
expect(prompts[1].tool_choice).to eq(nil)
@ -381,7 +383,7 @@ RSpec.describe DiscourseAi::AiBot::Playground do
}}}
Your instructions:
#{user.username} said Hello
#{user.username}: Hello
TEXT
expect(content.strip).to eq(expected)

View File

@ -188,6 +188,40 @@ RSpec.describe AiTool do
expect(result).to eq("Hello")
end
it "is able to run llm completions" do
script = <<~JS
function invoke(params) {
return llm.generate("question two") + llm.generate(
{ messages: [
{ type: "system", content: "system message" },
{ type: "user", content: "user message" }
]}
);
}
JS
tool = create_tool(script: script)
result = nil
prompts = nil
responses = ["Hello ", "World"]
DiscourseAi::Completions::Llm.with_prepared_responses(responses) do |_, _, _prompts|
runner = tool.runner({}, llm: llm, bot_user: nil, context: {})
result = runner.invoke
prompts = _prompts
end
prompt =
DiscourseAi::Completions::Prompt.new(
"system message",
messages: [{ type: :user, content: "user message" }],
)
expect(result).to eq("Hello World")
expect(prompts[0]).to eq("question two")
expect(prompts[1]).to eq(prompt)
end
it "can timeout slow JS" do
script = <<~JS
function invoke(params) {